/**
 * This header is generated by class-dump-z 0.2b.
 *
 * Source: /System/Library/PrivateFrameworks/iTunesStoreUI.framework/iTunesStoreUI
 */

#import <iTunesStoreUI/UITableViewDelegate.h>
#import <iTunesStoreUI/iTunesStoreUI-Structs.h>
#import <iTunesStoreUI/SUViewController.h>
#import <iTunesStoreUI/UITableViewDataSource.h>

@class SUTableDataSource, SUTableView, NSIndexPath, UITableView;

@interface SUTableViewController : SUViewController <UITableViewDataSource, UITableViewDelegate> {
	SUTableDataSource *_dataSource;	// 212 = 0xd4
	int _disappearOrientation;	// 216 = 0xd8
	NSIndexPath *_firstTapIndexPath;	// 220 = 0xdc
	int _placeholderRowCount;	// 224 = 0xe0
	BOOL _preferUserInteractionWhileScrolling;	// 228 = 0xe4
	SUTableView *_tableView;	// 232 = 0xe8
	int _tableViewStyle;	// 236 = 0xec
}
@property(assign, nonatomic) int tableViewStyle;	// G=0x1ab69; S=0x19981; @synthesize=_tableViewStyle
@property(readonly, assign, nonatomic) UITableView *tableView;	// G=0x1ab59; @synthesize=_tableView
@property(retain, nonatomic) SUTableDataSource *dataSource;	// G=0x1ab49; S=0x1a041; @synthesize=_dataSource
@property(readonly, assign, nonatomic) unsigned numberOfRows;	// G=0x19fe5;
// declared property getter: - (int)tableViewStyle;	// 0x1ab69
// declared property getter: - (id)tableView;	// 0x1ab59
// declared property getter: - (id)dataSource;	// 0x1ab49
- (void)_resetTableView;	// 0x1a9fd
- (void)_reloadPlaceholderCells;	// 0x1a889
- (void)_deliverTapCount:(int)count forIndexPath:(id)indexPath;	// 0x1a839
- (void)_doubleTapTimeout;	// 0x1a7cd
- (id)tableView:(id)view willSelectRowAtIndexPath:(id)indexPath;	// 0x1a729
- (id)tableView:(id)view viewForHeaderInSection:(int)section;	// 0x1a709
- (id)tableView:(id)view titleForDeleteConfirmationButtonForRowAtIndexPath:(id)indexPath;	// 0x1a6e9
- (float)tableView:(id)view heightForRowAtIndexPath:(id)indexPath;	// 0x1a665
- (float)tableView:(id)view heightForHeaderInSection:(int)section;	// 0x1a5cd
- (float)tableView:(id)view heightForFooterInSection:(int)section;	// 0x1a5ad
- (int)tableView:(id)view editingStyleForRowAtIndexPath:(id)indexPath;	// 0x1a555
- (void)tableView:(id)view didSelectRowAtIndexPath:(id)indexPath;	// 0x1a41d
- (int)numberOfSectionsInTableView:(id)tableView;	// 0x1a3f1
- (void)tableView:(id)view willDisplayCell:(id)cell forRowAtIndexPath:(id)indexPath;	// 0x1a311
- (id)tableView:(id)view titleForHeaderInSection:(int)section;	// 0x1a2f1
- (int)tableView:(id)view sectionForSectionIndexTitle:(id)sectionIndexTitle atIndex:(int)index;	// 0x1a2c9
- (int)tableView:(id)view numberOfRowsInSection:(int)section;	// 0x1a259
- (void)tableView:(id)view commitEditingStyle:(int)style forRowAtIndexPath:(id)indexPath;	// 0x1a239
- (id)tableView:(id)view cellForRowAtIndexPath:(id)indexPath;	// 0x1a1b9
- (id)sectionIndexTitlesForTableView:(id)tableView;	// 0x1a199
- (void)scrollViewWillBeginDragging:(id)scrollView;	// 0x1a161
- (void)scrollViewDidEndDecelerating:(id)scrollView;	// 0x1a129
- (void)scrollViewDidEndDragging:(id)scrollView willDecelerate:(BOOL)decelerate;	// 0x1a0e9
// declared property setter: - (void)setDataSource:(id)source;	// 0x1a041
// declared property getter: - (unsigned)numberOfRows;	// 0x19fe5
- (void)willAnimateRotationToInterfaceOrientation:(int)interfaceOrientation duration:(double)duration;	// 0x19f79
- (void)viewWillDisappear:(BOOL)view;	// 0x19edd
- (void)viewWillAppear:(BOOL)view;	// 0x19df1
- (void)viewDidAppear:(BOOL)view;	// 0x19da1
- (void)setScriptProperties:(id)properties;	// 0x19c89
- (void)purgeMemoryForReason:(int)reason;	// 0x19c35
- (void)loadView;	// 0x19bb9
- (id)copyScriptProperties;	// 0x19ae5
- (id)copyDefaultScriptProperties;	// 0x19a8d
- (id)copyArchivableContext;	// 0x199ad
// declared property setter: - (void)setTableViewStyle:(int)style;	// 0x19981
- (void)scrollToRowAtIndexPath:(id)indexPath atScrollPosition:(int)scrollPosition animated:(BOOL)animated;	// 0x19869
- (void)reloadData;	// 0x19605
- (void)reloadForChangedRowCount:(int)changedRowCount;	// 0x19601
- (id)newTableView;	// 0x19575
- (BOOL)indexPathIsPlaceholder:(id)placeholder;	// 0x194bd
- (BOOL)handleSelectionForIndexPath:(id)indexPath tapCount:(int)count;	// 0x194b9
- (BOOL)deleteRowAtIndexPath:(id)indexPath;	// 0x1928d
- (int)clippedCornersForIndexPath:(id)indexPath;	// 0x1920d
- (BOOL)canSelectRowAtIndexPath:(id)indexPath;	// 0x19209
- (void)dealloc;	// 0x19135
- (id)init;	// 0x190e1
@end
#include "simple.h" static inline struct simplefs_super_block *SIMPLEFS_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct simplefs_inode *SIMPLEFS_INODE(struct inode *inode) { return inode->i_private; }
package dev.fiki.forgehax.api.cmd.settings.collections;

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import dev.fiki.forgehax.api.cmd.AbstractSettingCollection;
import dev.fiki.forgehax.api.cmd.IParentCommand;
import dev.fiki.forgehax.api.cmd.argument.IArgument;
import dev.fiki.forgehax.api.cmd.flag.EnumFlag;
import dev.fiki.forgehax.api.cmd.listener.ICommandListener;
import dev.fiki.forgehax.api.cmd.value.IValue;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;

import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;

class BaseSimpleSettingCollection<E, L extends Collection<E>> extends AbstractSettingCollection<E, L> {
  @Getter(AccessLevel.PROTECTED)
  private final IArgument<E> converterArgument;

  public BaseSimpleSettingCollection(IParentCommand parent, String name, Set<String> aliases, String description,
      Set<EnumFlag> flags, Supplier<L> supplier, Collection<E> defaultTo,
      @NonNull IArgument<E> argument, List<ICommandListener> listeners) {
    super(parent, name, aliases, description, flags, supplier, defaultTo, listeners);
    this.converterArgument = argument;

    newSimpleCommand()
        .name("add")
        .description("Adds an element to the collection")
        .argument(argument)
        .executor(args -> {
          IValue<E> arg = args.getFirst();
          if (this.add(arg.getValue())) {
            args.inform("Added \"%s\" to the collection.", arg.getStringValue());
          } else {
            args.warn("Could not add \"%s\" to the collection (possible duplicate?).", arg.getStringValue());
          }
        })
        .build();

    newSimpleCommand()
        .name("remove")
        .alias("delete")
        .description("Removes an element from the collection")
        .argument(argument)
        .executor(args -> {
          IValue<E> arg = args.getFirst();
          if (this.remove(arg.getValue())) {
            args.inform("Removed \"%s\" from the collection.", arg.getStringValue());
          } else {
            args.warn("Could not remove \"%s\" from the collection (not present?).", arg.getStringValue());
          }
        })
        .build();

    newSimpleCommand()
        .name("list")
        .alias("show")
        .alias("display")
        .description("Lists all the elements in the collection")
        .executor(args -> {
          if (this.isEmpty()) {
            args.inform("Collection is empty.");
          } else {
            args.inform(this.stream()
                .map(argument::convert)
                .collect(Collectors.joining(", ")));
          }
        })
        .build();

    newSimpleCommand()
        .name("clear")
        .description("Clears all the elements in the collection")
        .executor(args -> {
          int size = this.size();
          this.clear();
          args.inform("Cleared %d elements from the collection.", size);
        })
        .build();
  }

  @Override
  public JsonElement serialize() {
    JsonArray array = new JsonArray();
    IArgument<E> converter = getConverterArgument();
    for (E obj : this) {
      array.add(converter.convert(obj));
    }
    return array;
  }

  @Override
  public void deserialize(JsonElement json) {
    if (!json.isJsonArray()) {
      throw new IllegalArgumentException("expected JsonArray, got " + json.getClass().getSimpleName());
    }

    // clear all current children
    this.wrapping.clear();

    IArgument<E> converter = getConverterArgument();
    for (JsonElement element : json.getAsJsonArray()) {
      this.wrapping.add(converter.parse(element.getAsString()));
    }
  }

  @Override
  protected String printableValue(E o) {
    return getConverterArgument().print(o);
  }
}
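The serialize/deserialize pair above round-trips the collection through a Gson JsonArray via the converter argument. A minimal self-contained sketch of the same contract, using plain Gson and a stand-in converter in place of the ForgeHax IArgument type (which is not reproduced here):

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import java.util.ArrayList;
import java.util.List;

public class RoundTripSketch {
    // Stand-in for IArgument<Integer>: convert() prints an element, parse() reads it back.
    static String convert(Integer v) { return Integer.toString(v); }
    static Integer parse(String s) { return Integer.valueOf(s); }

    public static void main(String[] args) {
        List<Integer> values = List.of(1, 2, 3);

        // serialize: one JSON element per collection entry, via the converter
        JsonArray array = new JsonArray();
        for (Integer v : values) {
            array.add(convert(v));
        }

        // deserialize: parse each element's string form back into the collection
        List<Integer> restored = new ArrayList<>();
        for (JsonElement element : array) {
            restored.add(parse(element.getAsString()));
        }

        System.out.println(restored); // [1, 2, 3]
    }
}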
//////////////////////////////////////////////////////////////////////////
//
//  Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
//
//  Redistribution and use in source and binary forms, with or without
//  modification, are permitted provided that the following conditions are
//  met:
//
//      * Redistributions of source code must retain the above
//        copyright notice, this list of conditions and the following
//        disclaimer.
//
//      * Redistributions in binary form must reproduce the above
//        copyright notice, this list of conditions and the following
//        disclaimer in the documentation and/or other materials provided with
//        the distribution.
//
//      * Neither the name of <NAME> nor the names of
//        any other contributors to this software may be used to endorse or
//        promote products derived from this software without specific prior
//        written permission.
//
//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
//  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
//  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
//  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
//  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
//  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
//  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
//  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
//  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
//  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
//  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////

#include "boost/python.hpp"

#include "RenderControllerBinding.h"

#include "GafferScene/RenderController.h"

#include "GafferBindings/SignalBinding.h"

#include "Gaffer/Context.h"

#include "IECorePython/RefCountedBinding.h"

using namespace boost::python;
using namespace Imath;
using namespace IECoreScenePreview;
using namespace Gaffer;
using namespace GafferBindings;
using namespace GafferScene;

namespace
{

void setScene( RenderController &r, const ScenePlug &scene )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.setScene( &scene );
}

ScenePlugPtr getScene( RenderController &r )
{
	return const_cast<ScenePlug *>( r.getScene() );
}

void setContext( RenderController &r, Gaffer::Context &c )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.setContext( &c );
}

ContextPtr getContext( RenderController &r )
{
	return const_cast<Context *>( r.getContext() );
}

void setExpandedPaths( RenderController &r, const IECore::PathMatcher &expandedPaths )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.setExpandedPaths( expandedPaths );
}

void setMinimumExpansionDepth( RenderController &r, size_t depth )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.setMinimumExpansionDepth( depth );
}

void update( RenderController &r )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.update();
}

void updateMatchingPaths( RenderController &r, const IECore::PathMatcher &pathsToUpdate )
{
	IECorePython::ScopedGILRelease gilRelease;
	r.updateMatchingPaths( pathsToUpdate );
}

} // namespace

void GafferSceneModule::bindRenderController()
{
	scope s = class_<RenderController, boost::noncopyable>( "RenderController", no_init )
		.def( init<ConstScenePlugPtr, ConstContextPtr, RendererPtr>() )
		.def( "renderer", &RenderController::renderer, return_value_policy<IECorePython::CastToIntrusivePtr>() )
		.def( "setScene", &setScene )
		.def( "getScene", &getScene )
		.def( "setContext", &setContext )
		.def( "getContext", &getContext )
		.def( "setExpandedPaths", &setExpandedPaths )
		.def( "getExpandedPaths", &RenderController::getExpandedPaths, return_value_policy<copy_const_reference>() )
		.def( "setMinimumExpansionDepth", &setMinimumExpansionDepth )
		.def( "getMinimumExpansionDepth", &RenderController::getMinimumExpansionDepth )
		.def( "updateRequiredSignal", &RenderController::updateRequiredSignal, return_internal_reference<1>() )
		.def( "update", &update )
		.def( "updateMatchingPaths", &updateMatchingPaths )
	;

	SignalClass<RenderController::UpdateRequiredSignal>( "UpdateRequiredSignal" );
}
{
  "symbol": "bCEO",
  "address": "0x19cA83a13b4C4BE43FA82c5E415E16f1D86f57F7",
  "overview": {"en": ""},
  "email": "<EMAIL>",
  "website": "https://bitceo.io/",
  "state": "NORMAL",
  "links": {
    "blog": "https://medium.com/bitceo-io",
    "twitter": "https://twitter.com/bitceo_io",
    "telegram": "https://t.me/bitCEO_io",
    "github": "https://github.com/bitceo/"
  }
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef EXTENSIONS_BROWSER_API_IDLE_IDLE_API_CONSTANTS_H_
#define EXTENSIONS_BROWSER_API_IDLE_IDLE_API_CONSTANTS_H_

namespace extensions {
namespace idle_api_constants {

// Events.
extern const char kOnStateChanged[];

// States.
extern const char kStateActive[];
extern const char kStateIdle[];
extern const char kStateLocked[];

}  // namespace idle_api_constants
}  // namespace extensions

#endif  // EXTENSIONS_BROWSER_API_IDLE_IDLE_API_CONSTANTS_H_
#include "../../lv_examples.h" #if LV_USE_CHART && LV_DRAW_COMPLEX && LV_BUILD_EXAMPLES static void add_data(lv_timer_t * t) { lv_obj_t * chart = t->user_data; lv_chart_series_t * ser = lv_chart_get_series_next(chart, NULL); lv_chart_set_next_value(chart, ser, lv_rand(10, 90)); uint16_t p = lv_chart_get_point_count(chart); uint16_t s = lv_chart_get_x_start_point(chart, ser); lv_coord_t * a = lv_chart_get_y_array(chart, ser); a[(s + 1) % p] = LV_CHART_POINT_NONE; a[(s + 2) % p] = LV_CHART_POINT_NONE; a[(s + 2) % p] = LV_CHART_POINT_NONE; lv_chart_refresh(chart); } /** * Circular line chart with gap */ void lv_example_chart_9(void) { /*Create a stacked_area_chart.obj*/ lv_obj_t * chart = lv_chart_create(lv_scr_act()); lv_chart_set_update_mode(chart, LV_CHART_UPDATE_MODE_CIRCULAR); lv_obj_set_size(chart, 200, 150); lv_obj_center(chart); lv_chart_set_point_count(chart, 30); lv_chart_series_t * ser = lv_chart_add_series(chart, lv_palette_main(LV_PALETTE_RED), LV_CHART_AXIS_PRIMARY_Y); /*Prefill with data*/ uint32_t i; for(i = 0; i < 30; i++) { lv_chart_set_next_value(chart, ser, lv_rand(10, 90)); } lv_timer_create(add_data, 300, chart); } #endif
package com.asha;

import android.view.animation.Animation;

/**
 * Created by hzqiujiadi on 15/12/6.
 * hzqiujiadi <EMAIL>
 */
public class AnimationListenerAdapter implements Animation.AnimationListener {
    @Override
    public void onAnimationStart(Animation animation) {
    }

    @Override
    public void onAnimationEnd(Animation animation) {
    }

    @Override
    public void onAnimationRepeat(Animation animation) {
    }
}
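The adapter's empty methods let a caller override only the callback it cares about instead of all three. A minimal usage sketch; the fade-out helper and the view passed in are illustrative, not part of the class above:

// Hypothetical call site, e.g. inside an Activity or view holder.
void fadeOutAndHide(final android.view.View view) {
    Animation fadeOut = android.view.animation.AnimationUtils.loadAnimation(
            view.getContext(), android.R.anim.fade_out);
    fadeOut.setAnimationListener(new AnimationListenerAdapter() {
        @Override
        public void onAnimationEnd(Animation animation) {
            // The only callback we care about; start/repeat stay no-ops.
            view.setVisibility(android.view.View.GONE);
        }
    });
    view.startAnimation(fadeOut);
}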
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.segment.loading;

import com.google.common.annotations.VisibleForTesting;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import org.apache.commons.io.FileUtils;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.timeline.DataSegment;

import javax.annotation.Nullable;
import java.io.File;
import java.util.HashSet;
import java.util.Set;

/**
 * This class is a very simple logical representation of a local path. It keeps track of files stored under the
 * {@link #path} via {@link #reserve}, so that the total size of stored files doesn't exceed the {@link #maxSizeBytes}
 * and free space is always kept larger than {@link #freeSpaceToKeep}.
 *
 * This class is thread-safe, so that multiple threads can update its state at the same time.
 * One example usage is that a historical can use multiple threads to load different segments in parallel
 * from deep storage.
 */
public class StorageLocation
{
  private static final EmittingLogger log = new EmittingLogger(StorageLocation.class);

  private final File path;
  private final long maxSizeBytes;
  private final long freeSpaceToKeep;

  /**
   * Set of files stored under the {@link #path}.
   */
  @GuardedBy("this")
  private final Set<File> files = new HashSet<>();

  /**
   * Current total size of files in bytes.
   */
  @GuardedBy("this")
  private long currSizeBytes = 0;

  public StorageLocation(File path, long maxSizeBytes, @Nullable Double freeSpacePercent)
  {
    this.path = path;
    this.maxSizeBytes = maxSizeBytes;

    if (freeSpacePercent != null) {
      long totalSpaceInPartition = path.getTotalSpace();
      this.freeSpaceToKeep = (long) ((freeSpacePercent * totalSpaceInPartition) / 100);
      log.info(
          "SegmentLocation[%s] will try and maintain [%d:%d] free space while loading segments.",
          path,
          freeSpaceToKeep,
          totalSpaceInPartition
      );
    } else {
      this.freeSpaceToKeep = 0;
    }
  }

  public File getPath()
  {
    return path;
  }

  /**
   * Remove a segment file from this location. The given file argument must be a file rather than directory.
   */
  public synchronized void removeFile(File file)
  {
    if (files.remove(file)) {
      currSizeBytes -= FileUtils.sizeOf(file);
    } else {
      log.warn("File[%s] is not found under this location[%s]", file, path);
    }
  }

  /**
   * Remove a segment dir from this location. The segment size is subtracted from currSizeBytes.
   */
  public synchronized void removeSegmentDir(File segmentDir, DataSegment segment)
  {
    if (files.remove(segmentDir)) {
      currSizeBytes -= segment.getSize();
    } else {
      log.warn("SegmentDir[%s] is not found under this location[%s]", segmentDir, path);
    }
  }

  /**
   * Reserves space to store the given segment. The segment size is added to currSizeBytes.
   * If it succeeds, it returns a file for the given segmentDir in this storage location. Returns null otherwise.
   */
  @Nullable
  public synchronized File reserve(String segmentDir, DataSegment segment)
  {
    return reserve(segmentDir, segment.getId().toString(), segment.getSize());
  }

  public synchronized boolean isReserved(String segmentDir)
  {
    return files.contains(segmentDirectoryAsFile(segmentDir));
  }

  public File segmentDirectoryAsFile(String segmentDir)
  {
    return new File(path, segmentDir); //lgtm [java/path-injection]
  }

  /**
   * Reserves space to store the given segment, only if it has not been done already. This can be used
   * when segment is already downloaded on the disk. Unlike {@link #reserve(String, DataSegment)}, this function
   * skips the check on disk availability. We also account for segment usage even if available size dips below 0.
   * Such a situation indicates a configuration problem or a bug and we don't let segment loading fail because
   * of this.
   */
  public synchronized void maybeReserve(String segmentFilePathToAdd, DataSegment segment)
  {
    final File segmentFileToAdd = new File(path, segmentFilePathToAdd);
    if (files.contains(segmentFileToAdd)) {
      // Already reserved
      return;
    }
    files.add(segmentFileToAdd);
    currSizeBytes += segment.getSize();
    if (availableSizeBytes() < 0) {
      log.makeAlert(
          "storage[%s:%,d] has more segments than it is allowed. Currently loading Segment[%s:%,d]. Please increase druid.segmentCache.locations maxSize param",
          getPath(),
          availableSizeBytes(),
          segment.getId(),
          segment.getSize()
      ).emit();
    }
  }

  /**
   * Reserves space to store the given segment.
   * If it succeeds, it returns a file for the given segmentFilePathToAdd in this storage location.
   * Returns null otherwise.
   */
  @Nullable
  public synchronized File reserve(String segmentFilePathToAdd, String segmentId, long segmentSize)
  {
    final File segmentFileToAdd = new File(path, segmentFilePathToAdd);
    if (files.contains(segmentFileToAdd)) {
      return null;
    }
    if (canHandle(segmentId, segmentSize)) {
      files.add(segmentFileToAdd);
      currSizeBytes += segmentSize;
      return segmentFileToAdd;
    } else {
      return null;
    }
  }

  public synchronized boolean release(String segmentFilePath, long segmentSize)
  {
    final File segmentFile = new File(path, segmentFilePath);
    if (files.remove(segmentFile)) {
      currSizeBytes -= segmentSize;
      return true;
    }
    return false;
  }

  /**
   * This method is only package-private to use it in unit tests. Production code must not call this method directly.
   * Use {@link #reserve} instead.
   */
  @VisibleForTesting
  @GuardedBy("this")
  boolean canHandle(String segmentId, long segmentSize)
  {
    if (availableSizeBytes() < segmentSize) {
      log.warn(
          "Segment[%s:%,d] too large for storage[%s:%,d]. Check your druid.segmentCache.locations maxSize param",
          segmentId,
          segmentSize,
          getPath(),
          availableSizeBytes()
      );
      return false;
    }

    if (freeSpaceToKeep > 0) {
      long currFreeSpace = path.getFreeSpace();
      if ((freeSpaceToKeep + segmentSize) > currFreeSpace) {
        log.warn(
            "Segment[%s:%,d] too large for storage[%s:%,d] to maintain suggested freeSpace[%d], current freeSpace is [%d].",
            segmentId,
            segmentSize,
            getPath(),
            availableSizeBytes(),
            freeSpaceToKeep,
            currFreeSpace
        );
        return false;
      }
    }

    return true;
  }

  public synchronized long availableSizeBytes()
  {
    return maxSizeBytes - currSizeBytes;
  }

  public synchronized long currSizeBytes()
  {
    return currSizeBytes;
  }

  @VisibleForTesting
  synchronized boolean contains(String relativePath)
  {
    final File segmentFileToAdd = new File(path, relativePath);
    return files.contains(segmentFileToAdd);
  }
}
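A rough sketch of the reserve/release flow a caller such as a segment loader might follow; the cache directory, segment id, and sizes are made up for illustration, and error handling is elided:

// Hypothetical caller: reserve space before downloading, release on failure.
StorageLocation location = new StorageLocation(new File("/tmp/druid-cache"), 10_000_000L, null);

String relativePath = "wikipedia/2012-01-01/0";  // illustrative segment dir
File segmentDir = location.reserve(relativePath, "wikipedia_2012-01-01", 5_000_000L);
if (segmentDir != null) {
    try {
        // ... download the segment into segmentDir ...
    } catch (Exception e) {
        // Give the reserved bytes back if the download fails.
        location.release(relativePath, 5_000_000L);
    }
} else {
    // Not enough room under maxSizeBytes, or the free-space floor would be violated.
}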
{
  "schema_version": "1.2.0",
  "id": "GHSA-gj8g-jfxx-vxc7",
  "modified": "2022-02-12T00:01:02Z",
  "published": "2022-02-10T00:00:31Z",
  "aliases": [
    "CVE-2021-37852"
  ],
  "details": "ESET products for Windows allows untrusted process to impersonate the client of a pipe, which can be leveraged by attacker to escalate privileges in the context of NT AUTHORITY\\SYSTEM.",
  "severity": [],
  "affected": [],
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-37852"
    },
    {
      "type": "WEB",
      "url": "https://support.eset.com/en/ca8223-local-privilege-escalation-vulnerability-fixed-in-eset-products-for-windows"
    },
    {
      "type": "WEB",
      "url": "https://www.zerodayinitiative.com/advisories/ZDI-22-148/"
    }
  ],
  "database_specific": {
    "cwe_ids": [
      "CWE-863"
    ],
    "severity": "HIGH",
    "github_reviewed": false
  }
}
{ "schema_version": "1.2.0", "id": "GHSA-629c-fg8h-8j2m", "modified": "2022-05-02T03:16:58Z", "published": "2022-05-02T03:16:58Z", "aliases": [ "CVE-2009-0608" ], "details": "Integer overflow in the showLog function in fake_log_device.c in liblog in Open Handset Alliance Android 1.0 allows attackers to trigger a buffer overflow and possibly have unspecified other impact by sending a large number of input lines.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2009-0608" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/48842" }, { "type": "WEB", "url": "http://www.securityfocus.com/archive/1/500753/100/0/threaded" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/33695" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }
#include "QtWrapper.h" QtWrapper::QtWrapper( int updateRate_Hz, int display, int pixelDecimation, int cropLeft, int cropRight, int cropTop, int cropBottom ) : _timer(this), _grabber(display, cropLeft, cropRight, cropTop, cropBottom) { _grabber.setFramerate(updateRate_Hz); _grabber.setPixelDecimation(pixelDecimation); _timer.setTimerType(Qt::PreciseTimer); _timer.setSingleShot(false); _timer.setInterval(_grabber.getUpdateInterval()); // Connect capturing to the timeout signal of the timer connect(&_timer, SIGNAL(timeout()), this, SLOT(capture())); } const Image<ColorRgb> & QtWrapper::getScreenshot() { _grabber.grabFrame(_screenshot); return _screenshot; } void QtWrapper::start() { _timer.start(); } void QtWrapper::stop() { _timer.stop(); } bool QtWrapper::displayInit() { return _grabber.setupDisplay(); } void QtWrapper::capture() { if(unsigned(_grabber.getImageWidth()) != unsigned(_screenshot.width()) || unsigned(_grabber.getImageHeight()) != unsigned(_screenshot.height())) _screenshot.resize(_grabber.getImageWidth(),_grabber.getImageHeight()); _grabber.grabFrame(_screenshot); emit sig_screenshot(_screenshot); } void QtWrapper::setVideoMode(VideoMode mode) { _grabber.setVideoMode(mode); }
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.spring.autoconfigure.b2c;

import org.springframework.context.annotation.Bean;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.oauth2.client.OAuth2AuthorizedClientManager;
import org.springframework.security.oauth2.client.web.reactive.function.client.ServletOAuth2AuthorizedClientExchangeFilterFunction;
import org.springframework.web.reactive.function.client.WebClient;

@EnableWebSecurity
public class WebappAccessResourceConfiguration extends WebSecurityConfigurerAdapter {

    private final AADB2COidcLoginConfigurer configurer;

    public WebappAccessResourceConfiguration(AADB2COidcLoginConfigurer configurer) {
        this.configurer = configurer;
    }

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        // @formatter:off
        http.authorizeRequests()
                .anyRequest().authenticated()
                .and()
            .apply(configurer);
        // @formatter:on
    }

    @Bean
    public WebClient webClient(OAuth2AuthorizedClientManager oAuth2AuthorizedClientManager) {
        ServletOAuth2AuthorizedClientExchangeFilterFunction function =
            new ServletOAuth2AuthorizedClientExchangeFilterFunction(oAuth2AuthorizedClientManager);
        return WebClient.builder()
                        .apply(function.oauth2Configuration())
                        .build();
    }
}
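The WebClient bean above carries the OAuth2 exchange filter, so a caller attaches an authorized client per request via an attribute. A minimal sketch of such a call; the registration id "webapp" and the resource URL are placeholders, not values taken from this configuration:

import static org.springframework.security.oauth2.client.web.reactive.function.client.ServletOAuth2AuthorizedClientExchangeFilterFunction.clientRegistrationId;

// Hypothetical caller: the filter function resolves the access token
// for the named client registration before the request is sent.
String body = webClient.get()
        .uri("https://graph.microsoft.com/v1.0/me")     // placeholder resource URL
        .attributes(clientRegistrationId("webapp"))     // placeholder registration id
        .retrieve()
        .bodyToMono(String.class)
        .block();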
{ "copyright_text": null, "description": "", "duration": 1497, "language": "eng", "recorded": "2020-07-08", "related_urls": [ { "label": "Conference schedule", "url": "https://www.scipy2020.scipy.org/schedule" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi_webp/CbH9SVrPSUA/maxresdefault.webp", "title": "Treating Gridded Geospatial Data as Point Data to Simplify Analytics", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=CbH9SVrPSUA" } ] }
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the AIL discriminator losses."""

from absl.testing import absltest
from acme import types
from acme.agents.jax.ail import losses
from acme.jax import networks as networks_lib
import jax
from jax import test_util as jtu
import jax.numpy as jnp
import tree


class AilLossTest(jtu.JaxTestCase):

  def test_gradient_penalty(self):

    def dummy_discriminator(
        transition: types.Transition) -> networks_lib.Logits:
      return transition.observation + jnp.square(transition.action)

    zero_transition = types.Transition(0., 0., 0., 0., 0.)
    zero_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
                                         zero_transition)
    self.assertEqual(
        losses._compute_gradient_penalty(zero_transition, dummy_discriminator,
                                         0.), 1**2 + 0**2)

    one_transition = types.Transition(1., 1., 0., 0., 0.)
    one_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
                                        one_transition)
    self.assertEqual(
        losses._compute_gradient_penalty(one_transition, dummy_discriminator,
                                         0.), 1**2 + 2**2)

  def test_pugail(self):

    def dummy_discriminator(
        state: losses.State,
        transition: types.Transition) -> losses.DiscriminatorOutput:
      return transition.observation, state

    zero_transition = types.Transition(.1, 0., 0., 0., 0.)
    zero_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
                                         zero_transition)

    one_transition = types.Transition(1., 0., 0., 0., 0.)
    one_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
                                        one_transition)

    prior = .7
    loss_fn = losses.pugail_loss(
        positive_class_prior=prior, entropy_coefficient=0.)

    loss, _ = loss_fn(dummy_discriminator, {}, one_transition, zero_transition,
                      ())

    d_one = jax.nn.sigmoid(dummy_discriminator({}, one_transition)[0])
    d_zero = jax.nn.sigmoid(dummy_discriminator({}, zero_transition)[0])
    expected_loss = -prior * jnp.log(
        d_one) + -jnp.log(1. - d_zero) - prior * -jnp.log(1 - d_one)

    self.assertAlmostEqual(loss, expected_loss, places=6)


if __name__ == '__main__':
  absltest.main()
import os
from time import sleep
import logging

import records
from prometheus_client import CollectorRegistry, start_http_server, ProcessCollector

from explorer_python_api.ExplorerDumper import ExplorerDumper

EXPORTER_PORT = 7001
POLL_TIME = 20  # Slot duration


def explorerDumperInit(logger):
    metrics_registry = CollectorRegistry()
    start_http_server(EXPORTER_PORT, registry=metrics_registry)
    dbuser = os.environ.get('DBUSER', 'explorer_python_api')
    dbname = os.environ.get('DBNAME', 'explorer_python_api')
    epoch_slots = os.environ.get('EPOCHSLOTS', '21600')
    addr_max_len = os.environ.get('ADDRMAXLEN', '200')
    explorer_url = os.environ.get('EXPLORERURL', 'http://localhost:8100')
    dbsockpath = os.environ.get('DBSOCKPATH', '/tmp')

    # Postgres with Ident and Socket (ex: Nix deploy)
    # dbstring = f'postgres:///{dbname}?&host={dbsockpath}'
    # Postgres without Ident and Socket (ex: Nix deploy)
    dbstring = f'postgres:///{dbname}?user={dbuser}&host={dbsockpath}'
    # Postgres without socket spec (ex: Docker)
    # dbstring = f'postgres://localhost:5432/{dbname}?user=postgres&sslmode=disable'
    dbc = records.Database(dbstring)

    logger.info('Starting Explorer Dumper in %s seconds.', POLL_TIME)
    explorer_dumper = ExplorerDumper(logger, metrics_registry, dbc, explorer_url)

    try:
        epoch_slots = int(epoch_slots)
    except Exception as e:
        logger.exception(e)
        logger.info("The EPOCHSLOTS env parameter must be a positive integer: 0 < EPOCHSLOTS <= 21600. 21600 is default. Please update and restart the service.")
        sleep(20)
        exit(1)
    if epoch_slots < 1 or epoch_slots > 21600:
        logger.info("The EPOCHSLOTS env parameter must be a positive integer: 0 < EPOCHSLOTS <= 21600. 21600 is default. Please update and restart the service.")
        sleep(20)
        exit(1)
    elif epoch_slots != 60 and epoch_slots != 21600:
        logger.warning(f'EPOCHSLOTS of {epoch_slots} is not a standard deployment parameter. If this is not intended, adjust the EPOCHSLOTS env parameter and restart the service.')
    logger.info(f'Setting epoch slots to {epoch_slots}')
    explorer_dumper.epochSlots = epoch_slots

    try:
        addr_max_len = int(addr_max_len)
    except Exception as e:
        logger.exception(e)
        logger.info("The ADDRMAXLEN env parameter must be a positive integer: 200 <= ADDRMAXLEN <= 8000. 200 is default. Please update and restart the service.")
        sleep(20)
        exit(1)
    if addr_max_len < 200 or addr_max_len > 8000:
        logger.info("The ADDRMAXLEN env parameter must be a positive integer: 200 <= ADDRMAXLEN <= 8000. 200 is default. Please update and restart the service.")
        sleep(20)
        exit(1)
    logger.info(f'Setting address max length to {addr_max_len}. Larger addresses will be truncated.')
    explorer_dumper.addrMaxLen = addr_max_len
    return explorer_dumper


def runDumper():
    logger = initiateLogger()
    explorer_dumper = explorerDumperInit(logger)
    metrics_registry = explorer_dumper.getMetricsRegistry()
    process_collector = ProcessCollector(registry=metrics_registry)
    while True:
        process_collector.collect()
        logger.info("Preparing to run dumper")
        explorer_dumper.dump()
        logger.info('Dump completed. Restarting in %s seconds.', POLL_TIME)
        sleep(POLL_TIME)


def initiateLogger():
    # create logger
    logger = logging.getLogger('explorer-postgres-dumper')
    logger.setLevel(logging.DEBUG)

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # add formatter to ch
    ch.setFormatter(formatter)

    # add ch to logger
    logger.addHandler(ch)

    # 'application' code
    return logger


if __name__ == "__main__":
    runDumper()
#include "Mp_Precomp.h" #if WPP_SOFTWARE_TRACE #include "P2P_Build_PublicAction.tmh" #endif #include "P2P_Internal.h" #if (P2P_SUPPORT == 1) //----------------------------------------------------------------------------- // Local //----------------------------------------------------------------------------- static VOID p2p_build_GoNegReqIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo ) { pu1Byte pLen = NULL; u1Byte grpCap = 0; P2P_WPS_ATTRIBUTES *pWps = &pP2PInfo->WpsAttributes; u1Byte intent = 0; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; if(p2p_ActingAs_Go(pP2PInfo)) RT_TRACE_F(COMP_P2P, DBG_WARNING, ("Invalid role\n")); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { grpCap = (u1Byte)pP2PInfo->NegotiationRequestGroupCapability; } else grpCap = pP2PInfo->GroupCapability; P2PAttr_Make_Capability(pBuf, pP2PInfo->DeviceCapability & ~P2P_DEV_CAP_CLIENT_DISCOVERABILITY, // this cap valid only in P2P Group Info and AssocReq grpCap); if(pP2PInfo->ConnectionContext.bProbePeerChannelList) intent = 0; // make sure that peer will become the GO, note that the tie breaker bit is 0 else intent = pP2PInfo->GOIntent; P2PAttr_Make_GoIntent(pBuf, intent); P2PAttr_Make_ConfigTimeout(pBuf, pP2PInfo->GOConfigurationTimeout, pP2PInfo->ClientConfigurationTimeout); P2PAttr_Make_ListenChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, pP2PInfo->ListenChannel); P2PAttr_Make_ExtListenTiming(pBuf, pP2PInfo->ExtListenTimingDuration, pP2PInfo->ExtListenTimingPeriod); P2PAttr_Make_IntendedIntfAddr(pBuf, pP2PInfo->InterfaceAddress); P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pP2PInfo->ChannelEntryList); P2PAttr_Make_DevInfo(pBuf, pP2PInfo->DeviceAddress, pWps->ConfigMethod, &pWps->PrimaryDeviceType, pWps->SecondaryDeviceTypeLength, pWps->SecondaryDeviceTypeList, pWps->DeviceNameLength, pWps->DeviceName); P2PAttr_Make_OperatingChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, pP2PInfo->OperatingChannel); //RT_TRACE_F(COMP_P2P, DBG_LOUD, ("Op Chnl: %d\n", pP2PInfo->OperatingChannel)); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_GoNegRspIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN const u1Byte *da ) { pu1Byte pLen = NULL; u1Byte grpCap = 0; PP2P_WPS_ATTRIBUTES pWps = &pP2PInfo->WpsAttributes; u1Byte status = 0; u1Byte intent = 0; pu1Byte pGrpDevAddr = NULL; pu1Byte pGrpSsidBuf = NULL; u1Byte grpSsidLen = 0; const P2P_DEV_LIST_ENTRY *pDev = NULL; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; pDev = p2p_DevList_Find(&pP2PInfo->devList, da, P2P_DEV_TYPE_DEV); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) status = pP2PInfo->NegotiationResponseStatus; else status = pP2PInfo->Status; P2PAttr_Make_Status(pBuf, status); if(p2p_ActingAs_Go(pP2PInfo)) RT_TRACE_F(COMP_P2P, DBG_WARNING, ("Invalid role\n")); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { grpCap = (u1Byte)pP2PInfo->NegotiationResponseGroupCapability; } else grpCap = pP2PInfo->GroupCapability; P2PAttr_Make_Capability(pBuf, pP2PInfo->DeviceCapability & ~P2P_DEV_CAP_CLIENT_DISCOVERABILITY, // this cap valid only in P2P Group Info and AssocReq grpCap); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { intent = pP2PInfo->GOIntent; } else { // TODO: this is bad doing jobs other than making IE here!!! 
// The tie breaker bit in a GONRsp shall be toggled from the corresponding GONReq // pP2PInfo->GOIntent = (pP2PInfo->GOIntent | !(pP2PInfo->ConnectionContext.ConnectingDevice.GOIntent & 0x01)); intent = pP2PInfo->GOIntent; } P2PAttr_Make_GoIntent(pBuf, intent); P2PAttr_Make_ConfigTimeout(pBuf, pP2PInfo->GOConfigurationTimeout, pP2PInfo->ClientConfigurationTimeout); P2PAttr_Make_IntendedIntfAddr(pBuf, pP2PInfo->InterfaceAddress); if(pP2PInfo->ConnectionContext.bGoingToBeGO) P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pDev->p2p->commonChannels); else P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pP2PInfo->ChannelEntryList); P2PAttr_Make_DevInfo(pBuf, pP2PInfo->DeviceAddress, pWps->ConfigMethod, &pWps->PrimaryDeviceType, pWps->SecondaryDeviceTypeLength, pWps->SecondaryDeviceTypeList, pWps->DeviceNameLength, pWps->DeviceName); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { if(pP2PInfo->bNegotiationResponseUseGroupID) { pGrpDevAddr = pP2PInfo->NegotiationResponseGroupIDDeviceAddress; pGrpSsidBuf = pP2PInfo->NegotiationResponseGroupIDSSID; grpSsidLen = pP2PInfo->uNegotiationResponseGroupIDSSIDLength; } } else { if(pP2PInfo->ConnectionContext.bGoingToBeGO && P2P_STATUS_SUCCESS == pP2PInfo->ConnectionContext.Status) { pGrpDevAddr = pP2PInfo->DeviceAddress; pGrpSsidBuf = pP2PInfo->SSIDBuf; grpSsidLen = pP2PInfo->SSIDLen; } } if(pGrpDevAddr) P2PAttr_Make_GroupId(pBuf, pGrpDevAddr, pGrpSsidBuf, grpSsidLen); // // Going to be GO: // The Operating Channel attribute shall indicate the intended Operating Channel // of the P2P Group. The channel indicated in the Operating Channel attribute shall // be one of the channels in the Channel List attribute in the GO Negotiation Response // frame. // // Going to be Client: // The Operating Channel attribute may indicate a preferred Operating Channel of the // P2P Group, or may be omitted. Any channel indicated in the Operating Channel attribute // shall be one of the channels in the Channel List attribute in the GO Negotiation Response frame. 
// P2PAttr_Make_OperatingChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, pP2PInfo->OperatingChannel); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_GoNegConfIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN const u1Byte *da ) { pu1Byte pLen = NULL; u1Byte grpCap = 0; u1Byte status = 0; pu1Byte pGrpDevAddr = NULL; pu1Byte pGrpSsidBuf = NULL; u1Byte grpSsidLen = 0; u1Byte opChannel = 0; const P2P_DEV_LIST_ENTRY *pDev = NULL; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; pDev = p2p_DevList_Find(&pP2PInfo->devList, da, P2P_DEV_TYPE_DEV); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) status = pP2PInfo->NegotiationConfirmStatus; else status = pP2PInfo->Status; P2PAttr_Make_Status(pBuf, pP2PInfo->Status); if(p2p_ActingAs_Go(pP2PInfo)) RT_TRACE_F(COMP_P2P, DBG_WARNING, ("Invalid role\n")); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { grpCap = (u1Byte)pP2PInfo->NegotiationConfirmGroupCapability; } else grpCap = pP2PInfo->GroupCapability; P2PAttr_Make_Capability(pBuf, pP2PInfo->DeviceCapability & ~P2P_DEV_CAP_CLIENT_DISCOVERABILITY, // this cap valid only in P2P Group Info and AssocReq grpCap); P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pDev->p2p->commonChannels); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { if(pP2PInfo->bNegotiationConfirmUseGroupID) { pGrpDevAddr = pP2PInfo->NegotiationConfirmGroupIDDeviceAddress; pGrpSsidBuf = pP2PInfo->NegotiationConfirmGroupIDSSID; grpSsidLen = pP2PInfo->uNegotiationConfirmGroupIDSSIDLength; } } else { if(pP2PInfo->ConnectionContext.bGoingToBeGO && P2P_STATUS_SUCCESS == pP2PInfo->ConnectionContext.Status) { pGrpDevAddr = pP2PInfo->DeviceAddress; pGrpSsidBuf = pP2PInfo->SSIDBuf; grpSsidLen = pP2PInfo->SSIDLen; } } if(pGrpDevAddr) P2PAttr_Make_GroupId(pBuf, pGrpDevAddr, pGrpSsidBuf, grpSsidLen); // // Going to be GO: // The Operating Channel attribute shall indicate the intended Operating Channel of the // P2P Group. The channel indicated in the Operating Channel attribute shall be one of the // channels in the Channel List attribute in the GO Negotiation Confirmation frame. // // Going to be Client: // The Operating Channel attribute in the GO Negotiation Confirmation frame shall be the // Operating Channel attribute from the GO Negotiation Response frame. 
// if(pP2PInfo->ConnectionContext.bGoingToBeGO) { opChannel = pP2PInfo->OperatingChannel; } else {//Chnl from the GONRsp opChannel = pP2PInfo->ConnectionContext.ConnectingDevice.OperatingChannel; } P2PAttr_Make_OperatingChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, opChannel); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_InvitationReqIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN const u1Byte *da ) { pu1Byte pLen = NULL; PP2P_WPS_ATTRIBUTES pWps = &pP2PInfo->WpsAttributes; u1Byte goTimeout = 0; u1Byte cliTimeout = 0; u1Byte opChannel = 0; u1Byte invitFlag = 0; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; if(pP2PInfo->InvitationContext.bPersistentInvitation) {// in persistent case, we have to tell the peer our conf time // No matter GO or client, we fill them with the max value goTimeout = pP2PInfo->GOConfigurationTimeout; cliTimeout = pP2PInfo->ClientConfigurationTimeout; } else {// otherwise, we're either a GO or client in operating phase, wo we don't have to fill the conf time // Ref Clause 3.1.5.1, a normal invitation always have them to be 0 goTimeout = 0; cliTimeout = 0; } P2PAttr_Make_ConfigTimeout(pBuf, goTimeout, cliTimeout); P2PAttr_Make_GroupBssid(pBuf, pP2PInfo->InvitationContext.GroupBssid); P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pP2PInfo->ChannelEntryList); P2PAttr_Make_DevInfo(pBuf, pP2PInfo->DeviceAddress, pWps->ConfigMethod, &pWps->PrimaryDeviceType, pWps->SecondaryDeviceTypeLength, pWps->SecondaryDeviceTypeList, pWps->DeviceNameLength, pWps->DeviceName); P2PAttr_Make_GroupId(pBuf, pP2PInfo->InvitationContext.GODeviceAddress, pP2PInfo->InvitationContext.SsidBuf, pP2PInfo->InvitationContext.SsidLen); // // GO: // the Operating Channel attribute indicates the Operating Channel of the P2P Group // // Peristent GO: // the Operating Channel attribute indicates the intended Operating Channel of the P2P Group // // Client: // an Operating Channel attribute shall also be present, indicating the Operating Channel of the P2P Group // // Persistent Client: // an Operating Channel attribute may be present to indicate a preferred Operating Channel // if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter) && 0 != pP2PInfo->uInvitationRequestOperatingChannelNumber ) { opChannel = (u1Byte)pP2PInfo->uInvitationRequestOperatingChannelNumber; } else { if(pP2PInfo->InvitationContext.bPersistentInvitation) { if(0 == pP2PInfo->InvitationContext.OpChannel) {// follow op ch opChannel = pP2PInfo->OperatingChannel; } else {// forced op ch opChannel = pP2PInfo->InvitationContext.OpChannel; } } else { opChannel = pP2PInfo->InvitationContext.OpChannel; } } P2PAttr_Make_OperatingChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, opChannel); if(pP2PInfo->InvitationContext.bPersistentInvitation) SET_FLAG(invitFlag, P2P_INVITATION_FLAGS_TYPE); P2PAttr_Make_InvitationFlags(pBuf, invitFlag); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_InvitationRspIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN const u1Byte *da ) { pu1Byte pLen = NULL; u1Byte status = 0; u1Byte goTimeout = 0; u1Byte cliTimeout = 0; u1Byte opChannel = 0; const P2P_DEV_LIST_ENTRY *pDev = NULL; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; pDev = p2p_DevList_Find(&pP2PInfo->devList, da, P2P_DEV_TYPE_DEV); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) status = pP2PInfo->InvitationResponseStatus; else status = pP2PInfo->Status; P2PAttr_Make_Status(pBuf, status); if(pP2PInfo->InvitationContext.bPersistentInvitation) {// in persistent case, we have to tell 
the peer our conf time // No matter GO or client, we fill them with the max value goTimeout = pP2PInfo->GOConfigurationTimeout; cliTimeout = pP2PInfo->ClientConfigurationTimeout; } else {// non persistent case goTimeout = 0; cliTimeout = 0; } P2PAttr_Make_ConfigTimeout(pBuf, goTimeout, cliTimeout); P2PAttr_Make_GroupBssid(pBuf, pP2PInfo->InvitationContext.GroupBssid); P2PAttr_Make_ChannelList(pBuf, pP2PInfo, &pDev->p2p->commonChannels); // // GO or Persisstent GO: // intended Operating Channel // // Client or Persistent Client (optional): // not specified in the spec // if(pP2PInfo->InvitationContext.bPersistentInvitation) { // // if GO => intended op chnl, shall be one of the channels in the channel list in the InvitationReq // if Client => intended op chnl. // if(P2P_CLIENT == pP2PInfo->InvitationContext.InvitorRole // peer is Cli && pP2PInfo->InvitationContext.bPersistentInvitation // persistent ) {// I'm persistent GO opChannel = pP2PInfo->InvitationContext.OpChannel; } else { opChannel = pP2PInfo->InvitationContext.InvitedDevice.OperatingChannel; } } else {// we are invited // // Not defined in the spec. // Follow the op channel of the peer. // opChannel = pP2PInfo->InvitationContext.InvitedDevice.OperatingChannel; } P2PAttr_Make_OperatingChannel(pBuf, pP2PInfo->CountryString, pP2PInfo->RegulatoryClass, opChannel); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_DevDiscReqIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo ) { pu1Byte pLen = NULL; PP2P_DEVICE_DISCOVERABILITY_CONTEXT pDevDiscContext = &pP2PInfo->DeviceDiscoverabilityContext; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; P2PAttr_Make_DevId(pBuf, pP2PInfo->DeviceDiscoverabilityContext.ClientDeviceAddress); P2PAttr_Make_GroupId(pBuf, pDevDiscContext->GODeviceAddr, pP2PInfo->SSIDBuf, pP2PInfo->SSIDLen); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_DevDiscRspIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN u1Byte status ) { pu1Byte pLen = NULL; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; P2PAttr_Make_Status(pBuf, status); p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_PdReqIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo ) { pu1Byte pLen = NULL; PP2P_WPS_ATTRIBUTES pWps = &pP2PInfo->WpsAttributes; u1Byte grpCap = 0; pu1Byte pGrpDevAddr = NULL; pu1Byte pGrpSsidBuf = NULL; u1Byte grpSsidLen = 0; if(NULL == (pLen = p2p_add_IEHdr(pBuf))) return; if(p2p_ActingAs_Go(pP2PInfo)) RT_TRACE_F(COMP_P2P, DBG_WARNING, ("Invalid role\n")); if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { grpCap = (u1Byte)pP2PInfo->ProvisionRequestGroupCapability; } else grpCap = pP2PInfo->GroupCapability; P2PAttr_Make_Capability(pBuf, pP2PInfo->DeviceCapability & ~P2P_DEV_CAP_CLIENT_DISCOVERABILITY, // this cap valid only in P2P Group Info and AssocReq grpCap); P2PAttr_Make_DevInfo(pBuf, pP2PInfo->DeviceAddress, pWps->ConfigMethod, &pWps->PrimaryDeviceType, pWps->SecondaryDeviceTypeLength, pWps->SecondaryDeviceTypeList, pWps->DeviceNameLength, pWps->DeviceName); if(pP2PInfo->ProvisionDiscoveryContext.go) {// connecting to GO if(P2P_ADAPTER_OS_SUPPORT_P2P(pP2PInfo->pAdapter)) { if(pP2PInfo->bProvisionRequestUseGroupID) { pGrpDevAddr = pP2PInfo->ProvisionRequestGroupIDDeviceAddress; pGrpSsidBuf = pP2PInfo->ProvisionRequestGroupIDSSID; grpSsidLen = pP2PInfo->uProvisionRequestGroupIDSSIDLength; } } else { pGrpDevAddr = pP2PInfo->ProvisionDiscoveryContext.devAddr; pGrpSsidBuf = pP2PInfo->ProvisionDiscoveryContext.SsidBuf; grpSsidLen = pP2PInfo->ProvisionDiscoveryContext.SsidLen; } if(pGrpDevAddr) 
P2PAttr_Make_GroupId(pBuf, pGrpDevAddr, pGrpSsidBuf, grpSsidLen); } p2p_update_IeHdrLen(pBuf, pLen); return; } static VOID p2p_build_PdRspIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo ) { /* Nothing to Append */ return; } static VOID p2p_add_WpsIeConfigMethods( IN FRAME_BUF *pBuf, IN u2Byte configMethods ) { pu1Byte pLen = NULL; FrameBuf_Add_u1(pBuf, (u1Byte)EID_Vendor); pLen = FrameBuf_Add(pBuf, 1); FrameBuf_Add_be_u4(pBuf, 0x0050F204); // Version FrameBuf_Add_be_u2(pBuf, P2P_WPS_ATTR_TAG_VERSION); FrameBuf_Add_be_u2(pBuf, 1); FrameBuf_Add_u1(pBuf, 0x10); // Config Method FrameBuf_Add_be_u2(pBuf, P2P_WPS_ATTR_TAG_CONFIG_METHODS); FrameBuf_Add_be_u2(pBuf, 2); FrameBuf_Add_be_u2(pBuf, configMethods); p2p_update_IeHdrLen(pBuf, pLen); return; } //----------------------------------------------------------------------------- // Exported //----------------------------------------------------------------------------- VOID p2p_add_P2PPublicActionHdr( IN FRAME_BUF *pBuf, IN u1Byte subtype, IN u1Byte dialogToken ) { RT_TRACE_F(COMP_P2P, pBuf->dbgLevel, ("token: %u\n", dialogToken)); FrameBuf_Add_u1(pBuf, WLAN_ACTION_PUBLIC); FrameBuf_Add_u1(pBuf, WLAN_PA_VENDOR_SPECIFIC); FrameBuf_Add_be_u4(pBuf, P2P_IE_VENDOR_TYPE); FrameBuf_Add_u1(pBuf, subtype); FrameBuf_Add_u1(pBuf, dialogToken); return; } VOID p2p_Construct_GoNegReq( IN P2P_INFO *pP2PInfo, IN FRAME_BUF *pBuf, IN const u1Byte *da, IN u1Byte dialogToken ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, da); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_GO_NEG_REQ, dialogToken); // P2P IE p2p_build_GoNegReqIe(pBuf, pP2PInfo); // Additional IE P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_GO_NEGOTIATION_REQUEST, pBuf); // WPS IE WPS_AppendElement(pP2PInfo->pAdapter, &pBuf->os, TRUE, WPS_INFO_PROBEREQ_IE); // WFD IE WFD_AppendP2pGoNegReqIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); RT_TRACE(COMP_P2P, DBG_LOUD, ("%s(): intent = %u, tie breaker: %u, dialog token: %u\n", __FUNCTION__, pP2PInfo->GOIntent >> 1, pP2PInfo->GOIntent & 0x01, dialogToken)); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_GoNegRsp( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN FRAME_BUF *pBuf, IN const u1Byte *da ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, pP2PInfo->DeviceAddress); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_GO_NEG_RSP, dialogToken); // P2P IE p2p_build_GoNegRspIe(pBuf, pP2PInfo, da); // Additional IE P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_GO_NEGOTIATION_RESPONSE, pBuf); // WPS IE WPS_AppendElement(pP2PInfo->pAdapter, &pBuf->os, TRUE, WPS_INFO_PROBERSP_IE); // WFD IE WFD_AppendP2pGoNegRspIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); RT_TRACE(COMP_P2P, DBG_LOUD, ("%s(): intent = %u, tie breaker: %u, dialog token: %u\n", __FUNCTION__, pP2PInfo->GOIntent >> 1, pP2PInfo->GOIntent & 0x01, dialogToken)); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_GoNegConf( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN FRAME_BUF *pBuf, IN const u1Byte *da ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, da); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_GO_NEG_CONF, dialogToken); // P2P IE p2p_build_GoNegConfIe(pBuf, pP2PInfo, da); // Additional IE P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_GO_NEGOTIATION_CONFIRM, pBuf); // WFD IE 
WFD_AppendP2pGoNegConfirmIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_InvitationReq( IN P2P_INFO *pP2PInfo, IN FRAME_BUF *pBuf, IN const u1Byte *da, IN u1Byte dialogToken ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, da); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_INVITATION_REQ, dialogToken); // P2P IE p2p_build_InvitationReqIe(pBuf, pP2PInfo, da); // Additional IE P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_INVITATION_REQUEST, pBuf); /* Below use octet string ONLY */ { WFD_AppendP2pInvitationReqIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); } FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_InvitationRsp( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN FRAME_BUF *pBuf, IN const u1Byte *da ) { // // Assume that pP2PInfo->Status has been set. // FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, pP2PInfo->DeviceAddress); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_INVITATION_RSP, dialogToken); // P2P IE p2p_build_InvitationRspIe(pBuf, pP2PInfo, da); // Additional IE P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_INVITATION_RESPONSE, pBuf); // WFD IE WFD_AppendP2pInvitationRspIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_DevDiscReq( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN FRAME_BUF *pBuf, IN const u1Byte *da, IN const u1Byte *bssid ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, bssid, pP2PInfo->DeviceAddress, bssid); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_DEV_DISC_REQ, dialogToken); // P2P IE p2p_build_DevDiscReqIe(pBuf, pP2PInfo); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_DevDiscRsp( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN FRAME_BUF *pBuf, IN const u1Byte *da, IN u1Byte status ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, pP2PInfo->DeviceAddress); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_DEV_DISC_RSP, dialogToken); // P2P IE p2p_build_DevDiscRspIe(pBuf, pP2PInfo, status); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_PDReq( IN P2P_INFO *pP2PInfo, IN FRAME_BUF *pBuf, IN const u1Byte *da, IN u1Byte dialogToken, IN u2Byte configMethod ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, da); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_PROV_DISC_REQ, dialogToken); // P2P IE p2p_build_PdReqIe(pBuf, pP2PInfo); // Additional IE if(0 == P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_PROVISION_DISCOVERY_REQUEST, pBuf)) { p2p_add_WpsIeConfigMethods(pBuf, configMethod); } // WFDS IE P2PSvc_MakePDReqIE(pP2PInfo->pP2PSvcInfo, pBuf); // WFD IE WFD_AppendP2pProvDiscoveryReqIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_Construct_PDRsp( IN P2P_INFO *pP2PInfo, IN u1Byte dialogToken, IN OCTET_STRING *posP2PAttrs, IN u2Byte configMethod, IN FRAME_BUF *pBuf, IN const u1Byte *da ) { FunctionIn(COMP_P2P); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pP2PInfo->DeviceAddress, pP2PInfo->DeviceAddress); 
// Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_PROV_DISC_RSP, dialogToken); // P2P IE p2p_build_PdRspIe(pBuf, pP2PInfo); // Additional IE if(0 == P2P_AddIe_Append(&pP2PInfo->AdditionalIEs, P2P_ADD_IE_PROVISION_DISCOVERY_RESPONSE, pBuf)) { // TODO: we temporarilly accept all confing method in ProvisionDiscoveryReq p2p_add_WpsIeConfigMethods(pBuf, configMethod); } // WFDS IE P2PSvc_MakePDRspIE(pP2PInfo->pP2PSvcInfo, posP2PAttrs, pBuf); // WFD IE WFD_AppendP2pProvDiscoveryRspIEs(pP2PInfo->pAdapter, FrameBuf_Cap(pBuf), &pBuf->os); FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); FunctionOut(COMP_P2P); return; } VOID p2p_build_FakeInvitationRspIe( IN FRAME_BUF *pBuf, IN P2P_INFO *pP2PInfo, IN const u1Byte *da ) { p2p_build_InvitationRspIe(pBuf, pP2PInfo, da); return; } // // Description: // Construct fake provision discovery response frame by the device which is considered as the // respondor and fill the content by the previous received information. // Arguments: // [in] pP2PInfo - // P2P information context. // [in] pRspDev - // The device which sends the provision discovery response frame. // [in] da - // The destination which the response frame is sent to. // [out] -pBuf // The context of FRAME_BUF to put the frame. // Return: // Return RT_STATUS_SUCCESS if the construction of this response frame succeeds. // By Bruce, 2015-02-17. // RT_STATUS p2p_Construct_FakePDRsp( IN P2P_INFO *pP2PInfo, IN P2P_DEV_LIST_ENTRY *pRspDev, IN const u1Byte *da, OUT FRAME_BUF *pBuf ) { RT_STATUS rtStatus = RT_STATUS_SUCCESS; pu1Byte pPdReqFrame = NULL; OCTET_STRING osPdReq, osTmpIe; pu1Byte pPdRspTa = NULL;; FunctionIn(COMP_P2P); do { if(!pRspDev) { RT_ASSERT(FALSE, ("pRspDev = NULL\n")); rtStatus = RT_STATUS_INVALID_PARAMETER; break; } if(!pRspDev->txFrames[P2P_FID_PD_REQ]) { RT_ASSERT(FALSE, ("pRspDev->txFrames[P2P_FID_PD_REQ] = NULL\n")); rtStatus = RT_STATUS_INVALID_DATA; break; } if(0 == pRspDev->txFrames[P2P_FID_PD_REQ]->frameLen) { RT_TRACE_F(COMP_P2P, DBG_WARNING, ("pRspDev->txFrames[P2P_FID_PD_REQ].frameLen = 0\n")); rtStatus = RT_STATUS_INVALID_DATA; break; } FillOctetString(osPdReq, pRspDev->txFrames[P2P_FID_PD_REQ]->frame, pRspDev->txFrames[P2P_FID_PD_REQ]->frameLen); pPdReqFrame = pRspDev->txFrames[P2P_FID_PD_REQ]->frame; pPdRspTa = Frame_pDaddr(osPdReq); // MAC Header p2p_add_ActionFrameMacHdr(pBuf, da, pPdRspTa, pPdRspTa); // Action Header p2p_add_P2PPublicActionHdr(pBuf, P2P_PROV_DISC_RSP, pRspDev->txFrames[P2P_FID_PD_REQ]->token); // WPS IE, getting from the original PD request osTmpIe = PacketGetElement(osPdReq, EID_Vendor, OUI_SUB_SimpleConfig, OUI_SUB_DONT_CARE); if(osTmpIe.Length > 0) { PacketMakeElement(&(pBuf->os), EID_Vendor, osTmpIe); } // WFD IE if(pRspDev->rxFrames[P2P_FID_PROBE_RSP]->frameLen > 0) { osTmpIe = PacketGetElement(osPdReq, EID_Vendor, OUI_SUB_WIFI_DISPLAY, OUI_SUB_DONT_CARE); if(osTmpIe.Length > 0) { PacketMakeElement(&(pBuf->os), EID_Vendor, osTmpIe); } } FrameBuf_Dump(pBuf, 0, DBG_LOUD, __FUNCTION__); }while(FALSE); FunctionOut(COMP_P2P); return rtStatus; } #endif
13,908
589
/* * Copyright (c) 2019 The sky Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sky.xposed.rimet.plugin.main; import android.app.Activity; import android.os.Bundle; import android.view.View; import android.view.ViewGroup; import com.sky.xposed.common.ui.view.SimpleItemView; import com.sky.xposed.common.util.ResourceUtil; import com.sky.xposed.rimet.BuildConfig; import com.sky.xposed.rimet.Constant; import com.sky.xposed.rimet.data.model.PluginInfo; import com.sky.xposed.rimet.plugin.base.BasePlugin; import com.sky.xposed.rimet.plugin.interfaces.XPluginManager; /** * Created by sky on 2018/12/30. */ public class SettingsPlugin extends BasePlugin { public SettingsPlugin(XPluginManager pluginManager) { super(pluginManager); } @Override public Info getInfo() { return new PluginInfo(Constant.Plugin.MAIN_SETTINGS, "设置"); } @Override public void onHandleLoadPackage() { findMethod( "com.alibaba.android.user.settings.activity.NewSettingActivity", "onCreate", Bundle.class) .after(param -> onHandleSettings((Activity) param.thisObject)); findMethod( "com.alibaba.android.user.settings.activity.UserSettingsActivity", "onCreate", Bundle.class) .after(param -> onHandleSettings((Activity) param.thisObject)); } private void onHandleSettings(Activity activity) { View view = activity.findViewById(ResourceUtil.getId(activity, "setting_msg_notice")); ViewGroup viewGroup = (ViewGroup) view.getParent(); final int index = viewGroup.indexOfChild(view); SimpleItemView viewDing = new SimpleItemView(activity); viewDing.getNameView().setTextSize(17); viewDing.setName(Constant.Name.TITLE); viewDing.setExtend("v" + BuildConfig.VERSION_NAME); viewDing.setOnClickListener(v -> { // Open the settings openSettings(activity); }); viewGroup.addView(viewDing, index); } @Override public void openSettings(Activity activity) { // Open the plugin settings getPluginManager().getXPluginById(Constant.Plugin.DING_DING).openSettings(activity); // PluginSettingsDialog dialog = new PluginSettingsDialog(); // dialog.show(activity.getFragmentManager(), "settings"); } }
1,120
897
<gh_stars>100-1000 import java.util.*; class Min_Element_Rotated_array { public static void main(String args[]) { Scanner sc = new Scanner(System.in); System.out.println("Enter the elements"); int min = sc.nextInt(); while (sc.hasNextInt()) { int num = sc.nextInt(); if (num < min) min = num; } System.out.println("Minimum element in the list : " + min); } } /* Sample Input and Output : Enter the elements 3 4 5 1 2 Minimum element in the list : 1 Time Complexity : O(n) Space Complexity : O(1) */
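// Added illustration: despite the class name, the scan above is a plain O(n)
// pass over arbitrary input. For a genuinely rotated *sorted* array with no
// duplicates, the minimum can be found in O(log n); the class below is a
// hypothetical sketch, not part of the original exercise.
class Min_Element_Rotated_array_BinarySearch {
    static int findMin(int[] arr) {
        int lo = 0, hi = arr.length - 1;
        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (arr[mid] > arr[hi]) lo = mid + 1; // minimum lies to the right of mid
            else hi = mid;                        // minimum is at mid or to its left
        }
        return arr[lo]; // e.g. {3, 4, 5, 1, 2} -> 1
    }
}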
268
701
""" An example of using ArUco markers with OpenCV. """ import cv2 import sys import cv2.aruco as aruco import numpy as np from skimage.data import astronaut device = 0 # Back camera try: device = int(sys.argv[1]) # 1 for front camera except IndexError: pass cap = cv2.VideoCapture(device) while cap.isOpened(): # Capture frame-by-frame ret, frame = cap.read() # Check if frame is not empty if not ret: continue # Auto rotate camera frame = cv2.autorotate(frame, device) # Convert from BGR to RGB frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) parameters = aruco.DetectorParameters_create() corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters) if np.all(ids != None): x1 = (corners[0][0][0][0], corners[0][0][0][1]) x2 = (corners[0][0][1][0], corners[0][0][1][1]) x3 = (corners[0][0][2][0], corners[0][0][2][1]) x4 = (corners[0][0][3][0], corners[0][0][3][1]) im_dst = frame im_src = astronaut() size = im_src.shape pts_dst = np.array([x1, x2, x3, x4]) pts_src = np.array( [ [0,0], [size[1] - 1, 0], [size[1] - 1, size[0] -1], [0, size[0] - 1 ] ],dtype=float ); h, status = cv2.findHomography(pts_src, pts_dst) temp = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0])) cv2.fillConvexPoly(im_dst, pts_dst.astype(int), 0, 16); frame = im_dst + temp # Display the resulting frame cv2.imshow('frame', frame)
926
4,036
<filename>cpp/ql/test/library-tests/pointsto/basic/test.cpp typedef struct { int data[10]; } MyStruct; MyStruct a, b, c, d; MyStruct *p1, *p2, *p3; MyStruct **pp1, **pp2; void use(MyStruct v, ...); void test(int cond) { if (cond) { p1 = &a; } else { p1 = &b; } p2 = p1; // p1, p2 could point to a or b p3 = &c; pp1 = &p3; p3 = &d; pp2 = &p3; // pp1, pp2 point to p3; p3 could point to c or d (at different times) use(a, b, c, d, p1, p2, p3, pp1, pp2); }
243
378
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openejb.jee.jpa; import org.apache.openejb.jee.Keyable; import java.util.List; public interface RelationField extends Keyable { List<JoinColumn> getJoinColumn(); JoinTable getJoinTable(); void setJoinTable(JoinTable value); CascadeType getCascade(); void setCascade(CascadeType value); FetchType getFetch(); void setFetch(FetchType value); String getMappedBy(); void setMappedBy(String value); String getName(); void setName(String value); String getTargetEntity(); void setTargetEntity(String value); /** * This is only used for xml converters and will normally return null. * Gets the field on the target entity for this relationship. * * @return the field on the target entity for this relationship. */ RelationField getRelatedField(); /** * Sets the field on the target entity for this relationship. * * @param value field on the target entity for this relationship. */ void setRelatedField(RelationField value); /** * This is only used for xml converters and will normally return false. * A true value indicates that this field was generated for CMR back references. * * @return true if this field was generated for CMR back references. */ boolean isSyntheticField(); /** * This is only used for xml converters. * A true value indicates that this field was generated for CMR back references. * * @param syntheticField true if this field was generated for CMR back references. */ void setSyntheticField(boolean syntheticField); }
732
1,380
<reponame>kean/DFImageManager // The MIT License (MIT) // // Copyright (c) 2015 <NAME> (github.com/kean). #import "DFImageCaching.h" #import <Foundation/Foundation.h> /*! Memory cache implementation built on top of NSCache. Adds expiration of cached entries, automatic cleanup on memory warnings, and more. */ @interface DFImageCache : NSObject <DFImageCaching> /*! Returns the cache that the DFImageCache was initialized with. */ @property (nonnull, nonatomic, readonly) NSCache *cache; /*! Initializes the image cache with an instance of the NSCache class. */ - (nonnull instancetype)initWithCache:(nonnull NSCache *)cache NS_DESIGNATED_INITIALIZER; /*! Returns the cost for a given cached image response. */ - (NSUInteger)costForImageResponse:(nonnull DFCachedImageResponse *)cachedResponse; @end
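/*! A minimal usage sketch (added for illustration; the cost limit shown is an
 arbitrary assumption, not a library default):

 NSCache *cache = [NSCache new];
 cache.totalCostLimit = 100 * 1024 * 1024; // ~100 MB
 DFImageCache *imageCache = [[DFImageCache alloc] initWithCache:cache];
 */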
241
428
/** * Copyright 2008 - 2015 The Loon Game Engine Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * @project loon * @author cping * @email:<EMAIL> * @version 0.5 */ package loon.action.sprite.effect; import loon.LSystem; import loon.action.sprite.SpriteBatch; import loon.canvas.Image; import loon.canvas.LColor; import loon.geom.Vector2f; import loon.utils.MathUtils; import loon.utils.TArray; import loon.utils.timer.LTimer; /** * Creates a random number of lightning bolts within the specified area (the effect of Gohan's first Super Saiyan 2 transformation) */ public class LightningRandom implements ILightning { private LTimer timer = new LTimer(0); private TArray<Vector2f> particles = new TArray<Vector2f>(); private TArray<LightningBranch> bolts = new TArray<LightningBranch>(); private float hue = 4.5f; private float[] noise = null; private LColor color = null; private boolean closed; public LightningRandom(int count, Vector2f source, Vector2f dest) { this(count, source.x, source.y, dest.x, dest.y, null); } public LightningRandom(int count, Vector2f source, Vector2f dest, LColor c) { this(count, source.x, source.y, dest.x, dest.y, c); } public LightningRandom(int count, float x, float y, float w, float h) { this(count, x, y, w, h, null); } public LightningRandom(int count, float x, float y, float w, float h, LColor c) { this.color = c; for (int i = 0; i < count; i++) { Vector2f v = new Vector2f(MathUtils.random(x, w), MathUtils.random(y, h)); particles.add(v); } noise = new float[count]; for (int i = 0; i < count; i++) { noise[i] = MathUtils.random(); } } public LightningRandom(Image image, Vector2f pos, LColor c) { this(image, pos.x, pos.y, c); } public LightningRandom(Image image, float x, float y, LColor c) { this(createParticle(image), x, y, c); } public LightningRandom(TArray<Vector2f> pars, Vector2f pos, LColor c) { this(pars, pos.x, pos.y, c); } public LightningRandom(TArray<Vector2f> pars, float x, float y, LColor c) { int len = 35; for (int i = 0; i < pars.size; i++) { pars.get(i).addSelf(x, y); } this.particles.addAll(pars); this.color = c; this.noise = new float[len]; for (int i = 0; i < len; i++) { noise[i] = MathUtils.random(); } } final static TArray<Vector2f> createParticle(Image img) { Vector2f size = Vector2f.at(img.getWidth() / 2, img.getHeight() / 2); final int interval = 2; final float scale = 1.5f; TArray<Vector2f> points = img.getPoints(size, interval, scale); return points; } public void draw(SpriteBatch batch, float x, float y) { for (LightningBranch bolt : bolts) { bolt.draw(batch, x, y); } } public void setDelay(long delay) { timer.setDelay(delay); } public long getDelay() { return timer.getDelay(); } public void update(long elapsedTime) { if (timer.action(elapsedTime)) { bolts.clear(); hue += 0.01f; if (hue >= 6) { hue -= 6; } int size = LSystem.viewSize.getWidth(); for (Vector2f particle : particles) { float x = particle.x / size; int boltChance = (int) (20 * MathUtils.sin(3 * hue * MathUtils.PI - x + 1 * getNoise(hue + x)) + 52); if (MathUtils.nextInt(boltChance) == 0) { Vector2f nearestParticle = Vector2f.ZERO(); float nearestDist = Float.MAX_VALUE; for (int i = 
0; i < 50; i++) { Vector2f other = particles.get(MathUtils.nextInt(particles.size)); float dist = Vector2f.dst(particle, other); if (dist < nearestDist && dist > 10 * 10) { nearestDist = dist; nearestParticle = other; } } if (nearestDist < 200 * 200 && nearestDist > 10 * 10) { bolts.add(new LightningBranch(particle, nearestParticle, color == null ? LColor.hsvToColor(hue, 0.5f, 1f) : color)); } } } } } private final float getNoise(float x) { x = MathUtils.max(x, 0); int length = noise.length; int i = ((int) (length * x)) % length; int j = (i + 1) % length; return MathUtils.smoothStep(noise[i], noise[j], x - (int) x); } @Override public boolean isComplete() { return false; } public boolean isClosed() { return closed; } @Override public void close() { if (particles != null) { particles.clear(); } if (bolts != null) { bolts.clear(); } closed = true; } }
1,918
627
/* Copyright 2020 The Chromium OS Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ /* Trogdor baseboard-specific configuration */ #include "charger.h" #include "driver/charger/isl923x.h" #include "i2c.h" #include "power.h" /* Wake-up pins for hibernate */ const enum gpio_signal hibernate_wake_pins[] = { GPIO_LID_OPEN, GPIO_AC_PRESENT, GPIO_POWER_BUTTON_L, GPIO_EC_RST_ODL, }; const int hibernate_wake_pins_used = ARRAY_SIZE(hibernate_wake_pins); /* Power signal list. Must match order of enum power_signal. */ const struct power_signal_info power_signal_list[] = { [SC7180_AP_RST_ASSERTED] = { GPIO_AP_RST_L, POWER_SIGNAL_ACTIVE_LOW | POWER_SIGNAL_DISABLE_AT_BOOT, "AP_RST_ASSERTED"}, [SC7180_PS_HOLD] = { GPIO_PS_HOLD, POWER_SIGNAL_ACTIVE_HIGH, "PS_HOLD"}, [SC7180_PMIC_FAULT_L] = { GPIO_PMIC_FAULT_L, POWER_SIGNAL_ACTIVE_HIGH | POWER_SIGNAL_DISABLE_AT_BOOT, "PMIC_FAULT_L"}, [SC7180_POWER_GOOD] = { GPIO_POWER_GOOD, POWER_SIGNAL_ACTIVE_HIGH, "POWER_GOOD"}, [SC7180_WARM_RESET] = { GPIO_WARM_RESET_L, POWER_SIGNAL_ACTIVE_HIGH, "WARM_RESET_L"}, [SC7180_AP_SUSPEND] = { GPIO_AP_SUSPEND, POWER_SIGNAL_ACTIVE_HIGH, "AP_SUSPEND"}, [SC7180_DEPRECATED_AP_RST_REQ] = { GPIO_DEPRECATED_AP_RST_REQ, POWER_SIGNAL_ACTIVE_HIGH, "DEPRECATED_AP_RST_REQ"}, }; BUILD_ASSERT(ARRAY_SIZE(power_signal_list) == POWER_SIGNAL_COUNT); const struct charger_config_t chg_chips[] = { { .i2c_port = I2C_PORT_CHARGER, .i2c_addr_flags = ISL923X_ADDR_FLAGS, .drv = &isl923x_drv, }, }; int board_allow_i2c_passthru(int port) { return (port == I2C_PORT_VIRTUAL_BATTERY); }
819
697
<filename>book/swidgets/java/examples/clearfield.java import javax.swing.*; import java.awt.FlowLayout; import swidgets.*; import nz.sodium.*; public class clearfield { public static void main(String[] args) { JFrame frame = new JFrame("clearfield"); frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); frame.setLayout(new FlowLayout()); SButton clear = new SButton("Clear"); Stream<String> sClearIt = clear.sClicked.map(u -> ""); STextField text = new STextField(sClearIt, "Hello"); frame.add(text); frame.add(clear); frame.setSize(400, 160); frame.setVisible(true); } }
275
877
package org.checkerframework.checker.testchecker.disbaruse; import org.checkerframework.common.basetype.BaseTypeChecker; /** * A checker that issues a "disbar.use" error at any use of fields, methods or parameters whose type * is {@code @DisbarUse}. */ public class DisbarUseChecker extends BaseTypeChecker {}
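// A hypothetical example of code this checker is meant to reject (the field,
// method, and annotation placement are illustrative only):
//
//   @DisbarUse String forbidden;
//
//   void m() {
//       System.out.println(forbidden); // error: [disbar.use]
//   }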
94
1,806
<gh_stars>1000+ # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from typing import cast from pants.backend.docker.goals.package_image import BuiltDockerImage, DockerFieldSet from pants.backend.docker.subsystems.docker_options import DockerOptions from pants.backend.docker.util_rules.docker_binary import DockerBinary from pants.core.goals.package import BuiltPackage, PackageFieldSet from pants.core.goals.run import RunRequest from pants.engine.environment import Environment, EnvironmentRequest from pants.engine.rules import Get, MultiGet, collect_rules, rule @rule async def docker_image_run_request( field_set: DockerFieldSet, docker: DockerBinary, options: DockerOptions ) -> RunRequest: env, image = await MultiGet( Get(Environment, EnvironmentRequest(options.env_vars)), Get(BuiltPackage, PackageFieldSet, field_set), ) tag = cast(BuiltDockerImage, image.artifacts[0]).tags[0] run = docker.run_image(tag, docker_run_args=options.run_args, env=env) return RunRequest(args=run.argv, digest=image.digest, extra_env=run.env) def rules(): return collect_rules()
380
1,645
/* * Seldon -- open source prediction engine * ======================================= * Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/) * ********************************************************************************************** * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ********************************************************************************************** */ package io.seldon.recommendation.baseline; import io.seldon.clustering.recommender.ItemRecommendationResultSet; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import junit.framework.Assert; import org.junit.Test; public class RecentInteractionsRecommenderTest { @Test public void testSimple() { final String client = "test"; final int dimension = 1; Set<Integer> dimensions = new HashSet<Integer>(); dimensions.add(dimension); List<Long> recentItemInteractions = new ArrayList<Long>(); Long item1 = 1L; Long item2 = 2L; recentItemInteractions.add(item1); recentItemInteractions.add(item2); RecentInteractionsRecommender r = new RecentInteractionsRecommender(); ItemRecommendationResultSet res = r.recommend(client, null, dimensions, 2, null, recentItemInteractions); Assert.assertEquals(2,res.getResults().size()); Assert.assertEquals(item1,res.getResults().get(0).item); Assert.assertEquals(1.0f,res.getResults().get(0).score); Assert.assertEquals(item2,res.getResults().get(1).item); Assert.assertEquals(0.5f,res.getResults().get(1).score); } @Test public void testMaxRecs() { final String client = "test"; final int dimension = 1; Set<Integer> dimensions = new HashSet<Integer>(); dimensions.add(dimension); List<Long> recentItemInteractions = new ArrayList<Long>(); Long item1 = 1L; Long item2 = 2L; Long item3 = 3L; recentItemInteractions.add(item1); recentItemInteractions.add(item2); recentItemInteractions.add(item3); RecentInteractionsRecommender r = new RecentInteractionsRecommender(); ItemRecommendationResultSet res = r.recommend(client, null, dimensions, 2, null, recentItemInteractions); Assert.assertEquals(2,res.getResults().size()); Assert.assertEquals(item1,res.getResults().get(0).item); Assert.assertEquals(1.0f,res.getResults().get(0).score); Assert.assertEquals(item2,res.getResults().get(1).item); Assert.assertEquals(0.666f,res.getResults().get(1).score,0.01); } }
941
852
<gh_stars>100-1000 import FWCore.ParameterSet.Config as cms process = cms.Process("TEST") process.load("FWCore.MessageLogger.MessageLogger_cfi") process.CSCFakeGainsConditions = cms.ESSource("CSCFakeGainsConditions") process.prefer("CSCFakeGainsConditions") process.CSCFakePedestalsConditions = cms.ESSource("CSCFakePedestalsConditions") process.prefer("CSCFakePedestalsConditions") process.CSCFakeNoiseMatrixConditions = cms.ESSource("CSCFakeNoiseMatrixConditions") process.prefer("CSCFakeNoiseMatrixConditions") process.CSCFakeCrosstalkConditions = cms.ESSource("CSCFakeCrosstalkConditions") process.prefer("CSCFakeCrosstalkConditions") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source("EmptySource") process.prod1 = cms.EDAnalyzer("CSCGainsReadAnalyzer") process.prod2 = cms.EDAnalyzer("CSCPedestalReadAnalyzer") process.prod3 = cms.EDAnalyzer("CSCCrossTalkReadAnalyzer") process.prod4 = cms.EDAnalyzer("CSCNoiseMatrixReadAnalyzer") process.output = cms.OutputModule("AsciiOutputModule") process.p = cms.Path(process.prod1*process.prod2*process.prod3*process.prod4) process.ep = cms.EndPath(process.output)
436
14,668
<gh_stars>1000+ // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ash/arc/enterprise/arc_snapshot_reboot_notification_impl.h" #include "base/callback.h" #include "base/run_loop.h" #include "base/test/bind.h" #include "chrome/browser/notifications/notification_display_service_tester.h" #include "chrome/browser/notifications/system_notification_helper.h" #include "chrome/test/base/testing_browser_process.h" #include "content/public/test/browser_task_environment.h" #include "testing/gtest/include/gtest/gtest.h" namespace arc { namespace data_snapshotd { class ArcSnapshotRebootNotificationTest : public testing::Test { public: ArcSnapshotRebootNotificationTest() = default; ArcSnapshotRebootNotificationTest(const ArcSnapshotRebootNotificationTest&) = delete; ArcSnapshotRebootNotificationTest& operator=( const ArcSnapshotRebootNotificationTest&) = delete; void SetUp() override { TestingBrowserProcess::GetGlobal()->SetSystemNotificationHelper( std::make_unique<SystemNotificationHelper>()); tester_ = std::make_unique<NotificationDisplayServiceTester>(nullptr); tester_->SetNotificationAddedClosure(base::BindRepeating( &ArcSnapshotRebootNotificationTest::OnNotificationAdded, base::Unretained(this))); tester_->SetNotificationClosedClosure(base::BindRepeating( &ArcSnapshotRebootNotificationTest::OnNotificationClosed, base::Unretained(this))); } void TearDown() override { EXPECT_FALSE(IsNotificationShown()); tester_.reset(); } void ClickOnNotification() { tester_->SimulateClick( NotificationHandler::Type::TRANSIENT, ArcSnapshotRebootNotificationImpl::get_notification_id_for_testing(), absl::nullopt, absl::nullopt); } void ClickOnRestartButton() { tester_->SimulateClick( NotificationHandler::Type::TRANSIENT, ArcSnapshotRebootNotificationImpl::get_notification_id_for_testing(), ArcSnapshotRebootNotificationImpl::get_restart_button_id_for_testing(), absl::nullopt); } void OnNotificationAdded() { is_notification_shown_ = true; } void OnNotificationClosed() { is_notification_shown_ = false; } bool IsNotificationShown() { return is_notification_shown_; } private: content::BrowserTaskEnvironment task_environment_; bool is_notification_shown_ = false; std::unique_ptr<NotificationDisplayServiceTester> tester_; }; TEST_F(ArcSnapshotRebootNotificationTest, Basic) { ArcSnapshotRebootNotificationImpl notification; EXPECT_FALSE(IsNotificationShown()); notification.Show(); EXPECT_TRUE(IsNotificationShown()); notification.Hide(); EXPECT_FALSE(IsNotificationShown()); } TEST_F(ArcSnapshotRebootNotificationTest, ClickOnRestartButton) { ArcSnapshotRebootNotificationImpl notification; notification.Show(); EXPECT_TRUE(IsNotificationShown()); base::RunLoop run_loop; notification.SetUserConsentCallback(run_loop.QuitClosure()); ClickOnRestartButton(); run_loop.Run(); EXPECT_FALSE(IsNotificationShown()); } TEST_F(ArcSnapshotRebootNotificationTest, ClickOnNotification) { ArcSnapshotRebootNotificationImpl notification; notification.Show(); EXPECT_TRUE(IsNotificationShown()); notification.SetUserConsentCallback(base::BindLambdaForTesting( []() { NOTREACHED() << "Unexpected user consent registered"; })); ClickOnNotification(); EXPECT_TRUE(IsNotificationShown()); } TEST_F(ArcSnapshotRebootNotificationTest, DoubleShow) { ArcSnapshotRebootNotificationImpl notification; EXPECT_FALSE(IsNotificationShown()); notification.Show(); notification.Show(); EXPECT_TRUE(IsNotificationShown()); } 
TEST_F(ArcSnapshotRebootNotificationTest, DoubleHide) { ArcSnapshotRebootNotificationImpl notification; EXPECT_FALSE(IsNotificationShown()); notification.Show(); EXPECT_TRUE(IsNotificationShown()); notification.Hide(); EXPECT_FALSE(IsNotificationShown()); notification.Hide(); } } // namespace data_snapshotd } // namespace arc
1,381
897
#include <stdio.h> int painters(int * arr, int maxtime, int len, int num) { int time = 0, count = 1; for (int i = 0; i < len; i++) { time += arr[i]; if (time > maxtime) { time = arr[i]; count++; if (count > num) { return 0; } } } return 1; } int get_max(int arr[], int len) { int max = arr[0]; for (int i = 1; i < len; i++) { if (max < arr[i]) max = arr[i]; } return max; } int main() { int len, num, time; int sum = 0; printf("Enter the length of array, num of painters and time: "); scanf("%d%d%d", &len, &num, &time); int arr[len]; printf("Enter the array: "); for (int i = 0; i < len; i++) { scanf("%d", &arr[i]); sum += arr[i]; } // Finding the max of array int start = get_max(arr, len); int end = sum, ans = -1; // ans is initialized defensively; the search always succeeds at mid == sum while (start <= end) { int mid = (start + end) / 2; if (painters(arr, mid, len, num)) { ans = mid; end = mid - 1; } else { start = mid + 1; } } ans = ans * time; printf("Total time taken: %d", ans); return 0; } /* Output: Enter the length of array, num of painters and time: 2 2 5 Enter the array: 1 10 Total time taken: 50 Time complexity : O(n*log(N)) (where N is sum of array elements) space complexity : O(n) (n is size of array) */
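/* Worked check of the sample above (added): boards {1, 10}, 2 painters, 5 time
 * units per board unit. The binary search runs over [max(arr), sum(arr)] = [10, 11];
 * 10 is feasible (painter 1 takes {1}, painter 2 takes {10}), so ans = 10 and
 * the printed total is 10 * 5 = 50. */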
699
350
// // Created by pengfei.zhou on 2019-05-19. // #ifndef APNG4ANDROID_COMMON_H #define APNG4ANDROID_COMMON_H #include <jni.h> #include <android/log.h> #define ADB_LOG_TAG "GifDecoder" #ifdef DEBUG #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, ADB_LOG_TAG, __VA_ARGS__) #define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, ADB_LOG_TAG, __VA_ARGS__) #else #define LOGD(...) #define LOGE(...) #endif #endif //APNG4ANDROID_COMMON_H
212
1,124
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from absl import flags from tqdm import trange from libml import utils from libml.train import ClassifySemi FLAGS = flags.FLAGS class ClassifyFullySupervised(ClassifySemi): """Fully supervised classification. """ def train_step(self, train_session, data_labeled): x = self.session.run(data_labeled) self.tmp.step = train_session.run([self.ops.train_op, self.ops.update_step], feed_dict={self.ops.x: x['image'], self.ops.label: x['label']})[1] def train(self, train_nimg, report_nimg): if FLAGS.eval_ckpt: self.eval_checkpoint(FLAGS.eval_ckpt) return batch = FLAGS.batch train_labeled = self.dataset.train_labeled.batch(batch).prefetch(16) train_labeled = train_labeled.make_one_shot_iterator().get_next() scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=FLAGS.keep_ckpt, pad_step_number=10)) with tf.Session(config=utils.get_config()) as sess: self.session = sess self.cache_eval() with tf.train.MonitoredTrainingSession( scaffold=scaffold, checkpoint_dir=self.checkpoint_dir, config=utils.get_config(), save_checkpoint_steps=FLAGS.save_kimg << 10, save_summaries_steps=report_nimg - batch) as train_session: self.session = train_session._tf_sess() self.tmp.step = self.session.run(self.step) while self.tmp.step < train_nimg: loop = trange(self.tmp.step % report_nimg, report_nimg, batch, leave=False, unit='img', unit_scale=batch, desc='Epoch %d/%d' % (1 + (self.tmp.step // report_nimg), train_nimg // report_nimg)) for _ in loop: self.train_step(train_session, train_labeled) while self.tmp.print_queue: loop.write(self.tmp.print_queue.pop(0)) while self.tmp.print_queue: print(self.tmp.print_queue.pop(0)) def tune(self, train_nimg): batch = FLAGS.batch train_labeled = self.dataset.train_labeled.batch(batch).prefetch(16) train_labeled = train_labeled.make_one_shot_iterator().get_next() for _ in trange(0, train_nimg, batch, leave=False, unit='img', unit_scale=batch, desc='Tuning'): # run the op directly: wrapping it in a list produced a one-element list and broke the x['image'] lookup below x = self.session.run(train_labeled) self.session.run([self.ops.tune_op], feed_dict={self.ops.x: x['image'], self.ops.label: x['label']})
1,592
309
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A rough-and-ready program to interactively classify images into up to 5 categories. Keyboard controls: * 0-4: classify the current image as '0' through '4' * 9: classify the next 10 images as '0' (useful if 0 is the 'normal' class with many frames) * Escape: go back one image * Space: toggle viewer brightness to make human classification easier (does not modify file) * S: save changes by moving images to their new directories and quit * Close the window without pressing S to exit without moving files. """ from sys import argv, exit from os import system from time import time from glob import glob import pygame def main(): if len(argv) != 2 or argv[1] == '--help': print('Usage: %s DIRECTORY\nClassify images in DIRECTORY and move into subdirs' % argv[0]) exit(1) directory = argv[1] images = glob('%s/*.png' % directory) images.sort() pygame.display.init() screen = pygame.display.set_mode((512, 512)) started = time() i = 0 kind = {} lighten = False while True: i = min(i, len(images)-1) image = images[i] duration = time() - started mins_remaining = (len(images)-(i+1)) * (duration / (i+1)) / 60.0 caption = "%4d / %4d - %.0f%%, %.0f mins remaining" % (i+1, len(images), 100*(i+1)/len(images), mins_remaining) pygame.display.set_caption(caption) try: surface = pygame.image.load(image) if lighten: light = pygame.Surface((surface.get_width(), surface.get_height()), flags=pygame.SRCALPHA) light.fill((128, 128, 128, 0)) surface.blit(light, (0, 0), special_flags=pygame.BLEND_RGBA_ADD) screen.blit(pygame.transform.scale(surface, (512, 512)), (0, 0)) pygame.display.flip() except Exception: pygame.display.set_caption("%4d / %4d - error reading %s" % (i+1, len(images), image)) while True: evt = pygame.event.wait() if evt.type == pygame.QUIT: print('Exiting without moving images (use S to save and exit)') pygame.quit() exit(0) elif evt.type == pygame.KEYDOWN: if evt.key == pygame.K_9: # Not class 9, but rather 10 frames of class 0 for _ in range(10): kind[image] = 0 i += 1 i = min(i, len(images)-1) image = images[i] break if evt.key == pygame.K_0: kind[image] = 0 i += 1 break if evt.key == pygame.K_1: kind[image] = 1 i += 1 break if evt.key == pygame.K_2: kind[image] = 2 i += 1 break if evt.key == pygame.K_3: kind[image] = 3 i += 1 break if evt.key == pygame.K_4: kind[image] = 4 i += 1 break if evt.key == pygame.K_ESCAPE: i -= 1 break if evt.key == pygame.K_SPACE: lighten = not lighten break if evt.key == pygame.K_s: print('Moving %d images to subdirectories' % (len(kind))) for k in kind.values(): system('mkdir -p %s/%s' % (directory, k)) for image, k in kind.items(): system('mv %s %s/%s/' % (image, directory, k)) print('Classified %d images in %.0f minutes' % (len(kind), duration/60)) pygame.quit() exit(0) if __name__ == '__main__': main()
2,418
512
<reponame>bclindner/Kore /* * Copyright 2015 Synced Synapse. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.xbmc.kore.jsonrpc.method; import com.fasterxml.jackson.databind.node.ObjectNode; import org.xbmc.kore.jsonrpc.ApiException; import org.xbmc.kore.jsonrpc.ApiMethod; import org.xbmc.kore.jsonrpc.type.ApplicationType; import org.xbmc.kore.utils.JsonUtils; /** * All JSON RPC methods in Application.* */ public class Application { /** * Quit application */ public static final class Quit extends ApiMethod<String> { public final static String METHOD_NAME = "Application.Quit"; /** * Quit application */ public Quit() { super(); } @Override public String getMethodName() { return METHOD_NAME; } @Override public String resultFromJson(ObjectNode jsonObject) throws ApiException { return jsonObject.get(RESULT_NODE).textValue(); } } /** * Set the current volume */ public static final class SetVolume extends ApiMethod<Integer> { public final static String METHOD_NAME = "Application.SetVolume"; /** * Increment or decrement the volume * @param volume String enum in {@link org.xbmc.kore.jsonrpc.type.GlobalType.IncrementDecrement} */ public SetVolume(String volume) { super(); addParameterToRequest("volume", volume); } /** * Set the volume * @param volume volume between 0 and 100 */ public SetVolume(int volume) { super(); addParameterToRequest("volume", volume); } @Override public String getMethodName() { return METHOD_NAME; } @Override public Integer resultFromJson(ObjectNode jsonObject) throws ApiException { return JsonUtils.intFromJsonNode(jsonObject, RESULT_NODE); } } /** * Toggle mute/unmute */ public static final class SetMute extends ApiMethod<Boolean> { public final static String METHOD_NAME = "Application.SetMute"; /** * Toggle mute/unmute */ public SetMute() { super(); addParameterToRequest("mute", "toggle"); } @Override public String getMethodName() { return METHOD_NAME; } @Override public Boolean resultFromJson(ObjectNode jsonObject) throws ApiException { return JsonUtils.booleanFromJsonNode(jsonObject, RESULT_NODE); } } /** * Retrieves the values of the given properties. */ public static class GetProperties extends ApiMethod<ApplicationType.PropertyValue> { public final static String METHOD_NAME = "Application.GetProperties"; /** * Properties */ public final static String VOLUME = "volume"; public final static String MUTED = "muted"; public final static String NAME = "name"; public final static String VERSION = "version"; /** * Retrieves the values of the given properties. * @param properties See this class constants. */ public GetProperties(String... properties) { super(); addParameterToRequest("properties", properties); } @Override public String getMethodName() { return METHOD_NAME; } @Override public ApplicationType.PropertyValue resultFromJson(ObjectNode jsonObject) throws ApiException { return new ApplicationType.PropertyValue(jsonObject.get(RESULT_NODE)); } } }
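// A minimal construction sketch (added; executing these requests through the
// app's JSON-RPC connection layer is omitted, and the volume value is arbitrary):
//
//   Application.SetVolume setVolume = new Application.SetVolume(75);
//   Application.GetProperties getProperties = new Application.GetProperties(
//           Application.GetProperties.VOLUME, Application.GetProperties.MUTED);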
1,673
468
#define GLI_INCLUDE_GL_EXT_WINDOW_RECTANGLES enum Main { GL_INCLUSIVE_EXT = 0x8F10, GL_EXCLUSIVE_EXT = 0x8F11, GL_WINDOW_RECTANGLE_EXT = 0x8F12, GL_WINDOW_RECTANGLE_MODE_EXT = 0x8F13, GL_MAX_WINDOW_RECTANGLES_EXT = 0x8F14, GL_NUM_WINDOW_RECTANGLES_EXT = 0x8F15, }; void glWindowRectanglesEXT(GLenum[Main] mode, GLsizei count, const GLint *box);
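// A minimal calling sketch (added; the coordinates are illustrative): restrict
// rendering to a single inclusive 256x256 window rectangle.
//
//   GLint box[4] = { 0, 0, 256, 256 };
//   glWindowRectanglesEXT(GL_INCLUSIVE_EXT, 1, box);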
238
312
<gh_stars>100-1000 from typing import Iterable from ics.contentline.container import ParseError, QuotedParamValue, ContentLine, Container from ics.contentline.parser import ParserClass from ics.types import ContainerItem from ics.utils import one Parser = ParserClass() string_to_containers = Parser.string_to_containers lines_to_containers = Parser.lines_to_containers def string_to_container(txt: str) -> ContainerItem: return one(string_to_containers(txt)) def lines_to_container(lines: Iterable[str]) -> ContainerItem: return one(lines_to_containers(lines)) __all__ = ["ParseError", "QuotedParamValue", "ContentLine", "Container", "Parser", "string_to_containers", "lines_to_containers", "string_to_container", "lines_to_container"]
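# A minimal usage sketch (added; the calendar text is an assumed example of
# valid input, not taken from the project's docs):
#
#   container = string_to_container(
#       "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nEND:VCALENDAR\r\n"
#   )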
248
501
<filename>pyscf/lib/dft/grid_basis.h /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: <NAME> <<EMAIL>> */ void VXCeval_ao_drv(int deriv, int nao, int ngrids, int bastart, int bascount, int blksize, double *ao, double *coord, char *non0table, int *atm, int natm, int *bas, int nbas, double *env); void VXCnr_ao_screen(char *non0table, double *coord, int ngrids, int blksize, int *atm, int natm, int *bas, int nbas, double *env);
417
348
<reponame>chamberone/Leaflet.PixiOverlay {"nom":"Molsheim","circ":"6ème circonscription","dpt":"Bas-Rhin","inscrits":6059,"abs":2660,"votants":3399,"blancs":79,"nuls":55,"exp":3265,"res":[{"nuance":"LR","nom":"<NAME>","voix":2429},{"nuance":"MDM","nom":"<NAME>","voix":836}]}
117
304
<reponame>ganlubbq/hobbits #include "usbdevice.h" #include "usbdeviceimporteditor.h" #include <libusb-1.0/libusb.h> #include <iostream> #include <chrono> #include <thread> /** * @brief Construct a new UsbDevice::UsbDevice object and set up the parameters and parameter helper. * */ UsbDevice::UsbDevice() { QList<ParameterDelegate::ParameterInfo> importInfos = { {"DeviceNum", ParameterDelegate::ParameterType::Integer}, {"InterfaceNum", ParameterDelegate::ParameterType::Integer}, {"AltSetNum", ParameterDelegate::ParameterType::Integer}, {"EndpointNum", ParameterDelegate::ParameterType::Integer}, {"TransferNum", ParameterDelegate::ParameterType::Integer}, {"TransferDelay", ParameterDelegate::ParameterType::Integer}, {"TransferTimeout", ParameterDelegate::ParameterType::Integer}, {"TransferType", ParameterDelegate::ParameterType::Integer}, {"TransferSize", ParameterDelegate::ParameterType::Integer}, {"DeviceName", ParameterDelegate::ParameterType::String}, {"TimeoutIn", ParameterDelegate::ParameterType::Boolean}}; m_importDelegate = ParameterDelegate::create( importInfos, [this](const Parameters &parameters) { QString dev = parameters.value("DeviceName").toString(); int trNum = parameters.value("TransferNum").toInt(); QString output = ", Import " + QString::number(trNum) + " Transfers From USB Device " + dev; return QString("%1" + output).arg(this->name()); }, [](QSharedPointer<ParameterDelegate> delegate, QSize size) { Q_UNUSED(size) return new UsbDeviceImportEditor(delegate); }); QList<ParameterDelegate::ParameterInfo> exportInfos = {}; m_exportDelegate = nullptr; } /** * @brief Creates a new UsbDevice Importer Exporter Interface * * @return ImporterExporterInterface* - the actual Importer that imports the data from USB */ ImporterExporterInterface *UsbDevice::createDefaultImporterExporter() { return new UsbDevice(); } /** * @brief Returns the name of the Plugin * * @return QString - the name of the plugin */ QString UsbDevice::name() { return "USB Device"; } /** * @brief Returns a description of the plugin * * @return QString - a description of the plugin */ QString UsbDevice::description() { return "Imports the data sent Device->Host from a Selected USB Device Endpoint"; } /** * @brief Returns the relevant tags for the plugin * * @return QStringList - the relevant tags of the plugin */ QStringList UsbDevice::tags() { return {"Generic"}; } /** * @brief Tells hobbits if the plugin can export data * * @return false - the plugin cannot export data, so this always returns false. */ bool UsbDevice::canExport() { return false; } /** * @brief Tells hobbits whether the plugin can import data. * * @return true - importing is always supported by this plugin. */ bool UsbDevice::canImport() { return true; } /** * @brief Returns the parameter delegate from the importer parameter delegate * * @return QSharedPointer<ParameterDelegate> - the importer parameter delegate and parameters */ QSharedPointer<ParameterDelegate> UsbDevice::importParameterDelegate() { return m_importDelegate; } /** * @brief Returns the parameter delegate for the exporter interface. Although this plugin does not export, * the method is part of the importer/exporter plugin interface and must be kept for the plugin to load correctly. 
 * * @return QSharedPointer<ParameterDelegate> - the exporter parameter delegate, which is nullptr for this plugin since * exporting is not supported */ QSharedPointer<ParameterDelegate> UsbDevice::exportParameterDelegate() { return m_exportDelegate; } /** * @brief initializes a libusb session in params.ctx, gets the device, gets the config descriptor and the relevant * information, gets the endpoint address, and opens a device handle. Errors are reported through params.errorCode, * which receives the raw libusb error code (negative) on failure and 0 on success. * * @param params the UsbParams struct that contains all the necessary data for libusb to read from the device properly */ void UsbDevice::setupLibusb(UsbParams &params) { int r = libusb_init(&params.ctx); //try initializing libusb if (r < 0) { //checking for errors, if r < 0 then there is an error params.errorCode = r; } libusb_get_device_list(params.ctx, &params.devs); params.dev = params.devs[params.deviceNum]; libusb_ref_device(params.dev); //keep a reference so the device stays valid after the list is freed libusb_free_device_list(params.devs, 1); //getting the endpoint address libusb_get_active_config_descriptor(params.dev, &params.config); const libusb_interface *inter = &params.config->interface[params.interfaceNum]; const libusb_interface_descriptor *interdesc = &inter->altsetting[params.altSetNum]; const libusb_endpoint_descriptor *epdesc = &interdesc->endpoint[params.endpointNum]; params.endpoint = epdesc->bEndpointAddress; //try to open a libusb device handle r = libusb_open(params.dev, &params.handle); params.errorCode = r; } /** * @brief closes all the libusb variables and exits the libusb session after reading all needed data or encountering an error. * * @param closeDevice whether an open device handle exists and needs to be closed; false when, for example, device handle * initialization failed. 
 * @param params a UsbParams struct that carries everything libusb needs to close the session properly */ void UsbDevice::exitLibusb(bool closeDevice, UsbParams &params) { if (closeDevice) { //check if there is a device handle to close libusb_close(params.handle); } libusb_free_config_descriptor(params.config); libusb_exit(params.ctx); } /** * @brief The function that actually reads the binary data from the USB device: it imports parameters, determines the * transfer type needed, checks for device handle errors, detaches the kernel driver, reads the data, resets the device, * updates the progress bar, checks whether the action was cancelled, waits for the duration of the transfer delay, and * repeats for the number of transfers requested. * * @param parameters the parameters from the importer UI needed to set libusb to read the data from the proper device and endpoint * @param progress the pointer responsible for updating the progress bar and checking for cancelled tasks * @return QSharedPointer<ImportResult> - the pointer to the bit container so that hobbits can read the imported data */ QSharedPointer<ImportResult> UsbDevice::importBits(const Parameters &parameters, QSharedPointer<PluginActionProgress> progress) { //validating params QStringList invalidations = m_importDelegate->validate(parameters); if (!invalidations.isEmpty()) { return ImportResult::error(QString("Invalid parameters passed to %1:\n%2").arg(name()).arg(invalidations.join("\n"))); } //setting up our struct UsbParams params; //getting parameters params.deviceNum = parameters.value("DeviceNum").toInt(); params.interfaceNum = parameters.value("InterfaceNum").toInt(); params.altSetNum = parameters.value("AltSetNum").toInt(); params.endpointNum = parameters.value("EndpointNum").toInt(); int transferNum = parameters.value("TransferNum").toInt(); int transferDelay = parameters.value("TransferDelay").toInt(); int transferTimeout = parameters.value("TransferTimeout").toInt(); int transferType = parameters.value("TransferType").toInt(); int transferSize = parameters.value("TransferSize").toInt(); QString transferTypeStr = QString::number(transferType); QString deviceName = parameters.value("DeviceName").toString(); bool includeTimeout = parameters.value("TimeoutIn").toBool(); QSharedPointer<RangeSequence> frames = RangeSequence::createEmpty(); QByteArray largeBuffer; int actualLength; unsigned char smallBuffer[1024]; bool attach = false; //initialized so the reset checks below are well-defined when no kernel driver was attached //determine the transfer type, as some transfer types can't be handled yet. 
if (transferType == 0) { return ImportResult::error("Control Transfer Endpoints Not Supported"); } else if(transferType == 1) { return ImportResult::error("Isochronous Transfer Endpoints Not Supported"); } else if(transferType == 2) { transferTypeStr += ", Bulk Transfer"; //for setting metadata setupLibusb(params); //try to init libusb if (params.errorCode < 0) { return returnError(params.errorCode); } for (int i = 0; i < transferNum; i++) { //for every transfer if (libusb_kernel_driver_active(params.handle, params.interfaceNum) == 1) { //check if the kernel has a driver active attach = true; //need to reattach driver later libusb_detach_kernel_driver(params.handle, params.interfaceNum); //detach driver now } //transfer time int transferReturn = libusb_bulk_transfer(params.handle, params.endpoint, smallBuffer, (int)sizeof(smallBuffer), &actualLength, transferTimeout); if (transferReturn == 0 && actualLength != 0) { //check that the transfer is valid frames->appendRange(transferSize * 8); //create a new frame on every transfer for (int j = 0; j < transferSize; j++) { largeBuffer.append(smallBuffer[j]); //add the data from the data buffer to the bigger resizable buffer } } else { if (transferReturn != LIBUSB_ERROR_TIMEOUT) { // check if an error was raised that isn't a timeout if (attach) { libusb_reset_device(params.handle); //reset the device, reattaching the kernel driver attach = false; } //throw an error if an error occurred return returnError(transferReturn); } else { if (includeTimeout) { //check if the user wants to include timeouts largeBuffer.append("TIMEOUT"); frames->appendRange(56); } } } if (attach) { libusb_reset_device(params.handle); //reset the device to reattach the kernel driver attach = false; } if (progress->isCancelled()) { // check if the user has cancelled the import operation break; } progress->setProgress(i, transferNum); //update the progress bar std::this_thread::sleep_for(std::chrono::milliseconds(transferDelay)); //wait for the duration of the transfer delay } exitLibusb(true, params); //exit libusb, closing the device } else if (transferType == 3) { /** * This is the same as the bulk transfer implementation, but with an interrupt transfer; only two lines of * code differ, replacing the bulk transfer call with an interrupt transfer call. * Refer to the bulk transfer implementation for a thorough explanation. 
 * */ transferTypeStr += ", Interrupt Transfer"; setupLibusb(params); if (params.errorCode < 0) { return returnError(params.errorCode); } for (int i = 0; i < transferNum; i++) { if (libusb_kernel_driver_active(params.handle, params.interfaceNum) == 1) { attach = true; libusb_detach_kernel_driver(params.handle, params.interfaceNum); } int transferReturn = libusb_interrupt_transfer(params.handle, params.endpoint, smallBuffer, (int)sizeof(smallBuffer), &actualLength, transferTimeout); if (transferReturn == 0 && actualLength != 0) { frames->appendRange(transferSize * 8); for (int j = 0; j < transferSize; j++) { largeBuffer.append(smallBuffer[j]); } } else { if (transferReturn != LIBUSB_ERROR_TIMEOUT) { if (attach) { libusb_reset_device(params.handle); attach = false; } return returnError(transferReturn); } else { if (includeTimeout) { largeBuffer.append("TIMEOUT"); frames->appendRange(56); } } } if (attach) { libusb_reset_device(params.handle); attach = false; } if (progress->isCancelled()) { break; } progress->setProgress(i, transferNum); std::this_thread::sleep_for(std::chrono::milliseconds(transferDelay)); } exitLibusb(true, params); } else { return ImportResult::error("Invalid Transfer Type Error, please reselect settings and try again."); } QSharedPointer<BitContainer> container = BitContainer::create(largeBuffer); //creating a bit container with all the data in it container->setName("USB " + deviceName); //setting the name you see on the side of hobbits QSharedPointer<BitInfo> info = BitInfo::create(container->bits()->sizeInBits()); //creating a BitInfo for setting frames info->setFrames(frames); // setting frames on every transfer //setting the metadata info->setMetadata("Device Name", deviceName); info->setMetadata("Device Number", params.deviceNum); info->setMetadata("Interface Number", params.interfaceNum); info->setMetadata("Alternate Setting Number", params.altSetNum); info->setMetadata("Endpoint Number", params.endpointNum); info->setMetadata("Endpoint Address", (int)params.endpoint); info->setMetadata("Number of Transfers", transferNum); info->setMetadata("Transfer Delay Duration", transferDelay); info->setMetadata("Transfer Timeout Duration", transferTimeout); info->setMetadata("Transfer Type", transferTypeStr); info->setMetadata("Transfer Size", transferSize); //adding the info to the BitContainer container->setInfo(info); //returning the bit container return ImportResult::result(container, parameters); } QSharedPointer<ImportResult> UsbDevice::returnError(int errorCode) { if (errorCode == LIBUSB_ERROR_IO) { return ImportResult::error("Device IO Error, error reading from the device, please try again. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_INVALID_PARAM) { return ImportResult::error("Invalid Parameter Error, An invalid device parameter was passed, please check your entry and try again."); } else if (errorCode == LIBUSB_ERROR_ACCESS) { return ImportResult::error("Insufficient Permissions Error, Try restarting hobbits as root, or with valid chmod permissions like a+x."); } else if (errorCode == LIBUSB_ERROR_NO_DEVICE) { return ImportResult::error("No Device Found Error, Device selected could not be found, try replugging your device and/or restarting hobbits."); } else if (errorCode == LIBUSB_ERROR_NOT_FOUND) { return ImportResult::error("Entity Not Found Error, A Device, Interface, Alternate Setting, or Endpoint was not found, check your selection and try again. 
If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_BUSY) { return ImportResult::error("Device Busy Error, the device you selected is busy with another task, please try again in a bit. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_OVERFLOW) { return ImportResult::error("Buffer Overflow Error, The buffer used to temporarily store the usb data has overflown, please try again. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_PIPE) { return ImportResult::error("Pipe Error, The endpoint you selected stopped sending data, please replug your device and try again. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_NO_MEM) { return ImportResult::error("Out Of Memory Error, Libusb cannot find enough memory to open a device, please close some other applications, and try again. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_NOT_SUPPORTED) { return ImportResult::error("Not Supported Error, The device you selected is not supported, please choose a different device and try again. If the problem continues, raise an issue on GitHub."); } else if (errorCode == LIBUSB_ERROR_OTHER) { return ImportResult::error("Libusb Ran Into an Error in its Processing, please try again. If the problem continues, raise an issue on GitHub."); } return nullptr; } /** * @brief The function that would export the bits if the plugin allowed for exporting bits, DO NOT REMOVE * as it is needed for the importexport plugin definition to work * * * @param container The container to export * @param parameters the parameters require for the export * @param progress the progressbar and progress checker for the output * @return QSharedPointer<ExportResult> - a pointer towards the export result, right now throws an error if its ever called */ QSharedPointer<ExportResult> UsbDevice::exportBits(QSharedPointer<const BitContainer> container, const Parameters &parameters, QSharedPointer<PluginActionProgress> progress) { return ExportResult::error("Export not implemented"); }
7,204
3,100
#include "csf.h" #include "csfimpl.h" /* change the x value of upper left co-ordinate * RputXUL changes the x value of upper left co-ordinate. * returns the new x value of upper left co-ordinate or 0 * case of an error. * * Merrno * NOACCESS */ REAL8 RputXUL( MAP *map, /* map handle */ REAL8 xUL) /* new x value of top left co-ordinate */ { CHECKHANDLE_GOTO(map, error); if(! WRITE_ENABLE(map)) { M_ERROR(NOACCESS); goto error; } map->raster.xUL = xUL; return(xUL); error: return(0); }
205
476
#pragma once #include "types.h" #ifdef __cplusplus extern "C" { #endif /** * Propose new participants to the security group. * * @param data - the buffer containing the packed participants. * @param datalen - size of the packed participants * @pre `data` is a valid pointer to a range of memory at least `datalen` bytes long that contains packed participants data * * @return -1 if proposing a new security group was unsuccessful, otherwise returns 0. */ __attribute__((eosio_wasm_import)) int64_t add_security_group_participants(const char* data, uint32_t datalen); /** * Propose to remove participants from the security group. * * @param data - the buffer containing the packed participants. * @param datalen - size of the packed participants * @pre `data` is a valid pointer to a range of memory at least `datalen` bytes long that contains packed participants data * * @return -1 if proposing a new security group was unsuccessful, otherwise returns 0. */ __attribute__((eosio_wasm_import)) int64_t remove_security_group_participants(const char* data, uint32_t datalen); /** * Check if the specified accounts are all in the active security group. * * @param data - the buffer containing the packed participants. * @param datalen - size of the packed participants * * @return Returns true if the specified accounts are all in the active security group. */ __attribute__((eosio_wasm_import)) bool in_active_security_group(const char* data, uint32_t datalen); /** * Gets the active security group * * @param[out] data - the output buffer containing the packed security group. * @param datalen - size of the `data` buffer * * @return Returns the size required in the buffer (if the buffer is too small, nothing is written). * */ __attribute__((eosio_wasm_import)) uint32_t get_active_security_group(char* data, uint32_t datalen); #ifdef __cplusplus } #endif
545
370
<filename>pytorch/libs/nnet/transformer/layer_norm.py # -*- coding:utf-8 -*- # Reference: https://github.com/espnet/espnet. import torch class LayerNorm(torch.nn.LayerNorm): """Layer normalization module :param int nout: output dim size :param int dim: dimension to be normalized """ def __init__(self, nout, dim=-1): super(LayerNorm, self).__init__(nout, eps=1e-12) self.dim = dim def forward(self, x): """Apply layer normalization :param torch.Tensor x: input tensor :return: layer normalized tensor :rtype torch.Tensor """ if self.dim == -1: return super(LayerNorm, self).forward(x) return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
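# A minimal usage sketch (added; the shapes are illustrative). With dim=1 the
# module transposes so normalization runs over the channel dimension of a
# (batch, channels, time) tensor instead of the last dimension.
if __name__ == "__main__":
    x = torch.randn(8, 256, 100)   # (batch, channels, time)
    layer_norm = LayerNorm(256, dim=1)
    y = layer_norm(x)
    assert y.shape == x.shape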
329
763
<gh_stars>100-1000 package org.batfish.bddreachability; import static org.batfish.bddreachability.BDDOutgoingOriginalFlowFilterManager.forNetwork; import static org.batfish.bddreachability.EdgeMatchers.edge; import static org.batfish.bddreachability.LastHopOutgoingInterfaceManager.NO_LAST_HOP; import static org.batfish.bddreachability.TransitionMatchers.mapsBackward; import static org.batfish.bddreachability.TransitionMatchers.mapsForward; import static org.batfish.bddreachability.transition.Transitions.IDENTITY; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import java.util.List; import java.util.Map; import java.util.function.Supplier; import net.sf.javabdd.BDD; import org.batfish.bddreachability.transition.Transition; import org.batfish.bddreachability.transition.Transitions; import org.batfish.common.bdd.BDDFiniteDomain; import org.batfish.common.bdd.BDDInteger; import org.batfish.common.bdd.BDDPacket; import org.batfish.common.bdd.BDDSourceManager; import org.batfish.datamodel.Configuration; import org.batfish.datamodel.ConfigurationFormat; import org.batfish.datamodel.FirewallSessionInterfaceInfo; import org.batfish.datamodel.FirewallSessionInterfaceInfo.Action; import org.batfish.datamodel.Interface; import org.batfish.datamodel.IpProtocol; import org.batfish.datamodel.NetworkFactory; import org.batfish.datamodel.Vrf; import org.batfish.datamodel.collections.NodeInterfacePair; import org.batfish.datamodel.flow.Accept; import org.batfish.datamodel.flow.ForwardOutInterface; import org.batfish.datamodel.flow.OriginatingSessionScope; import org.batfish.datamodel.flow.PostNatFibLookup; import org.batfish.symbolic.state.NodeAccept; import org.batfish.symbolic.state.NodeDropAclIn; import org.batfish.symbolic.state.NodeDropAclOut; import org.batfish.symbolic.state.NodeInterfaceDeliveredToSubnet; import org.batfish.symbolic.state.OriginateInterface; import org.batfish.symbolic.state.OriginateVrf; import org.batfish.symbolic.state.PostInVrfSession; import org.batfish.symbolic.state.PreInInterface; import org.hamcrest.Matcher; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; /** Tests for {@link SessionInstrumentation}. 
*/ public final class SessionInstrumentationTest { @Rule public TemporaryFolder _folder = new TemporaryFolder(); private static final String LAST_HOP_VAR_NAME = "lastHop"; // hostnames private static final String FW = "fw"; private static final String SOURCE1 = "source1"; private static final String SOURCE2 = "source2"; // vrf names private static final String FW_VRF = FW + ":VRF"; // interface names private static final String FW_I1 = "FW:I1"; private static final String SOURCE1_IFACE = "SOURCE1:IFACE"; private static final String SOURCE2_IFACE = "SOURCE2:IFACE"; private static final String FAKE_IFACE = "FAKE_IFACE"; // ACLs private static final String PERMIT_TCP = "permit tcp"; // BDD stuff private final BDDPacket _pkt = new BDDPacket(); private final BDD _one = _pkt.getFactory().one(); private final BDD _zero = _pkt.getFactory().zero(); @Rule public ExpectedException _exception = ExpectedException.none(); private BDDSourceManager _fwSrcMgr; private BDDSourceManager _source1SrcMgr; private Map<String, BDDSourceManager> _srcMgrs; private Map<String, BDDOutgoingOriginalFlowFilterManager> _outgoingOriginalFlowFilterMgrs; private LastHopOutgoingInterfaceManager _lastHopMgr; private Map<String, Map<String, Supplier<BDD>>> _filterBdds; private Configuration _fw; private Configuration _source1; private Configuration _source2; private BDD _invalidSrc; private Interface _fwI1; private BDD _permitTcpBdd; @Before public void setup() { NetworkFactory nf = new NetworkFactory(); Configuration.Builder cb = nf.configurationBuilder().setConfigurationFormat(ConfigurationFormat.CISCO_IOS); // Setup FW { _fw = cb.setHostname(FW).build(); Vrf vrf = nf.vrfBuilder().setOwner(_fw).setName(FW_VRF).build(); Interface.Builder ib = nf.interfaceBuilder().setActive(true).setOwner(_fw).setVrf(vrf); _fwI1 = ib.setName(FW_I1).build(); } // Setup source 1 { _source1 = cb.setHostname(SOURCE1).build(); Vrf vrf = nf.vrfBuilder().setOwner(_source1).build(); Interface.Builder ib = nf.interfaceBuilder().setActive(true).setOwner(_source1).setVrf(vrf); ib.setName(SOURCE1_IFACE).build(); } // Setup source 2 { _source2 = cb.setHostname(SOURCE2).build(); Vrf vrf = nf.vrfBuilder().setOwner(_source2).build(); Interface.Builder ib = nf.interfaceBuilder().setActive(true).setOwner(_source2).setVrf(vrf); ib.setName(SOURCE2_IFACE).build(); } // Setup last hop manager _lastHopMgr = new LastHopOutgoingInterfaceManager( _pkt, BDDFiniteDomain.domainsWithSharedVariable( _pkt, LAST_HOP_VAR_NAME, ImmutableMap.of( NodeInterfacePair.of(FW, FW_I1), ImmutableSet.of( NO_LAST_HOP, NodeInterfacePair.of(SOURCE1, SOURCE1_IFACE), NodeInterfacePair.of(SOURCE2, SOURCE2_IFACE))))); // Setup source managers { BDDInteger srcVar = _pkt.allocateBDDInteger("Source", 3, false); // Setup source tracking for firewall _fwSrcMgr = BDDSourceManager.forInterfaces(srcVar, ImmutableSet.of(FW_I1, FAKE_IFACE)); _invalidSrc = _fwSrcMgr.isValidValue().not(); assert !_invalidSrc.isZero(); _source1SrcMgr = BDDSourceManager.forInterfaces(srcVar, ImmutableSet.of(SOURCE1_IFACE, FAKE_IFACE)); _srcMgrs = ImmutableMap.of(FW, _fwSrcMgr, SOURCE1, _source1SrcMgr); _outgoingOriginalFlowFilterMgrs = forNetwork(_pkt, ImmutableMap.of(FW, _fw, SOURCE1, _source1), _srcMgrs); } // Setup filter BDDs { _permitTcpBdd = _pkt.getIpProtocol().value(IpProtocol.TCP); _filterBdds = ImmutableMap.of(FW, ImmutableMap.of(PERMIT_TCP, () -> _permitTcpBdd)); } } private Map<String, Configuration> configs() { return ImmutableMap.of(FW, _fw, SOURCE1, _source1, SOURCE2, _source2); } private List<Edge> 
deliveredToSubnetEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .nodeInterfaceDeliveredToSubnetEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } private List<Edge> nodeAcceptEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .nodeAcceptEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } private List<Edge> nodeDropAclInEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .nodeDropAclInEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } private List<Edge> nodeDropAclOutEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .nodeDropAclOutEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } private List<Edge> fibLookupSessionEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .fibLookupSessionEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } private List<Edge> preInInterfaceEdges(BDDFirewallSessionTraceInfo sessionInfo) { return new SessionInstrumentation( _pkt, configs(), _srcMgrs, _lastHopMgr, _outgoingOriginalFlowFilterMgrs, _filterBdds) .preInInterfaceEdges(sessionInfo) .collect(ImmutableList.toImmutableList()); } @Test public void testNodeAcceptEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD srcFwI1 = _fwSrcMgr.getSourceInterfaceBDD(FW_I1); BDD validSrc = _fwSrcMgr.isValidValue(); BDD lastHop1 = _lastHopMgr.getLastHopOutgoingInterfaceBdd(SOURCE1, SOURCE1_IFACE, FW, FW_I1); BDD noLastHop = _lastHopMgr.getNoLastHopOutgoingInterfaceBdd(FW, FW_I1); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), Accept.INSTANCE, sessionHeaders, IDENTITY); // No transformation, no ACLs assertThat( nodeAcceptEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeAccept(FW), allOf( mapsForward(srcFwI1, sessionHeaders), mapsForward(srcFwI1.and(noLastHop), sessionHeaders), mapsForward(srcFwI1.and(lastHop1), sessionHeaders), mapsBackward(_one, validSrc.and(sessionHeaders)))))); // FW_I1 has an incoming session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), PERMIT_TCP, null)); assertThat( nodeAcceptEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeAccept(FW), allOf( mapsForward(srcFwI1, sessionHeaders.and(_permitTcpBdd)), mapsForward(srcFwI1.and(noLastHop), sessionHeaders.and(_permitTcpBdd)), mapsForward(srcFwI1.and(lastHop1), sessionHeaders.and(_permitTcpBdd)), mapsBackward(_one, validSrc.and(sessionHeaders).and(_permitTcpBdd)))))); _fwI1.setFirewallSessionInterfaceInfo(null); // Session has a transformation { BDD poolBdd = _pkt.getSrcIp().value(10L); Transition nat = Transitions.eraseAndSet(_pkt.getSrcIp(), poolBdd); BDDFirewallSessionTraceInfo natSessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), Accept.INSTANCE, sessionHeaders, nat); assertThat( nodeAcceptEdges(natSessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeAccept(FW), allOf( 
mapsForward(srcFwI1, sessionHeaders.and(poolBdd)), mapsForward(srcFwI1.and(noLastHop), sessionHeaders.and(poolBdd)), mapsForward(srcFwI1.and(lastHop1), sessionHeaders.and(poolBdd)), mapsBackward(_one, validSrc.and(sessionHeaders)), mapsBackward(poolBdd.not(), _zero))))); } } @Test public void testFibLookupSessionEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), PostNatFibLookup.INSTANCE, sessionHeaders, IDENTITY); assertThat( fibLookupSessionEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PostInVrfSession(FW, FW_VRF), allOf( mapsForward(_one, sessionHeaders.and(_fwSrcMgr.getSourceInterfaceBDD(FW_I1))), mapsBackward(_one, sessionHeaders))))); } @Test public void testFibLookupSessionEdges_transformation() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD poolBdd = _pkt.getSrcIp().value(10L); Transition nat = Transitions.eraseAndSet(_pkt.getSrcIp(), poolBdd); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), PostNatFibLookup.INSTANCE, sessionHeaders, nat); assertThat( fibLookupSessionEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PostInVrfSession(FW, FW_VRF), allOf( mapsForward( _one, sessionHeaders.and(poolBdd).and(_fwSrcMgr.getSourceInterfaceBDD(FW_I1))), mapsBackward(_one, sessionHeaders))))); } @Test public void testFibLookupSessionEdges_inboundSession() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, new OriginatingSessionScope(FW_VRF), PostNatFibLookup.INSTANCE, sessionHeaders, IDENTITY); BDD originating = _fwSrcMgr.getOriginatingFromDeviceBDD(); Matcher<Transition> expectedTransition = allOf( mapsForward(_one, sessionHeaders.and(originating)), mapsBackward(_one, sessionHeaders)); assertThat( fibLookupSessionEdges(sessionInfo), containsInAnyOrder( edge( new OriginateInterface(FW, FW_I1), new PostInVrfSession(FW, FW_VRF), expectedTransition), edge( new OriginateVrf(FW, FW_VRF), new PostInVrfSession(FW, FW_VRF), expectedTransition))); } @Test public void testFibLookupSessionEdges_inboundSessionWithTransformation() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD poolBdd = _pkt.getSrcIp().value(10L); Transition nat = Transitions.eraseAndSet(_pkt.getSrcIp(), poolBdd); BDDFirewallSessionTraceInfo natSessionInfo = new BDDFirewallSessionTraceInfo( FW, new OriginatingSessionScope(FW_VRF), PostNatFibLookup.INSTANCE, sessionHeaders, nat); BDD originating = _fwSrcMgr.getOriginatingFromDeviceBDD(); Matcher<Transition> expectedTransition = allOf( mapsForward(_one, sessionHeaders.and(originating).and(poolBdd)), mapsBackward(_one, sessionHeaders)); assertThat( fibLookupSessionEdges(natSessionInfo), containsInAnyOrder( edge( new OriginateInterface(FW, FW_I1), new PostInVrfSession(FW, FW_VRF), expectedTransition), edge( new OriginateVrf(FW, FW_VRF), new PostInVrfSession(FW, FW_VRF), expectedTransition))); } @Test public void testPreInInterfaceEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD srcFwI1 = _fwSrcMgr.getSourceInterfaceBDD(FW_I1); BDD validSrc = _fwSrcMgr.isValidValue(); BDD lastHop1 = _lastHopMgr.getLastHopOutgoingInterfaceBdd(SOURCE1, SOURCE1_IFACE, FW, FW_I1); BDD noLastHop = _lastHopMgr.getNoLastHopOutgoingInterfaceBdd(FW, FW_I1); BDD fakeIface = _source1SrcMgr.getSourceInterfaceBDD(FAKE_IFACE); BDD source1Iface = _source1SrcMgr.getSourceInterfaceBDD(SOURCE1_IFACE); 
BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), new ForwardOutInterface(FW_I1, NodeInterfacePair.of(SOURCE1, SOURCE1_IFACE)), sessionHeaders, IDENTITY); // No transformation, no ACLs assertThat( preInInterfaceEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PreInInterface(SOURCE1, SOURCE1_IFACE), allOf( mapsForward(srcFwI1, sessionHeaders.and(source1Iface)), mapsForward(srcFwI1.and(noLastHop), sessionHeaders.and(source1Iface)), mapsForward(srcFwI1.and(lastHop1), sessionHeaders.and(source1Iface)), mapsBackward(source1Iface, validSrc.and(sessionHeaders)), mapsBackward(fakeIface, _zero))))); // FW_I1 has an incoming session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), PERMIT_TCP, null)); assertThat( preInInterfaceEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PreInInterface(SOURCE1, SOURCE1_IFACE), allOf( mapsForward(srcFwI1, sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsForward( srcFwI1.and(noLastHop), sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsForward( srcFwI1.and(lastHop1), sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsBackward(source1Iface, validSrc.and(sessionHeaders).and(_permitTcpBdd)), mapsBackward(fakeIface, _zero))))); _fwI1.setFirewallSessionInterfaceInfo(null); // FW_I1 has an outgoing session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), null, PERMIT_TCP)); assertThat( preInInterfaceEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PreInInterface(SOURCE1, SOURCE1_IFACE), allOf( mapsForward(srcFwI1, sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsForward( srcFwI1.and(noLastHop), sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsForward( srcFwI1.and(lastHop1), sessionHeaders.and(_permitTcpBdd).and(source1Iface)), mapsBackward(source1Iface, validSrc.and(sessionHeaders).and(_permitTcpBdd)), mapsBackward(fakeIface, _zero))))); _fwI1.setFirewallSessionInterfaceInfo(null); // Session has a transformation { BDD poolBdd = _pkt.getSrcIp().value(10L); Transition nat = Transitions.eraseAndSet(_pkt.getSrcIp(), poolBdd); BDDFirewallSessionTraceInfo natSessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), new ForwardOutInterface(FW_I1, NodeInterfacePair.of(SOURCE1, SOURCE1_IFACE)), sessionHeaders, nat); assertThat( preInInterfaceEdges(natSessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new PreInInterface(SOURCE1, SOURCE1_IFACE), allOf( mapsForward(srcFwI1, sessionHeaders.and(poolBdd).and(source1Iface)), mapsForward( srcFwI1.and(noLastHop), sessionHeaders.and(poolBdd).and(source1Iface)), mapsForward( srcFwI1.and(lastHop1), sessionHeaders.and(poolBdd).and(source1Iface)), mapsBackward(source1Iface, validSrc.and(sessionHeaders)), mapsBackward(source1Iface.and(poolBdd.not()), _zero), mapsBackward(fakeIface, _zero))))); } } @Test public void testDeliveredToSubnetEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD srcFwI1 = _fwSrcMgr.getSourceInterfaceBDD(FW_I1); BDD lastHop1 = _lastHopMgr.getLastHopOutgoingInterfaceBdd(SOURCE1, SOURCE1_IFACE, FW, FW_I1); BDD noLastHop = _lastHopMgr.getNoLastHopOutgoingInterfaceBdd(FW, FW_I1); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), new ForwardOutInterface(FW_I1, null), sessionHeaders, IDENTITY); // No transformation, no ACLs 
List<Edge> actual = deliveredToSubnetEdges(sessionInfo); assertThat( actual, contains( edge( new PreInInterface(FW, FW_I1), new NodeInterfaceDeliveredToSubnet(FW, FW_I1), allOf( mapsForward(srcFwI1, sessionHeaders.and(srcFwI1)), mapsForward(srcFwI1.and(noLastHop), sessionHeaders.and(srcFwI1).and(noLastHop)), mapsForward(srcFwI1.and(lastHop1), sessionHeaders.and(srcFwI1).and(lastHop1)), mapsBackward(_one, sessionHeaders))))); // FW_I1 has an incoming session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), PERMIT_TCP, null)); assertThat( deliveredToSubnetEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeInterfaceDeliveredToSubnet(FW, FW_I1), allOf( mapsForward(_one, sessionHeaders.and(_permitTcpBdd)), mapsBackward(_one, sessionHeaders.and(_permitTcpBdd)))))); _fwI1.setFirewallSessionInterfaceInfo(null); // FW_I1 has an outgoing session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), null, PERMIT_TCP)); assertThat( deliveredToSubnetEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeInterfaceDeliveredToSubnet(FW, FW_I1), allOf( mapsForward(_one, sessionHeaders.and(_permitTcpBdd)), mapsBackward(_one, sessionHeaders.and(_permitTcpBdd)))))); _fwI1.setFirewallSessionInterfaceInfo(null); // Session has a transformation { BDD poolBdd = _pkt.getSrcIp().value(10L); Transition nat = Transitions.eraseAndSet(_pkt.getSrcIp(), poolBdd); BDDFirewallSessionTraceInfo natSessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), new ForwardOutInterface(FW_I1, null), sessionHeaders, nat); assertThat( deliveredToSubnetEdges(natSessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeInterfaceDeliveredToSubnet(FW, FW_I1), allOf( mapsForward(_one, sessionHeaders.and(poolBdd)), mapsForward(poolBdd.not(), sessionHeaders.and(poolBdd)), mapsBackward(_one, sessionHeaders), mapsBackward(poolBdd.not(), _zero))))); } } @Test public void testDropAclInEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD srcFwI1 = _fwSrcMgr.getSourceInterfaceBDD(FW_I1); BDD validSrc = _fwSrcMgr.isValidValue(); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), Accept.INSTANCE, sessionHeaders, IDENTITY); // No ACLs assertThat(nodeDropAclInEdges(sessionInfo), empty()); // FW_I1 has an incoming session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), PERMIT_TCP, null)); assertThat( nodeDropAclInEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeDropAclIn(FW), allOf( mapsForward(srcFwI1, sessionHeaders.and(_permitTcpBdd.not())), mapsBackward(_one, validSrc.and(sessionHeaders).and(_permitTcpBdd.not())))))); _fwI1.setFirewallSessionInterfaceInfo(null); } @Test public void testDropAclOutEdges() { BDD sessionHeaders = _pkt.getDstIp().value(10L); BDD srcFwI1 = _fwSrcMgr.getSourceInterfaceBDD(FW_I1); BDD validSrc = _fwSrcMgr.isValidValue(); BDDFirewallSessionTraceInfo sessionInfo = new BDDFirewallSessionTraceInfo( FW, ImmutableSet.of(FW_I1), new ForwardOutInterface(FW_I1, null), sessionHeaders, IDENTITY); // No ACLs assertThat(nodeDropAclOutEdges(sessionInfo), empty()); // FW_I1 has an outgoing session ACL _fwI1.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( Action.FORWARD_OUT_IFACE, ImmutableList.of(FW_I1), null, PERMIT_TCP)); 
assertThat( nodeDropAclOutEdges(sessionInfo), contains( edge( new PreInInterface(FW, FW_I1), new NodeDropAclOut(FW), allOf( mapsForward(srcFwI1, sessionHeaders.and(_permitTcpBdd.not())), mapsBackward(_one, validSrc.and(sessionHeaders).and(_permitTcpBdd.not())))))); _fwI1.setFirewallSessionInterfaceInfo(null); } }
// // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once #include "IGraphObservable.hpp" #include "Graph.hpp" namespace armnn { template <typename ObservedType> class GraphObservable : public IGraphObservable { public: using Iterator = typename std::list<ObservedType>::const_iterator; GraphObservable(Graph& subject, GraphEvent notifyOnEvent) : m_Subject(&subject) { m_NotifyOnEvent = notifyOnEvent; m_Subject->AttachObservable(this, m_NotifyOnEvent); }; void Clear() { m_ObservedObjects.clear(); }; Iterator begin() { return m_ObservedObjects.begin(); } Iterator end() { return m_ObservedObjects.end(); } protected: ~GraphObservable() { if (m_Subject) { m_Subject->DetachObservable(this, m_NotifyOnEvent); } } GraphEvent m_NotifyOnEvent; Graph* m_Subject; std::list<ObservedType> m_ObservedObjects; }; class AddedLayerObservable : public GraphObservable<Layer*> { public: explicit AddedLayerObservable(Graph& subject) : GraphObservable<Layer*>(subject, GraphEvent::LayerAdded) {}; void Update(Layer* graphLayer) override; }; class ErasedLayerNamesObservable : public GraphObservable<std::string> { public: explicit ErasedLayerNamesObservable(Graph& subject) : GraphObservable<std::string>(subject, GraphEvent::LayerErased) {}; void Update(Layer* graphLayer) override; }; } //namespace armnn
// This is core/vnl/vnl_beta.h #ifndef vnl_beta_h_ #define vnl_beta_h_ //: // \file // \brief implementation of the beta function, also called the Euler integral of the first kind // \author <NAME> #include "vnl_gamma.h" #include "vnl/vnl_export.h" #if 1 // implementation via vnl_log_gamma //: Computation of beta function in terms of gamma function. // Actually, this implementation refers to vnl_log_gamma, // since this involves just a single call to std::exp instead of three. template <class T> inline VNL_EXPORT double vnl_beta(T x, T y) {return std::exp(vnl_log_gamma(x)+vnl_log_gamma(y)-vnl_log_gamma(x+y)); } #else // implementation via vnl_gamma; less efficient since it needs 3x std::exp //: Computation of beta function in terms of gamma function. template <class T> inline double vnl_beta(T x, T y) {return (vnl_gamma(x)*vnl_gamma(y))/vnl_gamma(x+y); } #endif //: Computation of the log beta function in terms of the log gamma function. // vnl_log_beta is just the std::log (natural logarithm) of the beta function. template <class T> inline VNL_EXPORT double vnl_log_beta(T x, T y) {return vnl_log_gamma(x)+vnl_log_gamma(y)-vnl_log_gamma(x+y); } #endif
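Written out, the identity both branches of vnl_beta rely on, and the reason the enabled log-gamma branch needs only a single call to std::exp:

\[
B(x,y) \;=\; \frac{\Gamma(x)\,\Gamma(y)}{\Gamma(x+y)}
       \;=\; \exp\bigl(\ln\Gamma(x) + \ln\Gamma(y) - \ln\Gamma(x+y)\bigr),
\qquad
\ln B(x,y) \;=\; \ln\Gamma(x) + \ln\Gamma(y) - \ln\Gamma(x+y).
\]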
# SPDX-License-Identifier: Apache-2.0 import os import sys import unittest import mock_keras2onnx import numpy as np from mock_keras2onnx.proto import keras from os.path import dirname, abspath from onnxconverter_common.onnx_ex import get_maximum_opset_supported sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/')) from test_utils import run_keras_and_ort, test_level_0 K = keras.backend Activation = keras.layers.Activation AveragePooling2D = keras.layers.AveragePooling2D Add = keras.layers.Add BatchNormalization = keras.layers.BatchNormalization concatenate = keras.layers.concatenate Conv1D = keras.layers.Conv1D Dense = keras.layers.Dense Dropout = keras.layers.Dropout Embedding = keras.layers.Embedding Flatten = keras.layers.Flatten GlobalAveragePooling1D = keras.layers.GlobalAveragePooling1D Input = keras.layers.Input Lambda = keras.layers.Lambda LeakyReLU = keras.layers.LeakyReLU LSTM = keras.layers.LSTM Masking = keras.layers.Masking MaxPooling2D = keras.layers.MaxPooling2D multiply = keras.layers.multiply Permute = keras.layers.Permute Reshape = keras.layers.Reshape SeparableConv2D = keras.layers.SeparableConv2D UpSampling2D = keras.layers.UpSampling2D ZeroPadding2D = keras.layers.ZeroPadding2D Sequential = keras.models.Sequential Model = keras.models.Model def squeeze_excite_block(input): filters = input._keras_shape[-1] # channel_axis = -1 for TF se = GlobalAveragePooling1D()(input) se = Reshape((1, filters))(se) se = Dense(filters // 16, activation='relu', kernel_initializer='he_normal', use_bias=False)(se) se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se) se = multiply([input, se]) return se MAX_NB_VARIABLES = 20 MAX_TIMESTEPS = 5 NB_CLASS = 10 # Model from https://github.com/titu1994/MLSTM-FCN class TestMLSTM_FCN(unittest.TestCase): def setUp(self): self.model_files = [] def tearDown(self): for fl in self.model_files: os.remove(fl) @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11, "Test level 0 only.") def test_MLSTM_FCN(self): K.clear_session() ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS)) x = Masking()(ip) x = LSTM(8)(x) x = Dropout(0.8)(x) y = Permute((2, 1))(ip) y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y) y = BatchNormalization()(y) y = Activation('relu')(y) y = squeeze_excite_block(y) y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y) y = BatchNormalization()(y) y = Activation('relu')(y) y = squeeze_excite_block(y) y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y) y = BatchNormalization()(y) y = Activation('relu')(y) y = GlobalAveragePooling1D()(y) x = concatenate([x, y]) out = Dense(NB_CLASS, activation='softmax')(x) keras_model = Model(ip, out) data = np.random.rand(2, MAX_NB_VARIABLES, MAX_TIMESTEPS).astype(np.float32) expected = keras_model.predict(data) onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name) self.assertTrue( run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files)) @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11, "Test level 0 only.") def test_LSTM_FCN(self): K.clear_session() ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS)) x = Masking()(ip) x = LSTM(8)(x) x = Dropout(0.8)(x) y = Permute((2, 1))(ip) y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y) y = BatchNormalization()(y) y = Activation('relu')(y) y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y) y = 
BatchNormalization()(y) y = Activation('relu')(y) y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y) y = BatchNormalization()(y) y = Activation('relu')(y) y = GlobalAveragePooling1D()(y) x = concatenate([x, y]) out = Dense(NB_CLASS, activation='softmax')(x) keras_model = Model(ip, out) data = np.random.rand(2, MAX_NB_VARIABLES, MAX_TIMESTEPS).astype(np.float32) expected = keras_model.predict(data) onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name) self.assertTrue( run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files)) if __name__ == "__main__": unittest.main()
# cyberbattle/simulation/environment_generation_test.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""
The unit tests for the environment_generation functions
"""
from collections import Counter
from cyberbattle.simulation import commandcontrol
from typing import List, Dict

import pytest

from . import environment_generation
from . import model

windows_vulns: Dict[str, model.VulnerabilityInfo] = environment_generation.potential_windows_vulns
linux_vulns: Dict[str, model.VulnerabilityInfo] = environment_generation.potential_linux_vulns

windows_node_states: List[model.PropertyName] = environment_generation.potential_windows_node_states
linux_node_states: List[model.PropertyName] = environment_generation.potential_linux_node_states

potential_ports: List[model.PortName] = environment_generation.potential_ports


def test_create_random_environment() -> None:
    """
    The unit tests for create_random_environment function
    """
    with pytest.raises(ValueError, match=r"Please supply a non empty string for the name"):
        environment_generation.create_random_environment("", 2)
    with pytest.raises(ValueError, match=r"Please supply a positive non zero positive"
                                         r"integer for the size of the environment"):
        environment_generation.create_random_environment("Test_environment", -5)

    result: model.Environment = environment_generation.\
        create_random_environment("Test_environment 2", 4)
    assert isinstance(result, model.Environment)


def test_random_environment_list_attacks() -> None:
    """
    Unit tests for #23 caused by bug
    https://github.com/bastikr/boolean.py/issues/82 in boolean.py
    """
    env = environment_generation.create_random_environment('test', 10)
    c2 = commandcontrol.CommandControl(env)
    c2.print_all_attacks()


def test_create_random_node() -> None:
    """
    The unit tests for create_random_node() function
    """
    # check that the correct exceptions are generated
    with pytest.raises(ValueError, match=r"No endpoints supplied"):
        environment_generation.create_random_node("Linux", [])
    with pytest.raises(ValueError, match=r"Unsupported OS Type please enter Linux or Windows"):
        environment_generation.create_random_node("Solaris", potential_ports)

    test_node: model.NodeInfo = environment_generation.create_random_node("Linux", potential_ports)
    assert isinstance(test_node, model.NodeInfo)


def test_get_properties_from_vulnerabilities() -> None:
    """
    This function tests the get_properties_from_vulnerabilities function
    It takes nothing and returns nothing.
    """
    # testing on linux vulns
    props: List[model.PropertyName] = environment_generation.\
        get_properties_from_vulnerabilities("Linux", linux_vulns)
    assert "Linux" in props
    assert "PortSSHOpen" in props
    assert "PortSMBOpen" in props

    # testing on Windows vulns
    windows_props: List[model.PropertyName] = environment_generation.get_properties_from_vulnerabilities(
        "Windows", windows_vulns)
    assert "Windows" in windows_props
    assert "PortRDPOpen" in windows_props
    assert "PortSMBOpen" in windows_props
    assert "DomainJoined" in windows_props
    assert "Win10" in windows_props
    assert "Win7" in windows_props


def test_create_firewall_rules() -> None:
    """
    This function tests the create_firewall_rules function.
    It takes nothing and returns nothing.
""" empty_ports: List[model.PortName] = [] potential_port_list: List[model.PortName] = ["RDP", "SSH", "HTTP", "HTTPs", "SMB", "SQL", "FTP", "WMI"] half_ports: List[model.PortName] = ["SSH", "HTTPs", "SQL", "FTP", "WMI"] all_blocked: List[model.FirewallRule] = [model.FirewallRule( port, model.RulePermission.BLOCK) for port in potential_port_list] all_allowed: List[model.FirewallRule] = [model.FirewallRule( port, model.RulePermission.ALLOW) for port in potential_port_list] half_allowed: List[model.FirewallRule] = [model.FirewallRule(port, model.RulePermission.ALLOW) if port in half_ports else model.FirewallRule( port, model.RulePermission.BLOCK) for port in potential_port_list] # testing on an empty list should lead to results: model.FirewallConfiguration = environment_generation.create_firewall_rules(empty_ports) assert Counter(results.incoming) == Counter(all_blocked) assert Counter(results.outgoing) == Counter(all_blocked) # testing on a the list supported ports results = environment_generation.create_firewall_rules(potential_ports) assert Counter(results.incoming) == Counter(all_allowed) assert Counter(results.outgoing) == Counter(all_allowed) results = environment_generation.create_firewall_rules(half_ports) assert Counter(results.incoming) == Counter(half_allowed) assert Counter(results.outgoing) == Counter(half_allowed)
package cn.springcloud.gray.server.resources.rest.remote;

import cn.springcloud.gray.api.ApiRes;
import cn.springcloud.gray.model.InstanceStatus;
import cn.springcloud.gray.server.module.gray.GrayServerModule;
import cn.springcloud.gray.server.module.gray.domain.GrayInstance;
import io.swagger.annotations.Api;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;

import java.util.Date;
import java.util.Objects;

import static cn.springcloud.gray.api.ApiRes.CODE_SUCCESS;

@Api("APIs called by the gray client")
@RestController
@RequestMapping("/gray/v2")
public class GrayInstanceResourceV2 {

    @Autowired
    private GrayServerModule grayServerModule;

    @RequestMapping(value = "/instance/", method = RequestMethod.POST)
    public ApiRes<Void> save(@RequestBody GrayInstance grayInstance) {
        if (Objects.isNull(grayInstance.getInstanceStatus())) {
            grayInstance.setInstanceStatus(InstanceStatus.UP);
        }
        grayInstance.setOperateTime(new Date());
        grayServerModule.saveGrayInstance(grayInstance);
        return ApiRes.<Void>builder()
                .code(CODE_SUCCESS)
                .build();
    }
}
// RUN: %clangxx_tsan -O1 %s -o %t // RUN: %run %t 2>&1 | FileCheck %s // RUN: %run %t arg 2>&1 | FileCheck %s #include "java.h" jptr varaddr1_old; jptr varaddr2_old; jptr lockaddr1_old; jptr lockaddr2_old; jptr varaddr1_new; jptr varaddr2_new; jptr lockaddr1_new; jptr lockaddr2_new; void *Thread(void *p) { barrier_wait(&barrier); __tsan_java_mutex_lock(lockaddr1_new); *(char*)varaddr1_new = 43; __tsan_java_mutex_unlock(lockaddr1_new); __tsan_java_mutex_lock(lockaddr2_new); *(char*)varaddr2_new = 43; __tsan_java_mutex_unlock(lockaddr2_new); return 0; } int main(int argc, char **argv) { barrier_init(&barrier, 2); int const kHeapSize = 1024 * 1024; void *jheap = malloc(kHeapSize); jheap = (char*)jheap + 8; __tsan_java_init((jptr)jheap, kHeapSize); const int kBlockSize = 64; int const kMove = 32; varaddr1_old = (jptr)jheap; lockaddr1_old = (jptr)jheap + 1; varaddr2_old = (jptr)jheap + kBlockSize - 1; lockaddr2_old = (jptr)jheap + kBlockSize - 16; varaddr1_new = varaddr1_old + kMove; lockaddr1_new = lockaddr1_old + kMove; varaddr2_new = varaddr2_old + kMove; lockaddr2_new = lockaddr2_old + kMove; if (argc > 1) { // Move memory backwards. varaddr1_old += kMove; lockaddr1_old += kMove; varaddr2_old += kMove; lockaddr2_old += kMove; varaddr1_new -= kMove; lockaddr1_new -= kMove; varaddr2_new -= kMove; lockaddr2_new -= kMove; } __tsan_java_alloc(varaddr1_old, kBlockSize); pthread_t th; pthread_create(&th, 0, Thread, 0); __tsan_java_mutex_lock(lockaddr1_old); *(char*)varaddr1_old = 43; __tsan_java_mutex_unlock(lockaddr1_old); __tsan_java_mutex_lock(lockaddr2_old); *(char*)varaddr2_old = 43; __tsan_java_mutex_unlock(lockaddr2_old); __tsan_java_move(varaddr1_old, varaddr1_new, kBlockSize); barrier_wait(&barrier); pthread_join(th, 0); __tsan_java_free(varaddr1_new, kBlockSize); fprintf(stderr, "DONE\n"); return __tsan_java_fini(); } // CHECK-NOT: WARNING: ThreadSanitizer: data race // CHECK: DONE
// src/jpa-engine/core/src/main/java/com/impetus/kundera/proxy/collection/ProxyMap.java
/*******************************************************************************
 * * Copyright 2013 Impetus Infotech.
 * *
 * * Licensed under the Apache License, Version 2.0 (the "License");
 * * you may not use this file except in compliance with the License.
 * * You may obtain a copy of the License at
 * *
 * *      http://www.apache.org/licenses/LICENSE-2.0
 * *
 * * Unless required by applicable law or agreed to in writing, software
 * * distributed under the License is distributed on an "AS IS" BASIS,
 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * * See the License for the specific language governing permissions and
 * * limitations under the License.
 ******************************************************************************/
package com.impetus.kundera.proxy.collection;

import java.util.Collection;
import java.util.Map;
import java.util.Set;

import com.impetus.kundera.metadata.model.Relation;
import com.impetus.kundera.persistence.PersistenceDelegator;

/**
 * Proxy class used to represent instances for {@link Map}
 *
 * @author amresh.singh
 */
public class ProxyMap extends AbstractProxyBase implements ProxyCollection, Map
{

    /**
     * Default constructor
     */
    public ProxyMap()
    {
        super();
    }

    public ProxyMap(final PersistenceDelegator delegator, final Relation relation)
    {
        super(delegator, relation);
    }

    @Override
    public ProxyCollection getCopy()
    {
        ProxyCollection proxyCollection = new ProxyMap(getPersistenceDelegator(), getRelation());
        proxyCollection.setRelationsMap(getRelationsMap());
        return proxyCollection;
    }

    @Override
    public Object getDataCollection()
    {
        return dataCollection != null && !((Map) dataCollection).isEmpty() ? dataCollection : null;
    }

    // ///////////////////////Methods from Collection interface ////////////////

    @Override
    public void clear()
    {
        eagerlyLoadDataCollection();
        if (getDataCollection() != null && !(getDataCollection() instanceof ProxyCollection))
        {
            ((Map) getDataCollection()).clear();
        }
    }

    @Override
    public boolean isEmpty()
    {
        boolean result = true;
        eagerlyLoadDataCollection();
        if (getDataCollection() != null && !(getDataCollection() instanceof ProxyCollection))
        {
            result = ((Map) getDataCollection()).isEmpty();
        }
        return result;
    }

    @Override
    public int size()
    {
        eagerlyLoadDataCollection();
        return dataCollection == null || dataCollection instanceof ProxyCollection ?
0 : ((Map) dataCollection).size(); } // ///////////////////////Methods from Map interface //////////////// @Override public boolean containsKey(final Object arg0) { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; boolean result = false; if (dataMap != null && !(dataMap instanceof ProxyMap) && !dataMap.isEmpty()) { result = dataMap.containsKey(arg0); } return result; } @Override public boolean containsValue(final Object arg0) { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; boolean result = false; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.containsValue(arg0); } return result; } @Override public Set entrySet() { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; Set result = null; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.entrySet(); } return result; } @Override public Object get(final Object arg0) { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; Object result = null; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.get(arg0); } return result; } @Override public Set keySet() { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; Set result = null; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.keySet(); } return result; } @Override public Object put(final Object arg0, final Object arg1) { eagerlyLoadDataCollection(); Map dataMap = (Map) dataCollection; Object result = null; if (dataMap != null) { result = dataMap.put(arg0, arg1); } return result; } @Override public void putAll(final Map arg0) { eagerlyLoadDataCollection(); Map dataMap = (Map) dataCollection; if (dataMap != null) { dataMap.putAll(arg0); } } @Override public Object remove(final Object arg0) { eagerlyLoadDataCollection(); Map dataMap = (Map) dataCollection; Object result = null; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.remove(arg0); } return result; } @Override public Collection values() { eagerlyLoadDataCollection(); final Map dataMap = (Map) dataCollection; Collection result = null; if (dataMap != null && !dataMap.isEmpty()) { result = dataMap.values(); } return result; } }
package org.aion.zero.impl.valid; import static org.aion.zero.impl.valid.BlockDetailsValidator.isValidBlock; import static org.mockito.Mockito.when; import com.google.common.truth.Truth; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.aion.base.AionTransaction; import org.aion.base.AionTxExecSummary; import org.aion.base.AionTxReceipt; import org.aion.base.Bloom; import org.aion.base.ConstantUtil; import org.aion.base.Constants; import org.aion.log.AionLoggerFactory; import org.aion.log.LogEnum; import org.aion.log.LogLevel; import org.aion.zero.impl.types.Block; import org.aion.zero.impl.types.BlockHeader; import org.aion.zero.impl.types.BlockUtil; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.slf4j.Logger; public class BlockDetailsValidatorTest { @Mock Block mockBlock; @Mock BlockHeader mockBlockHeader; @Mock AionTxExecSummary mockTxExecSummary; @Mock AionTxReceipt mockTxReceipt; @Mock AionTransaction mockTransaction; @Before public void before() { MockitoAnnotations.initMocks(this); Map<LogEnum, LogLevel> logLevelMap = new HashMap<>(); logLevelMap.put(LogEnum.ROOT, LogLevel.DEBUG); AionLoggerFactory.init(logLevelMap); } @Test public void validateEmptyBlockTest() { // Normal empty block case when(mockBlock.getNumber()).thenReturn(2L); when(mockBlock.getHeader()).thenReturn(mockBlockHeader); when(mockBlock.getReceiptsRoot()).thenReturn(ConstantUtil.EMPTY_TRIE_HASH); when(mockBlock.getLogBloom()).thenReturn(new byte[Bloom.SIZE]); when(mockBlockHeader.getEnergyConsumed()).thenReturn(0L); Truth.assertThat( isValidBlock( mockBlock, Collections.emptyList(), Collections.emptyList(), false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isTrue(); // empty block with invalid receiptRoot when(mockBlock.getReceiptsRoot()).thenReturn(new byte[32]); Truth.assertThat( isValidBlock( mockBlock, Collections.emptyList(), Collections.emptyList(), false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); // empty block with invalid energy when(mockBlock.getReceiptsRoot()).thenReturn(ConstantUtil.EMPTY_TRIE_HASH); when(mockBlockHeader.getEnergyConsumed()).thenReturn(1L); Truth.assertThat( isValidBlock( mockBlock, Collections.emptyList(), Collections.emptyList(), false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); // empty block with invalid logBloom when(mockBlockHeader.getEnergyConsumed()).thenReturn(0L); byte[] bytes = new byte[Bloom.SIZE]; Arrays.fill(bytes, (byte) 1); when(mockBlock.getLogBloom()).thenReturn(bytes); Truth.assertThat( isValidBlock( mockBlock, Collections.emptyList(), Collections.emptyList(), false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public void validateBlockHasRejectedTransaction() { byte[] receiptTrieEncoded = new byte[32]; Arrays.fill(receiptTrieEncoded, (byte) 1); // The block initially has rejected transaction when(mockBlock.getNumber()).thenReturn(1L); when(mockBlock.getHeader()).thenReturn(mockBlockHeader); when(mockBlock.getLogBloom()).thenReturn(new byte[Bloom.SIZE]); when(mockBlockHeader.getEnergyConsumed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxExecSummary.isRejected()).thenReturn(true); when(mockTxExecSummary.getReceipt()).thenReturn(mockTxReceipt); when(mockTxReceipt.getEnergyUsed()).thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); 
when(mockTxReceipt.getReceiptTrieEncoded()).thenReturn(receiptTrieEncoded); when(mockTxReceipt.getBloomFilter()).thenReturn(new Bloom()); List<AionTxExecSummary> summaryList = new ArrayList<>(); summaryList.add(mockTxExecSummary); List<AionTxReceipt> receiptList = new ArrayList<>(); receiptList.add(mockTxReceipt); byte[] calculatedTrieroot = BlockUtil.calcReceiptsTrie(receiptList); when(mockBlock.getReceiptsRoot()).thenReturn(calculatedTrieroot); Truth.assertThat( isValidBlock( mockBlock, summaryList, receiptList, true, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isTrue(); // The block has rejected transaction after nonce hard fork active when(mockBlock.getNumber()).thenReturn(2L); Truth.assertThat( isValidBlock( mockBlock, summaryList, receiptList, false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public void validateBlockHasInvalidEnergyUsed() { byte[] receiptTrieEncoded = new byte[32]; Arrays.fill(receiptTrieEncoded, (byte) 1); // The block has invalid total energy use in the block header when(mockBlock.getNumber()).thenReturn(2L); when(mockBlock.getHeader()).thenReturn(mockBlockHeader); when(mockBlock.getLogBloom()).thenReturn(new byte[Bloom.SIZE]); when(mockBlockHeader.getEnergyConsumed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT + 1); when(mockTxExecSummary.isRejected()).thenReturn(false); when(mockTxExecSummary.getReceipt()).thenReturn(mockTxReceipt); when(mockTxExecSummary.getTransaction()).thenReturn(mockTransaction); when(mockTxReceipt.getEnergyUsed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxReceipt.getReceiptTrieEncoded()).thenReturn(receiptTrieEncoded); when(mockTxReceipt.getBloomFilter()).thenReturn(new Bloom()); List<AionTxExecSummary> summaryList = new ArrayList<>(); summaryList.add(mockTxExecSummary); List<AionTxReceipt> receiptList = new ArrayList<>(); receiptList.add(mockTxReceipt); byte[] calculatedTrieroot = BlockUtil.calcReceiptsTrie(receiptList); when(mockBlock.getReceiptsRoot()).thenReturn(calculatedTrieroot); Truth.assertThat( isValidBlock( mockBlock, summaryList, receiptList, false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public void validateBlockHasInvalidReceiptRoot() { byte[] receiptTrieEncoded = new byte[32]; Arrays.fill(receiptTrieEncoded, (byte) 1); // The block has invalid receipt root in the block header when(mockBlock.getNumber()).thenReturn(2L); when(mockBlock.getHeader()).thenReturn(mockBlockHeader); when(mockBlock.getLogBloom()).thenReturn(new byte[Bloom.SIZE]); when(mockBlockHeader.getEnergyConsumed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxExecSummary.isRejected()).thenReturn(false); when(mockTxExecSummary.getReceipt()).thenReturn(mockTxReceipt); when(mockTxExecSummary.getTransaction()).thenReturn(mockTransaction); when(mockTxReceipt.getEnergyUsed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxReceipt.getReceiptTrieEncoded()).thenReturn(receiptTrieEncoded); when(mockTxReceipt.getBloomFilter()).thenReturn(new Bloom()); when(mockTxReceipt.getTransaction()).thenReturn(mockTransaction); List<AionTxExecSummary> summaryList = new ArrayList<>(); summaryList.add(mockTxExecSummary); List<AionTxReceipt> receiptList = new ArrayList<>(); receiptList.add(mockTxReceipt); when(mockBlock.getReceiptsRoot()).thenReturn(ConstantUtil.EMPTY_TRIE_HASH); Truth.assertThat( isValidBlock( mockBlock, summaryList, receiptList, false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public 
void validateBlockHasInvalidLogBloom() { byte[] receiptTrieEncoded = new byte[32]; Arrays.fill(receiptTrieEncoded, (byte) 1); byte[] txBloom = new byte[Bloom.SIZE]; Arrays.fill(txBloom, (byte) 1); // The block has invalid receipt root in the block header when(mockBlock.getNumber()).thenReturn(2L); when(mockBlock.getHeader()).thenReturn(mockBlockHeader); when(mockBlock.getLogBloom()).thenReturn(new byte[Bloom.SIZE]); when(mockBlockHeader.getEnergyConsumed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxExecSummary.isRejected()).thenReturn(false); when(mockTxExecSummary.getReceipt()).thenReturn(mockTxReceipt); when(mockTxExecSummary.getTransaction()).thenReturn(mockTransaction); when(mockTxReceipt.getEnergyUsed()) .thenReturn((long) Constants.NRG_TRANSACTION_DEFAULT); when(mockTxReceipt.getReceiptTrieEncoded()).thenReturn(receiptTrieEncoded); when(mockTxReceipt.getBloomFilter()).thenReturn(new Bloom(txBloom)); when(mockTxReceipt.getTransaction()).thenReturn(mockTransaction); List<AionTxExecSummary> summaryList = new ArrayList<>(); summaryList.add(mockTxExecSummary); List<AionTxReceipt> receiptList = new ArrayList<>(); receiptList.add(mockTxReceipt); byte[] calculatedTrieroot = BlockUtil.calcReceiptsTrie(receiptList); when(mockBlock.getReceiptsRoot()).thenReturn(calculatedTrieroot); Truth.assertThat( isValidBlock( mockBlock, summaryList, receiptList, false, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public void isValidStateRootTest() { when(mockBlock.getStateRoot()).thenReturn(ConstantUtil.EMPTY_TRIE_HASH); when(mockBlock.getNumber()).thenReturn(1L); when(mockBlock.getEncoded()).thenReturn(new byte[32]); Truth.assertThat( BlockDetailsValidator.isValidStateRoot( mockBlock, ConstantUtil.EMPTY_TRIE_HASH, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isTrue(); byte[] worldTrieRoot = new byte[32]; Arrays.fill(worldTrieRoot, (byte) 1); Truth.assertThat( BlockDetailsValidator.isValidStateRoot( mockBlock, worldTrieRoot, AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } @Test public void isValidTxTrieRootTest() { List<AionTransaction> txList = new ArrayList<>(); txList.add(mockTransaction); when(mockTransaction.getEncoded()).thenReturn(new byte[32]); byte[] calculatedTxTrieRoot = BlockUtil.calcTxTrieRoot(txList); when(mockBlock.getTxTrieRoot()).thenReturn(calculatedTxTrieRoot); when(mockBlock.getNumber()).thenReturn(1L); Truth.assertThat( BlockDetailsValidator.isValidTxTrieRoot( mockBlock.getTxTrieRoot(), txList, mockBlock.getNumber(), AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isTrue(); when(mockBlock.getTxTrieRoot()).thenReturn(ConstantUtil.EMPTY_TRIE_HASH); Truth.assertThat( BlockDetailsValidator.isValidTxTrieRoot( mockBlock.getTxTrieRoot(), txList, mockBlock.getNumber(), AionLoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME))) .isFalse(); } }
// soter-client-demo/app/src/main/java/com/tencent/soter/demo/net/RemoteUploadASK.java
/*
 * Tencent is pleased to support the open source community by making TENCENT SOTER available.
 * Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
 * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
 * https://opensource.org/licenses/BSD-3-Clause
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 */

package com.tencent.soter.demo.net;

import android.support.annotation.NonNull;
import android.util.Base64;

import com.tencent.soter.demo.model.ConstantsSoterDemo;
import com.tencent.soter.demo.model.DemoUtil;
import com.tencent.soter.wrapper.wrap_net.ISoterNetCallback;
import com.tencent.soter.wrapper.wrap_net.IWrapUploadKeyNet;

import org.json.JSONException;
import org.json.JSONObject;

/**
 * Created by henryye on 2017/4/27.
 *
 */
public class RemoteUploadASK extends RemoteBase implements IWrapUploadKeyNet {
    private static final String TAG = "SoterDemo.RemoteUploadASK";

    private static final String SAMPLE_ASK_JSON_PATH = ConstantsSoterDemo.SAMPLE_EXTERNAL_PATH + "ask_json.txt";
    private static final String SAMPLE_ASK_SIGNATURE_PATH = ConstantsSoterDemo.SAMPLE_EXTERNAL_PATH + "ask_signature.bin";
    private static final String SAMPLE_ASK_PUBLIC_KEY_PEM_PATH = ConstantsSoterDemo.SAMPLE_EXTERNAL_PATH + "ask_key.pem";

    private static final String KEY_REQUEST_KEY_JSON = "keyJson";
    private static final String KEY_REQUEST_SIGNATURE = "keySignature";

    private static final String KEY_RESULT = "is_verified";

    private ISoterNetCallback<UploadResult> mCallback = null;

    @Override
    JSONObject getSimulateJsonResult(JSONObject requestJson) {
        JSONObject resultJson = new JSONObject();
        try {
            resultJson.put(KEY_RESULT, true);
        } catch (JSONException e) {
            e.printStackTrace();
        }
        return resultJson;
    }

    @Override
    public void execute() {
        super.execute();
    }

    @Override
    void onNetworkEnd(JSONObject resultJson) {
        if (mCallback != null) {
            if (resultJson == null) {
                mCallback.onNetEnd(null);
            } else {
                boolean isUploadAndVerified = resultJson.optBoolean(KEY_RESULT, false);
                mCallback.onNetEnd(new UploadResult(isUploadAndVerified));
            }
        }
    }

    @Override
    public void setRequest(@NonNull UploadRequest requestDataModel) {
        JSONObject requestJson = new JSONObject();
        try {
            requestJson.put(KEY_REQUEST_KEY_JSON, requestDataModel.mKeyJson);
            requestJson.put(KEY_REQUEST_SIGNATURE, requestDataModel.mKeyJsonSignature);
            // Save the request to files purely as a sample; real projects do not need to do this.
            if (ConstantsSoterDemo.IS_DEBUG_SAVE_DATA) {
                DemoUtil.saveTextToFile(requestDataModel.mKeyJson, SAMPLE_ASK_JSON_PATH);
                DemoUtil.saveTextToFile(retrievePublicKeyFromJson(requestDataModel.mKeyJson), SAMPLE_ASK_PUBLIC_KEY_PEM_PATH);
                DemoUtil.saveBinaryToFile(Base64.decode(requestDataModel.mKeyJsonSignature, Base64.DEFAULT), SAMPLE_ASK_SIGNATURE_PATH);
            }
        } catch (JSONException e) {
            e.printStackTrace();
        }
        setRequestJson(requestJson);
    }

    private String retrievePublicKeyFromJson(String jsonStr) {
        try {
            JSONObject jsonObject = new JSONObject(jsonStr);
            return jsonObject.getString("pub_key");
        } catch (JSONException e) {
            e.printStackTrace();
            return null;
        }
    }

    @Override
    public void setCallback(ISoterNetCallback<UploadResult> callback) {
        this.mCallback = callback;
    }

    @Override
    protected String getNetUrl() {
        return BASE_URL + "/upload_ask_key";
    }
}
from awx.main.tests.functional.conftest import *  # noqa


def pytest_addoption(parser):
    parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0")


def pytest_generate_tests(metafunc):
    # This is called for every test. Only get/set command line arguments
    # if the argument is specified in the list of test "fixturenames".
    option_value = metafunc.config.option.release
    if 'release' in metafunc.fixturenames and option_value is not None:
        metafunc.parametrize("release", [option_value])
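A hedged sketch of a test that would consume the hook above (the test name and assertion are invented for illustration, not from AWX): any test declaring a release argument is parametrized with the value passed as, e.g., pytest --release 3.3.0.

# Hypothetical test module; "release" is injected by pytest_generate_tests above.
def test_release_is_dotted_version(release):
    parts = release.split(".")              # e.g. "3.3.0" -> ["3", "3", "0"]
    assert all(p.isdigit() for p in parts)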
/* Copyright (c) 2014 Aerys Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "minko/file/FileProtocol.hpp" #include "minko/file/Options.hpp" #include "minko/Signal.hpp" #include "minko/AbstractCanvas.hpp" #include "minko/async/Worker.hpp" #include <fstream> #include <regex> using namespace minko; using namespace minko::file; FileProtocol::FileProtocol() { } std::list<std::shared_ptr<FileProtocol>> FileProtocol::_runningLoaders; void FileProtocol::load() { auto loader = std::static_pointer_cast<FileProtocol>(shared_from_this()); _runningLoaders.push_back(loader); const auto& resolvedFilename = this->resolvedFilename(); auto options = _options; auto flags = std::ios::in | std::ios::binary; auto cleanFilename = resolvedFilename; auto prefixPosition = resolvedFilename.find("://"); if (prefixPosition != std::string::npos) { cleanFilename = resolvedFilename.substr(prefixPosition + 3); } std::ifstream file(cleanFilename, flags); if (file.is_open()) { if (_options->loadAsynchronously() && AbstractCanvas::defaultCanvas() != nullptr && AbstractCanvas::defaultCanvas()->isWorkerRegistered("file-protocol")) { file.close(); auto worker = AbstractCanvas::defaultCanvas()->getWorker("file-protocol"); _workerSlots.insert(std::make_pair(worker, worker->message()->connect([=](async::Worker::Ptr, async::Worker::Message message) { if (message.type == "complete") { void* bytes = &*message.data.begin(); data().assign(static_cast<unsigned char*>(bytes), static_cast<unsigned char*>(bytes) + message.data.size()); _complete->execute(loader); _runningLoaders.remove(loader); _workerSlots.erase(worker); } else if (message.type == "progress") { float ratio = *reinterpret_cast<float*>(&*message.data.begin()); _progress->execute(loader, ratio); } else if (message.type == "error") { _error->execute(loader); _complete->execute(loader); _runningLoaders.remove(loader); _workerSlots.erase(worker); } }))); auto offset = options->seekingOffset(); auto length = options->seekedLength(); std::vector<char> offsetByteArray(4); offsetByteArray[0] = (offset & 0xff000000) >> 24; offsetByteArray[1] = (offset & 0x00ff0000) >> 16; offsetByteArray[2] = (offset & 0x0000ff00) >> 8; offsetByteArray[3] = (offset & 0x000000ff); std::vector<char> lengthByteArray(4); lengthByteArray[0] = (length & 0xff000000) >> 24; lengthByteArray[1] = (length & 0x00ff0000) >> 16; lengthByteArray[2] = (length & 0x0000ff00) >> 8; lengthByteArray[3] = (length & 0x000000ff); std::vector<char> input; input.insert(input.end(), offsetByteArray.begin(), offsetByteArray.end()); input.insert(input.end(), 
lengthByteArray.begin(), lengthByteArray.end()); input.insert(input.end(), cleanFilename.begin(), cleanFilename.end()); worker->start(input); } else { file.seekg(0, std::ios::end); auto offset = options->seekingOffset(); const auto length = options->seekedLength() > 0 ? static_cast<std::ifstream::pos_type>(options->seekedLength()) : file.tellg(); // FIXME: use fixed size buffers and call _progress accordingly _progress->execute(shared_from_this(), 0.0); data().resize(length); file.seekg(offset, std::ios::beg); file.read((char*)&data()[0], length); file.close(); _progress->execute(loader, 1.0); _complete->execute(shared_from_this()); _runningLoaders.remove(loader); } } else { _error->execute(shared_from_this()); } } bool FileProtocol::fileExists(const std::string& filename) { std::ifstream file(filename, std::ios::in | std::ios::binary); return file.is_open(); } bool FileProtocol::isAbsolutePath(const std::string& filename) const { const auto cleanFilename = File::sanitizeFilename(filename); #if MINKO_PLATFORM == MINKO_PLATFORM_WINDOWS return cleanFilename.find(":/") != std::string::npos; #else return cleanFilename.find_first_of("/") == 0u; #endif }
/*
 * Copyright 2021 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thoughtworks.go.api.cctray;

import com.thoughtworks.go.api.ControllerMethods;
import com.thoughtworks.go.server.service.CcTrayService;
import com.thoughtworks.go.server.service.SecurityService;
import com.thoughtworks.go.spark.RequestContext;
import com.thoughtworks.go.spark.Routes;
import com.thoughtworks.go.spark.SparkController;
import com.thoughtworks.go.spark.spring.SparkSpringController;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Component;
import spark.HaltException;
import spark.Request;
import spark.Response;

import java.io.IOException;
import java.io.OutputStreamWriter;

import static spark.Spark.*;

@Component
public class CctrayController implements SparkSpringController, SparkController {
    private static final String ACCESS_DENIED_XML_RESPONSE = "<access-denied>\n" +
            "  <message>You are not authenticated!</message>\n" +
            "</access-denied>";

    private final SecurityService securityService;
    private final CcTrayService ccTrayService;

    @Autowired
    public CctrayController(SecurityService securityService, CcTrayService ccTrayService) {
        this.securityService = securityService;
        this.ccTrayService = ccTrayService;
    }

    @Override
    public String controllerBasePath() {
        return Routes.CCTray.BASE;
    }

    @Override
    public void setupRoutes() {
        before(controllerBasePath(), this::setContentType);
        before(controllerBasePath(), this::checkUserAnd403);
        get(controllerBasePath(), this::index);
    }

    public String index(Request req, Response res) throws IOException {
        OutputStreamWriter appendable = new OutputStreamWriter(res.raw().getOutputStream());
        ccTrayService.renderCCTrayXML(siteUrlPrefix(req), currentUsername().getUsername().toString(), appendable, etag -> setEtagHeader(res, etag));
        appendable.flush();
        // because we've streamed the content already.
        return ControllerMethods.NOTHING;
    }

    private void setEtagHeader(Response res, String value) {
        if (value == null) {
            return;
        }
        res.header("ETag", '"' + value + '"');
    }

    private String siteUrlPrefix(Request req) {
        return RequestContext.requestContext(req).urlFor("");
    }

    private void setContentType(Request request, Response response) {
        response.raw().setCharacterEncoding("utf-8");
        response.type(getMimeType());
    }

    protected String getMimeType() {
        return "application/xml";
    }

    private void checkUserAnd403(Request request, Response response) {
        if (!securityService.isSecurityEnabled()) {
            return;
        }
        checkNonAnonymousUser(request, response);
    }

    public void checkNonAnonymousUser(Request req, Response res) {
        if (currentUsername().isAnonymous()) {
            throw renderForbiddenResponse();
        }
    }

    private HaltException renderForbiddenResponse() {
        return halt(HttpStatus.FORBIDDEN.value(), ACCESS_DENIED_XML_RESPONSE);
    }
}
<reponame>saulocbarreto/bgslibrary
#pragma once

#include <iostream>

#include <opencv2/opencv.hpp>
// opencv legacy includes
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

namespace bgslibrary
{
  namespace algorithms
  {
    namespace vumeter
    {
      class TBackground
      {
      public:
        TBackground();
        virtual ~TBackground();

        virtual void Clear();
        virtual void Reset();

        virtual int UpdateBackground(IplImage * pSource, IplImage *pBackground, IplImage *pMotionMask);
        virtual int UpdateTest(IplImage *pSource, IplImage *pBackground, IplImage *pTest, int nX, int nY, int nInd);
        virtual IplImage *CreateTestImg();

        virtual int GetParameterCount();
        virtual std::string GetParameterName(int nInd);
        virtual std::string GetParameterValue(int nInd);
        virtual int SetParameterValue(int nInd, std::string csNew);

      protected:
        virtual int Init(IplImage * pSource);
        virtual bool isInitOk(IplImage * pSource, IplImage *pBackground, IplImage *pMotionMask);
      };
    }
  }
}
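The TBackground interface fixes a contract: UpdateBackground consumes a source frame and maintains a background model plus a motion mask. A minimal Python sketch of one algorithm that fits this contract, a running-average subtractor; this is illustrative only and is not the VuMeter algorithm that actually sits behind this header:

import numpy as np

class RunningAverageBackground:
    # Illustrative stand-in: same update contract (source in, background
    # model and motion mask out), different algorithm than VuMeter.
    def __init__(self, alpha=0.05, threshold=30):
        self.alpha = alpha          # blend rate for the background model
        self.threshold = threshold  # per-pixel motion threshold
        self.background = None

    def update_background(self, source):
        frame = source.astype(np.float32)
        if self.background is None:
            self.background = frame
        diff = np.abs(frame - self.background)
        motion_mask = (diff > self.threshold).astype(np.uint8) * 255
        # Blend the new frame into the model.
        self.background = (1 - self.alpha) * self.background + self.alpha * frame
        return self.background.astype(np.uint8), motion_mask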
<gh_stars>100-1000 [ { "home": "United States", "away": "Ecuador", "homescore": "0", "awayscore": "0" }, { "home": "Orlando City SC", "away": "Philadelphia Union", "homescore": "2", "awayscore": "2" }, { "home": "Sydney FC", "away": "<NAME>", "homescore": "2", "awayscore": "2" }, { "home": "Pumas UNAM", "away": "Independiente del Valle", "homescore": "(3) 2", "awayscore": "(5) 1" }, { "home": "Serbia", "away": "Cyprus", "homescore": "2", "awayscore": "1" }, { "home": "Romania", "away": "Congo DR", "homescore": "1", "awayscore": "1" }, { "home": "Serbia", "away": "Cyprus", "homescore": "2", "awayscore": "1" }, { "home": "Romania", "away": "Congo DR", "homescore": "1", "awayscore": "1" }, { "home": "United States", "away": "Ecuador", "homescore": "0", "awayscore": "0" }, { "home": "<NAME>", "away": "Osasuna", "homescore": "1", "awayscore": "0" }, { "home": "Llagostera", "away": "Almeria", "homescore": "0", "awayscore": "0" }, { "home": "Mirandes", "away": "Cordoba", "homescore": "0", "awayscore": "3" }, { "home": "Numancia", "away": "Lugo", "homescore": "1", "awayscore": "0" }, { "home": "Ponferradina", "away": "Albacete", "homescore": "2", "awayscore": "1" }, { "home": "Bari", "away": "Novara", "homescore": "3", "awayscore": "4" }, { "home": "<NAME>", "away": "St Gallen", "homescore": "3", "awayscore": "0" }, { "home": "FC Basel", "away": "Grasshoppers", "homescore": "0", "awayscore": "1" }, { "home": "FC Thun", "away": "Young Boys", "homescore": "0", "awayscore": "3" }, { "home": "FC Zürich", "away": "FC Vaduz", "homescore": "3", "awayscore": "1" }, { "home": "Lucerne", "away": "FC Sion", "homescore": "2", "awayscore": "2" }, { "home": "Orlando City SC", "away": "Philadelphia Union", "homescore": "2", "awayscore": "2" }, { "home": "Tigre", "away": "<NAME>", "homescore": "(2) 0", "awayscore": "(4) 0" }, { "home": "Racing Club", "away": "Gimnasia y Tiro (Orán)" }, { "home": "San Telmo", "away": "UAI Urquiza", "homescore": "1", "awayscore": "0" }, { "home": "Comunicaciones (Mercedes)", "away": "San Carlos", "homescore": "1", "awayscore": "0" }, { "home": "Botafogo", "away": "Atletico Paranaense", "homescore": "2", "awayscore": "1" }, { "home": "Figueirense FC", "away": "Santos FC", "homescore": "2", "awayscore": "2" }, { "home": "América Mineiro", "away": "EC Vitória", "homescore": "1", "awayscore": "1" }, { "home": "Flamengo", "away": "Chapecoense AF", "homescore": "2", "awayscore": "2" }, { "home": "Coritiba FBC", "away": "São Paulo", "homescore": "0", "awayscore": "0" }, { "home": "Palmeiras", "away": "Fluminense FC", "homescore": "0", "awayscore": "0" }, { "home": "Santa Cruz FC", "away": "Cruzeiro", "homescore": "1", "awayscore": "1" }, { "home": "América RN", "away": "SE Gama", "homescore": "3", "awayscore": "2" }, { "home": "Bahia", "away": "Joinville", "homescore": "1", "awayscore": "0" }, { "home": "Atlético Nacional", "away": "La Equidad", "homescore": "2", "awayscore": "0" }, { "home": "Tigres FC", "away": "Llaneros", "homescore": "0", "awayscore": "0" }, { "home": "Deportivo Anzoategui", "away": "Zamora", "homescore": "1", "awayscore": "1" }, { "home": "Sydney FC", "away": "Shandong Luneng Taishan", "homescore": "2", "awayscore": "2" }, { "home": "FC Seoul", "away": "Urawa Red Diamonds", "homescore": "3", "awayscore": "2" }, { "home": "El Jaish", "away": "Lekhwiya", "homescore": "2", "awayscore": "4" }, { "home": "Zob-Ahan", "away": "Al-Ain", "homescore": "0", "awayscore": "2" }, { "home": "Blooming", "away": "Real Potosí", "homescore": "2", "awayscore": "3" }, { "home": "Ciclon 
<NAME>", "away": "The Strongest", "homescore": "2", "awayscore": "0" }, { "home": "Club Petrolero de Yacuiba", "away": "San José", "homescore": "2", "awayscore": "0" }, { "home": "<NAME>", "away": "<NAME>", "homescore": "3", "awayscore": "1" }, { "home": "Universitario de Sucre", "away": "Wilstermann", "homescore": "4", "awayscore": "2" }, { "home": "Bolívar", "away": "Sport Boys Warnes" }, { "home": "Enyimba", "away": "Warri Wolves" }, { "home": "Wikki Tourists", "away": "Enyimba" } ]
# -*- coding: utf-8 -*-


class RumpsError(Exception):
    """A generic rumps error occurred."""


class InternalRumpsError(RumpsError):
    """Internal mechanism powering functionality of rumps failed."""
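Because InternalRumpsError subclasses RumpsError, callers can catch the base class and handle both. A short usage sketch (do_something is a hypothetical caller, shown only to exercise the hierarchy above):

def do_something():
    # Hypothetical operation that fails inside rumps internals.
    raise InternalRumpsError("status bar backend unavailable")

try:
    do_something()
except RumpsError as err:
    # Catching the base class handles InternalRumpsError too.
    print(f"rumps failed: {err}")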
// Copyright (C) 2013 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.client.diff; import com.google.gerrit.client.diff.DiffInfo.Region; import com.google.gerrit.client.diff.DiffInfo.Span; import com.google.gerrit.client.rpc.Natives; import com.google.gwt.core.client.JavaScriptObject; import com.google.gwt.core.client.JsArray; import com.google.gwt.core.client.JsArrayString; import com.google.gwt.dom.client.Element; import com.google.gwt.dom.client.NativeEvent; import com.google.gwt.user.client.DOM; import com.google.gwt.user.client.EventListener; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import net.codemirror.lib.CodeMirror; import net.codemirror.lib.CodeMirror.LineClassWhere; import net.codemirror.lib.Configuration; import net.codemirror.lib.Pos; /** Colors modified regions for {@link Unified}. */ class UnifiedChunkManager extends ChunkManager { private static final JavaScriptObject focus = initOnClick(); private static native JavaScriptObject initOnClick() /*-{ return $entry(function(e){ @com.google.gerrit.client.diff.UnifiedChunkManager::focus( Lcom/google/gwt/dom/client/NativeEvent;)(e) }); }-*/; private List<UnifiedDiffChunkInfo> chunks; @Override DiffChunkInfo getFirst() { return !chunks.isEmpty() ? 
chunks.get(0) : null; } private static void focus(NativeEvent event) { Element e = Element.as(event.getEventTarget()); for (e = DOM.getParent(e); e != null; e = DOM.getParent(e)) { EventListener l = DOM.getEventListener(e); if (l instanceof Unified) { ((Unified) l).getCmFromSide(DisplaySide.A).focus(); event.stopPropagation(); } } } static void focusOnClick(Element e) { onClick(e, focus); } private final Unified host; private final CodeMirror cm; UnifiedChunkManager(Unified host, CodeMirror cm, Scrollbar scrollbar) { super(scrollbar); this.host = host; this.cm = cm; } @Override void render(DiffInfo diff) { super.render(); chunks = new ArrayList<>(); int cmLine = 0; boolean useIntralineBg = diff.metaA() == null || diff.metaB() == null; for (Region current : Natives.asList(diff.content())) { int origLineA = lineMapper.getLineA(); int origLineB = lineMapper.getLineB(); if (current.ab() != null) { int length = current.ab().length(); lineMapper.appendCommon(length); for (int i = 0; i < length; i++) { host.setLineNumber(DisplaySide.A, cmLine + i, origLineA + i + 1); host.setLineNumber(DisplaySide.B, cmLine + i, origLineB + i + 1); } cmLine += length; } else if (current.skip() > 0) { lineMapper.appendCommon(current.skip()); cmLine += current.skip(); // Maybe current.ab().length(); } else if (current.common()) { lineMapper.appendCommon(current.b().length()); cmLine += current.b().length(); } else { cmLine += render(current, cmLine, useIntralineBg); } } host.setLineNumber(DisplaySide.A, cmLine, lineMapper.getLineA() + 1); host.setLineNumber(DisplaySide.B, cmLine, lineMapper.getLineB() + 1); } private int render(Region region, int cmLine, boolean useIntralineBg) { int startA = lineMapper.getLineA(); int startB = lineMapper.getLineB(); JsArrayString a = region.a(); JsArrayString b = region.b(); int aLen = a != null ? a.length() : 0; int bLen = b != null ? b.length() : 0; boolean insertOrDelete = a == null || b == null; colorLines( cm, insertOrDelete && !useIntralineBg ? UnifiedTable.style.diffDelete() : UnifiedTable.style.intralineDelete(), cmLine, aLen); colorLines( cm, insertOrDelete && !useIntralineBg ? 
UnifiedTable.style.diffInsert() : UnifiedTable.style.intralineInsert(), cmLine + aLen, bLen); markEdit(DisplaySide.A, cmLine, a, region.editA()); markEdit(DisplaySide.B, cmLine + aLen, b, region.editB()); addGutterTag(region, cmLine); // TODO: verify addGutterTag lineMapper.appendReplace(aLen, bLen); int endA = lineMapper.getLineA() - 1; int endB = lineMapper.getLineB() - 1; if (aLen > 0) { addDiffChunk(DisplaySide.A, endA, aLen, cmLine, bLen > 0); for (int j = 0; j < aLen; j++) { host.setLineNumber(DisplaySide.A, cmLine + j, startA + j + 1); host.setLineNumberEmpty(DisplaySide.B, cmLine + j); } } if (bLen > 0) { addDiffChunk(DisplaySide.B, endB, bLen, cmLine + aLen, aLen > 0); for (int j = 0; j < bLen; j++) { host.setLineNumberEmpty(DisplaySide.A, cmLine + aLen + j); host.setLineNumber(DisplaySide.B, cmLine + aLen + j, startB + j + 1); } } return aLen + bLen; } private void addGutterTag(Region region, int cmLine) { if (region.a() == null) { scrollbar.insert(cm, cmLine, region.b().length()); } else if (region.b() == null) { scrollbar.delete(cm, cm, cmLine, region.a().length()); } else { scrollbar.edit(cm, cmLine, region.b().length()); } } private void markEdit(DisplaySide side, int startLine, JsArrayString lines, JsArray<Span> edits) { if (lines == null || edits == null) { return; } EditIterator iter = new EditIterator(lines, startLine); Configuration bg = Configuration.create().set("className", getIntralineBgFromSide(side)).set("readOnly", true); Configuration diff = Configuration.create().set("className", getDiffColorFromSide(side)).set("readOnly", true); Pos last = Pos.create(0, 0); for (Span span : Natives.asList(edits)) { Pos from = iter.advance(span.skip()); Pos to = iter.advance(span.mark()); if (from.line() == last.line()) { getMarkers().add(cm.markText(last, from, bg)); } else { getMarkers().add(cm.markText(Pos.create(from.line(), 0), from, bg)); } getMarkers().add(cm.markText(from, to, diff)); last = to; colorLines(cm, LineClassWhere.BACKGROUND, getDiffColorFromSide(side), from.line(), to.line()); } } private String getIntralineBgFromSide(DisplaySide side) { return side == DisplaySide.A ? UnifiedTable.style.intralineDelete() : UnifiedTable.style.intralineInsert(); } private String getDiffColorFromSide(DisplaySide side) { return side == DisplaySide.A ? UnifiedTable.style.diffDelete() : UnifiedTable.style.diffInsert(); } private void addDiffChunk( DisplaySide side, int chunkEnd, int chunkSize, int cmLine, boolean edit) { chunks.add(new UnifiedDiffChunkInfo(side, chunkEnd - chunkSize + 1, chunkEnd, cmLine, edit)); } @Override Runnable diffChunkNav(CodeMirror cm, Direction dir) { return () -> { int line = cm.extras().hasActiveLine() ? 
cm.getLineNumber(cm.extras().activeLine()) : 0; int res = Collections.binarySearch( chunks, new UnifiedDiffChunkInfo(cm.side(), 0, 0, line, false), getDiffChunkComparatorCmLine()); diffChunkNavHelper(chunks, host, res, dir); }; } /** Diff chunks are ordered by their starting lines in CodeMirror */ private Comparator<UnifiedDiffChunkInfo> getDiffChunkComparatorCmLine() { return new Comparator<UnifiedDiffChunkInfo>() { @Override public int compare(UnifiedDiffChunkInfo o1, UnifiedDiffChunkInfo o2) { return o1.getCmLine() - o2.getCmLine(); } }; } @Override int getCmLine(int line, DisplaySide side) { int res = Collections.binarySearch( chunks, new UnifiedDiffChunkInfo(side, line, 0, 0, false), // Dummy DiffChunkInfo getDiffChunkComparator()); if (res >= 0) { return chunks.get(res).getCmLine(); } // The line might be within a DiffChunk res = -res - 1; if (res > 0) { UnifiedDiffChunkInfo info = chunks.get(res - 1); if (side == DisplaySide.A && info.isEdit() && info.getSide() == DisplaySide.B) { // Need to use the start and cmLine of the deletion chunk UnifiedDiffChunkInfo delete = chunks.get(res - 2); if (line <= delete.getEnd()) { return delete.getCmLine() + line - delete.getStart(); } // Need to add the length of the insertion chunk return delete.getCmLine() + line - delete.getStart() + info.getEnd() - info.getStart() + 1; } else if (side == info.getSide()) { return info.getCmLine() + line - info.getStart(); } else { return info.getCmLine() + lineMapper.lineOnOther(side, line).getLine() - info.getStart(); } } return line; } LineRegionInfo getLineRegionInfoFromCmLine(int cmLine) { int res = Collections.binarySearch( chunks, new UnifiedDiffChunkInfo(DisplaySide.A, 0, 0, cmLine, false), // Dummy DiffChunkInfo getDiffChunkComparatorCmLine()); if (res >= 0) { // The line is right at the start of a diff chunk. UnifiedDiffChunkInfo info = chunks.get(res); return new LineRegionInfo(info.getStart(), displaySideToRegionType(info.getSide())); } // The line might be within or after a diff chunk. res = -res - 1; if (res > 0) { UnifiedDiffChunkInfo info = chunks.get(res - 1); int lineOnInfoSide = info.getStart() + cmLine - info.getCmLine(); if (lineOnInfoSide > info.getEnd()) { // After a diff chunk if (info.getSide() == DisplaySide.A) { // For the common region after a deletion chunk, associate the line // on side B with a common region. return new LineRegionInfo( lineMapper.lineOnOther(DisplaySide.A, lineOnInfoSide).getLine(), RegionType.COMMON); } return new LineRegionInfo(lineOnInfoSide, RegionType.COMMON); } // Within a diff chunk return new LineRegionInfo(lineOnInfoSide, displaySideToRegionType(info.getSide())); } // The line is before any diff chunk, so it always equals cmLine and // belongs to a common region. return new LineRegionInfo(cmLine, RegionType.COMMON); } enum RegionType { INSERT, DELETE, COMMON, } private static RegionType displaySideToRegionType(DisplaySide side) { return side == DisplaySide.A ? RegionType.DELETE : RegionType.INSERT; } /** * Helper class to associate a line in the original file with the type of the region it belongs * to. * * @field line The 0-based line number in the original file. Note that this might be different * from the line number shown in CodeMirror. * @field type The type of the region the line belongs to. Can be INSERT, DELETE or COMMON. 
*/ static class LineRegionInfo { final int line; final RegionType type; LineRegionInfo(int line, RegionType type) { this.line = line; this.type = type; } DisplaySide getSide() { // Always return DisplaySide.B for INSERT or COMMON return type == RegionType.DELETE ? DisplaySide.A : DisplaySide.B; } } }
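The core of getCmLine above is a binary search over chunks ordered by start line: an exact hit returns the chunk's CodeMirror line, otherwise the preceding chunk's offset is applied. A simplified Python sketch of that common case, ignoring the edit-pair special case for side A inside an insertion chunk and the LineMapper fallback for common regions:

import bisect
from dataclasses import dataclass

@dataclass
class Chunk:
    start: int    # first file line of the chunk
    end: int      # last file line of the chunk
    cm_line: int  # CodeMirror line where the chunk begins

def get_cm_line(chunks, line):
    # chunks must be sorted by start line, as in the Java code.
    starts = [c.start for c in chunks]
    i = bisect.bisect_right(starts, line) - 1
    if i < 0:
        # Before any chunk: file line and CodeMirror line coincide.
        return line
    c = chunks[i]
    if line <= c.end:
        # Inside the chunk: offset from the chunk start.
        return c.cm_line + (line - c.start)
    # After the chunk: simplified identity mapping; the real code
    # consults lineMapper.lineOnOther here instead.
    return line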
package com.mapswithme.maps.bookmarks;

interface CategoryListCallback
{
  void onAddButtonClick();

  void onImportButtonClick();
}
import tensorflow as tf from tonic.tensorflow import updaters class VRegression: def __init__(self, loss=None, optimizer=None, gradient_clip=0): self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model self.variables = self.model.critic.trainable_variables @tf.function def __call__(self, observations, returns): with tf.GradientTape() as tape: values = self.model.critic(observations) loss = self.loss(returns, values) gradients = tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, v=values) class QRegression: def __init__(self, loss=None, optimizer=None, gradient_clip=0): self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model self.variables = self.model.critic.trainable_variables @tf.function def __call__(self, observations, actions, returns): with tf.GradientTape() as tape: values = self.model.critic(observations, actions) loss = self.loss(returns, values) gradients = tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, q=values) class DeterministicQLearning: def __init__(self, loss=None, optimizer=None, gradient_clip=0): self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model self.variables = self.model.critic.trainable_variables @tf.function def __call__( self, observations, actions, next_observations, rewards, discounts ): next_actions = self.model.target_actor(next_observations) next_values = self.model.target_critic(next_observations, next_actions) returns = rewards + discounts * next_values with tf.GradientTape() as tape: values = self.model.critic(observations, actions) loss = self.loss(returns, values) gradients = tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, q=values) class DistributionalDeterministicQLearning: def __init__(self, optimizer=None, gradient_clip=0): self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model self.variables = self.model.critic.trainable_variables @tf.function def __call__( self, observations, actions, next_observations, rewards, discounts ): next_actions = self.model.target_actor(next_observations) next_value_distributions = self.model.target_critic( next_observations, next_actions) values = next_value_distributions.values returns = rewards[:, None] + discounts[:, None] * values targets = next_value_distributions.project(returns) with tf.GradientTape() as tape: value_distributions = self.model.critic(observations, actions) losses = tf.nn.softmax_cross_entropy_with_logits( logits=value_distributions.logits, labels=targets) loss = tf.reduce_mean(losses) gradients = 
tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss) class TargetActionNoise: def __init__(self, scale=0.2, clip=0.5): self.scale = scale self.clip = clip def __call__(self, actions): noises = self.scale * tf.random.normal(actions.shape) noises = tf.clip_by_value(noises, -self.clip, self.clip) actions = actions + noises return tf.clip_by_value(actions, -1, 1) class TwinCriticDeterministicQLearning: def __init__( self, loss=None, optimizer=None, target_action_noise=None, gradient_clip=0 ): self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.target_action_noise = target_action_noise or \ TargetActionNoise(scale=0.2, clip=0.5) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model variables_1 = self.model.critic_1.trainable_variables variables_2 = self.model.critic_2.trainable_variables self.variables = variables_1 + variables_2 @tf.function def __call__( self, observations, actions, next_observations, rewards, discounts ): next_actions = self.model.target_actor(next_observations) next_actions = self.target_action_noise(next_actions) next_values_1 = self.model.target_critic_1( next_observations, next_actions) next_values_2 = self.model.target_critic_2( next_observations, next_actions) next_values = tf.minimum(next_values_1, next_values_2) returns = rewards + discounts * next_values with tf.GradientTape() as tape: values_1 = self.model.critic_1(observations, actions) values_2 = self.model.critic_2(observations, actions) loss_1 = self.loss(returns, values_1) loss_2 = self.loss(returns, values_2) loss = loss_1 + loss_2 gradients = tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, q1=values_1, q2=values_2) class TwinCriticSoftQLearning: def __init__( self, loss=None, optimizer=None, entropy_coeff=0.2, gradient_clip=0 ): self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.entropy_coeff = entropy_coeff self.gradient_clip = gradient_clip def initialize(self, model): self.model = model variables_1 = self.model.critic_1.trainable_variables variables_2 = self.model.critic_2.trainable_variables self.variables = variables_1 + variables_2 @tf.function def __call__( self, observations, actions, next_observations, rewards, discounts ): next_distributions = self.model.actor(next_observations) if hasattr(next_distributions, 'sample_with_log_prob'): outs = next_distributions.sample_with_log_prob() next_actions, next_log_probs = outs else: next_actions = next_distributions.sample() next_log_probs = next_distributions.log_prob(next_actions) next_values_1 = self.model.target_critic_1( next_observations, next_actions) next_values_2 = self.model.target_critic_2( next_observations, next_actions) next_values = tf.minimum(next_values_1, next_values_2) returns = rewards + discounts * ( next_values - self.entropy_coeff * next_log_probs) with tf.GradientTape() as tape: values_1 = self.model.critic_1(observations, actions) values_2 = self.model.critic_2(observations, actions) loss_1 = self.loss(returns, values_1) loss_2 = self.loss(returns, values_2) loss = loss_1 + loss_2 gradients = 
tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, q1=values_1, q2=values_2) class ExpectedSARSA: def __init__( self, num_samples=20, loss=None, optimizer=None, gradient_clip=0 ): self.num_samples = num_samples self.loss = loss or tf.keras.losses.MeanSquaredError() self.optimizer = optimizer or \ tf.keras.optimizers.Adam(lr=1e-3, epsilon=1e-8) self.gradient_clip = gradient_clip def initialize(self, model): self.model = model self.variables = self.model.critic.trainable_variables @tf.function def __call__( self, observations, actions, next_observations, rewards, discounts ): # Approximate the expected next values. next_target_distributions = self.model.target_actor(next_observations) next_actions = next_target_distributions.sample(self.num_samples) next_actions = updaters.merge_first_two_dims(next_actions) next_observations = updaters.tile(next_observations, self.num_samples) next_observations = updaters.merge_first_two_dims(next_observations) next_values = self.model.target_critic(next_observations, next_actions) next_values = tf.reshape(next_values, (self.num_samples, -1)) next_values = tf.reduce_mean(next_values, axis=0) returns = rewards + discounts * next_values with tf.GradientTape() as tape: values = self.model.critic(observations, actions) loss = self.loss(returns, values) gradients = tape.gradient(loss, self.variables) if self.gradient_clip > 0: gradients = tf.clip_by_global_norm( gradients, self.gradient_clip)[0] self.optimizer.apply_gradients(zip(gradients, self.variables)) return dict(loss=loss, q=values)
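All of the critic updaters above share one step: build a bootstrapped target from the target networks, then regress the critic toward it. A small numpy sketch of the twin-critic target used in TwinCriticDeterministicQLearning and TwinCriticSoftQLearning (the q1/q2 arrays are stand-ins for the two target-critic outputs):

import numpy as np

def twin_critic_targets(rewards, discounts, next_values_1, next_values_2):
    # Pessimistic bootstrap: take the minimum of the two target critics
    # to curb overestimation, then form the usual TD target.
    next_values = np.minimum(next_values_1, next_values_2)
    return rewards + discounts * next_values

# Example: a batch of two transitions; discount 0.0 marks a terminal step.
rewards = np.array([1.0, 0.0])
discounts = np.array([0.99, 0.0])
q1 = np.array([10.0, 3.0])
q2 = np.array([9.5, 4.0])
print(twin_critic_targets(rewards, discounts, q1, q2))  # [10.405  0.   ]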
{"id":"02266","group":"easy-ham-1","checksum":{"type":"MD5","value":"dc8353bea2a93134f0e947ec60106c32"},"text":"From <EMAIL> Fri Oct 4 11:02:21 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy<EMAIL>assassin.taint.org\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id 8A40816F1F\n\tfor <jm@localhost>; Fri, 4 Oct 2002 11:01:59 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Fri, 04 Oct 2002 11:01:59 +0100 (IST)\nReceived: from dogma.slashnull.org (localhost [127.0.0.1]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g9480pK08868 for\n <<EMAIL>>; Fri, 4 Oct 2002 09:00:52 +0100\nMessage-Id: <<EMAIL>>\nTo: yyyy<EMAIL>int.org\nFrom: newscientist <<EMAIL>>\nSubject: Blood pressure drugs \"slow ageing\"\nDate: Fri, 04 Oct 2002 08:00:51 -0000\nContent-Type: text/plain; encoding=utf-8\n\nURL: http://www.newsisfree.com/click/-4,8512678,1440/\nDate: Not supplied\n\nDrugs designed to prevent the complications of diabetes may work by slowing \naccelerated ageing - future versions may delay symptoms of ageing in everyone\n\n\n"}
<reponame>gridgentoo/ServiceFabricAzure<filename>src/prod/src/data/tstore/VolatileStoreCopyStream.h<gh_stars>1000+ // ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once #define VOLATILE_STORE_COPY_STREAM_TAG 'scSV' namespace Data { namespace TStore { class VolatileCopyStage { public: enum Enum : byte { // Indicates the copy protocol version will be sent Version = 0, // Indicates that metadata will be sent Metadata = 1, // Indicates that data will be sent Data = 2, // Indicates the copy "completed" marker will be sent Complete = 3, // Indicates no more copy data needs to be sent None = 4 }; }; template<typename TKey, typename TValue> class VolatileStoreCopyStream : public TxnReplicator::OperationDataStream { K_FORCE_SHARED(VolatileStoreCopyStream) public: // TODO: make configurable static const ULONG32 CopyChunkSize = 512 * 1024; // Exposed for testing, normally 500 KB static const ULONG32 MetadataSizeBytes = 18; static NTSTATUS Create( __in Data::TStore::ConsolidationManager<TKey, TValue> & consolidationManager, __in IComparer<TKey> & keyComparer, __in Data::StateManager::IStateSerializer<TKey> & keySerializer, __in Data::StateManager::IStateSerializer<TValue> & valueSerializer, __in StoreTraceComponent & traceComponent, __in KAllocator & allocator, __out SPtr & result) { NTSTATUS status; SPtr output = _new(VOLATILE_STORE_COPY_STREAM_TAG, allocator) VolatileStoreCopyStream( consolidationManager, keyComparer, keySerializer, valueSerializer, traceComponent); if (!output) { return STATUS_INSUFFICIENT_RESOURCES; } status = output->Status(); if (!NT_SUCCESS(status)) { return status; } result = Ktl::Move(output); return status; } ktl::Awaitable<NTSTATUS> GetNextAsync( __in ktl::CancellationToken const &, __out OperationData::CSPtr & result) noexcept override { result = nullptr; NTSTATUS status = STATUS_SUCCESS; try { switch (copyStage_) { case VolatileCopyStage::Version: { result = OnCopyStageVersion(); break; } case VolatileCopyStage::Metadata: { result = OnCopyStageMetadata(); break; } case VolatileCopyStage::Data: { result = OnCopyStageData(); break; } case VolatileCopyStage::Complete: { result = OnCopyStageComplete(); break; } } } catch (ktl::Exception const & e) { status = e.GetStatus(); } co_return status; } void Dispose() override { } private: OperationData::CSPtr OnCopyStageVersion() { // Next copy stage copyStage_ = VolatileCopyStage::Metadata; BinaryWriter writer(this->GetThisAllocator()); ULONG32 CopyProtocolVersion = 2; // TODO: make constant byte CopyOperationVersion = VolatileStoreCopyOperation::Version; writer.Write(CopyProtocolVersion); writer.Write(CopyOperationVersion); OperationData::SPtr resultSPtr = OperationData::Create(this->GetThisAllocator()); resultSPtr->Append(*writer.GetBuffer(0)); StoreEventSource::Events->VolatileStoreCopyStreamStageVersion( traceComponent_->PartitionId, traceComponent_->TraceTag, CopyProtocolVersion, writer.Position); OperationData::CSPtr resultCSPtr = resultSPtr.RawPtr(); return resultCSPtr; } OperationData::CSPtr OnCopyStageMetadata() { // TODO: Trace // Next copy stage bool hasNext = consolidatedStateEnumeratorSPtr_->MoveNext(); if (hasNext) { copyStage_ = VolatileCopyStage::Data; } else { copyStage_ = VolatileCopyStage::Complete; } BinaryWriter writer(this->GetThisAllocator()); byte 
CopyOperationMetadata = VolatileStoreCopyOperation::Metadata; writer.Write(MetadataSizeBytes); writer.Write(CopyOperationMetadata); // Workaround for Linux compiler ULONG32 metadataSize = MetadataSizeBytes; StoreEventSource::Events->VolatileStoreCopyStreamStageMetadata( traceComponent_->PartitionId, traceComponent_->TraceTag, metadataSize, writer.Position); OperationData::SPtr resultSPtr = OperationData::Create(this->GetThisAllocator()); resultSPtr->Append(*writer.GetBuffer(0)); OperationData::CSPtr resultCSPtr = resultSPtr.RawPtr(); return resultCSPtr; } OperationData::CSPtr OnCopyStageData() { LONG64 chunkCount = 0; BinaryWriter keyMemoryBuffer(this->GetThisAllocator()); BinaryWriter valueMemoryBuffer(this->GetThisAllocator()); bool hasNext = true; ULONG totalSize = keyMemoryBuffer.Position + valueMemoryBuffer.Position; while (hasNext && totalSize < CopyChunkSize) { auto item = consolidatedStateEnumeratorSPtr_->Current(); if (item.Value->GetRecordKind() != RecordKind::DeletedVersion) { this->WriteValue(valueMemoryBuffer, *item.Value); this->WriteKey(keyMemoryBuffer, item); chunkCount++; count_++; } totalSize = keyMemoryBuffer.Position + valueMemoryBuffer.Position; // Advance to the next unique key (i.e. skip duplicate keys with smaller LSNs) while (true) { hasNext = consolidatedStateEnumeratorSPtr_->MoveNext(); if (hasNext == false) { break; } TKey nextKey = consolidatedStateEnumeratorSPtr_->Current().Key; if (keyComparerSPtr_->Compare(item.Key, nextKey) != 0) { break; } } } if (hasNext == false) { copyStage_ = VolatileCopyStage::Complete; } keyMemoryBuffer.Write(static_cast<byte>(VolatileStoreCopyOperation::Data)); OperationData::SPtr resultSPtr = OperationData::Create(this->GetThisAllocator()); resultSPtr->Append(*keyMemoryBuffer.GetBuffer(0)); resultSPtr->Append(*valueMemoryBuffer.GetBuffer(0)); StoreEventSource::Events->VolatileStoreCopyStreamStageData( traceComponent_->PartitionId, traceComponent_->TraceTag, chunkCount, count_, keyMemoryBuffer.Position, valueMemoryBuffer.Position); OperationData::CSPtr resultCSPtr = resultSPtr.RawPtr(); return resultCSPtr; } OperationData::CSPtr OnCopyStageComplete() { // TODO: Trace copyStage_ = VolatileCopyStage::None; KBuffer::SPtr operationDataBufferSPtr; auto status = KBuffer::Create(sizeof(byte), operationDataBufferSPtr, GetThisAllocator()); Diagnostics::Validate(status); byte* data = static_cast<byte *>(operationDataBufferSPtr->GetBuffer()); *data = VolatileStoreCopyOperation::Complete; OperationData::SPtr resultSPtr = OperationData::Create(GetThisAllocator()); resultSPtr->Append(*operationDataBufferSPtr); StoreEventSource::Events->VolatileStoreCopyStreamStageComplete( traceComponent_->PartitionId, traceComponent_->TraceTag, count_); OperationData::CSPtr resultCSPtr = resultSPtr.RawPtr(); return resultCSPtr; } // // Name Type Size // // KeySize ULONG32 4 // Kind byte 1 // VersionSequenceNumber LONG64 8 // ValueSize int 4 // OptionalFields byte 1 // // Key TKey KeySize // TTL LONG64 (OptionalFields & HasTTL) ? 8 : 0 // // Note: Largest Key size supported is ULONG32_MAX in bytes. 
// void WriteKey( __in BinaryWriter& memoryBuffer, __in KeyValuePair<TKey, KSharedPtr<VersionedItem<TValue>>> & item) { ULONG metadataStartPosition = memoryBuffer.Position; STORE_ASSERT(item.Value->GetRecordKind() != RecordKind::DeletedVersion, "Got a DeletedVersionedItem"); ULONG recordPosition = memoryBuffer.Position; memoryBuffer.Position += sizeof(ULONG32); // Leave space for size to be filled later memoryBuffer.Write(static_cast<byte>(item.Value->GetRecordKind())); // RecordKind memoryBuffer.Write(item.Value->GetVersionSequenceNumber()); memoryBuffer.Write(item.Value->GetValueSize()); byte optionalFields = 0; memoryBuffer.Write(optionalFields); STORE_ASSERT(memoryBuffer.Position - metadataStartPosition == MetadataSizeBytes, "Metadata size different than expected. Expected={1} Actual={2}", MetadataSizeBytes, memoryBuffer.Position); ULONG keyStartPosition = memoryBuffer.Position; keySerializerSPtr_->Write(item.Key, memoryBuffer); ULONG keyEndPosition = memoryBuffer.Position; STORE_ASSERT(keyEndPosition >= keyStartPosition, "keyEndPosition={1} >= keyStartPosition={2}", keyEndPosition, keyStartPosition); // Go back and write key size memoryBuffer.Position = recordPosition; memoryBuffer.Write(static_cast<ULONG32>(keyEndPosition - keyStartPosition)); // Go back to where we left off memoryBuffer.Position = keyEndPosition; // Write optional fields if (optionalFields & VolatileStoreCopyOptionalFlags::HasTTL) { // TODO: Actual TTL; left here as an example LONG64 ttl = 17; memoryBuffer.Write(ttl); } } void WriteValue( __in BinaryWriter& memoryBuffer, __in VersionedItem<TValue> & item) { if (item.GetRecordKind() != RecordKind::DeletedVersion) { // Serialize the value ULONG valueStartPosition = memoryBuffer.Position; valueSerializerSPtr_->Write(item.GetValue(), memoryBuffer); ULONG valueEndPosition = memoryBuffer.Position; STORE_ASSERT( valueEndPosition >= valueStartPosition, "valueEndPosition={1} >= valueStartPosition={2}", valueEndPosition, valueStartPosition); // Write the checksum of the value's bytes ULONG32 valueSize = static_cast<ULONG32>(valueEndPosition - valueStartPosition); item.SetValueSize(valueSize); } } VolatileStoreCopyStream( __in Data::TStore::ConsolidationManager<TKey, TValue> & consolidationManager, __in IComparer<TKey> & keyComparer, __in Data::StateManager::IStateSerializer<TKey> & keySerializer, __in Data::StateManager::IStateSerializer<TValue> & valueSerializer, __in StoreTraceComponent & traceComponent); KSharedPtr<Data::StateManager::IStateSerializer<TKey>> keySerializerSPtr_ = nullptr; KSharedPtr<Data::StateManager::IStateSerializer<TValue>> valueSerializerSPtr_ = nullptr; KSharedPtr<IComparer<TKey>> keyComparerSPtr_ = nullptr; KSharedPtr<IEnumerator<KeyValuePair<TKey, KSharedPtr<VersionedItem<TValue>>>>> consolidatedStateEnumeratorSPtr_ = nullptr; VolatileCopyStage::Enum copyStage_; LONG64 count_ = 0; StoreTraceComponent::SPtr traceComponent_; }; template<typename TKey, typename TValue> VolatileStoreCopyStream<TKey, TValue>::VolatileStoreCopyStream( __in Data::TStore::ConsolidationManager<TKey, TValue> & consolidationManager, __in IComparer<TKey> & keyComparer, __in Data::StateManager::IStateSerializer<TKey> & keySerializer, __in Data::StateManager::IStateSerializer<TValue> & valueSerializer, __in StoreTraceComponent & traceComponent) : traceComponent_(&traceComponent) , keyComparerSPtr_(&keyComparer) , keySerializerSPtr_(&keySerializer) , valueSerializerSPtr_(&valueSerializer) , copyStage_(VolatileCopyStage::Version) { consolidatedStateEnumeratorSPtr_ = 
consolidationManager.GetAllKeysAndValuesEnumerator(); } template<typename TKey, typename TValue> VolatileStoreCopyStream<TKey, TValue>::~VolatileStoreCopyStream() { } } }
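The copy stream above is a pull-based state machine: Version, then Metadata, then Data chunks packed up to CopyChunkSize, then the Complete marker. A schematic Python generator of that staging, with the wire format heavily simplified (serialize is a hypothetical helper; the real operations carry the key/value layout documented in WriteKey):

COPY_CHUNK_SIZE = 512 * 1024  # mirrors CopyChunkSize above

def copy_stream(items, serialize, chunk_size=COPY_CHUNK_SIZE):
    # items: iterable of (key, versioned_value) pairs from consolidated state.
    yield ("version", b"\x02")         # copy protocol version first
    yield ("metadata", b"")            # then one metadata operation
    buffer = bytearray()
    for key, value in items:
        buffer += serialize(key, value)
        if len(buffer) >= chunk_size:  # flush a full data chunk
            yield ("data", bytes(buffer))
            buffer.clear()
    if buffer:                         # trailing partial chunk
        yield ("data", bytes(buffer))
    yield ("complete", b"")            # copy "completed" marker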
/* Copyright 2018 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef RESONANCE_AUDIO_GEOMETRICAL_ACOUSTICS_ACOUSTIC_RAY_H_ #define RESONANCE_AUDIO_GEOMETRICAL_ACOUSTICS_ACOUSTIC_RAY_H_ #include <array> #include <vector> #include "embree2/rtcore.h" #include "embree2/rtcore_ray.h" #include "base/constants_and_types.h" namespace vraudio { // A class extending Embree's RTCRay (https://embree.github.io/api.html) with // data needed for acoustic computations. // It exposes useful fields through accessors. class RTCORE_ALIGN(16) AcousticRay : public RTCRay { public: enum class RayType { kSpecular, kDiffuse, }; // A constant used to indicate that the ray extends to infinity, if // ray.t_far() == AcousticRay::kInfinity. static const float kInfinity; // Used to offset a ray's origin slightly so that it will not // intersect with the same geometry/primitive that it was generated from // (by reflection, transmission, diffraction, etc.). static const float kRayEpsilon; // Default constructor. Constructs a ray whose origin is at (0, 0, 0) and // points in the +x direction. AcousticRay() : energies_(), type_(RayType::kSpecular), prior_distance_(0.0f) { org[0] = 0.0f; org[1] = 0.0f; org[2] = 0.0f; dir[0] = 1.0f; dir[1] = 0.0f; dir[2] = 0.0f; tnear = 0.0f; tfar = kInfinity; Ng[0] = 0.0f; Ng[1] = 0.0f; Ng[2] = 0.0f; geomID = RTC_INVALID_GEOMETRY_ID; // Members in RTCRay that we do not use (or whose initial values we do not // care) are not initialized: // align0, align1, align2, time, mask, u, v, primID, instID. } // Constructor. // // @param origin Origin of the ray. // @param direction Direction of the ray. // @param t_near Ray parameter corresponding to the start of the ray. // @param t_far Ray parameter corresponding to the end of the ray. Pass in // AcousticRay::kInfinity if there is no end point. // @param energies Ray energies for all frequency bands. // @param ray_type Type of ray. // @param prior_distance Distance traveled before this ray. AcousticRay(const float origin[3], const float direction[3], float t_near, float t_far, const std::array<float, kNumReverbOctaveBands>& energies, RayType ray_type, float prior_distance) : energies_(energies), type_(ray_type), prior_distance_(prior_distance) { org[0] = origin[0]; org[1] = origin[1]; org[2] = origin[2]; dir[0] = direction[0]; dir[1] = direction[1]; dir[2] = direction[2]; tnear = t_near; tfar = t_far; Ng[0] = 0.0f; Ng[1] = 0.0f; Ng[2] = 0.0f; geomID = RTC_INVALID_GEOMETRY_ID; // Members in RTCRay that we do not use (or whose initial values we do not // care) are not initialized: // align0, align1, align2, time, mask, u, v, primID, instID. } // Ray origin. const float* origin() const { return org; } void set_origin(const float origin[3]) { org[0] = origin[0]; org[1] = origin[1]; org[2] = origin[2]; } // Ray direction. const float* direction() const { return dir; } void set_direction(const float direction[3]) { dir[0] = direction[0]; dir[1] = direction[1]; dir[2] = direction[2]; } // Ray parameter t corresponding to the start of the ray segment. 
const float t_near() const { return tnear; } void set_t_near(float t_near) { tnear = t_near; } // Ray parameter t corresponding to the end of the ray segment. const float t_far() const { return tfar; } void set_t_far(float t_far) { tfar = t_far; } // Functions intersected_*() will only return meaningful results after // Intersect() is called, otherwise they return default values as // described below. // // Not normalized geometry normal at the intersection point. // Default value: Vec3fa(0, 0, 0). const float* intersected_geometry_normal() const { return Ng; } void set_intersected_geometry_normal( const float intersected_geometry_normal[3]) { Ng[0] = intersected_geometry_normal[0]; Ng[1] = intersected_geometry_normal[1]; Ng[2] = intersected_geometry_normal[2]; } // Id of the intersected geometry. // Default value: kInvalidGeometryId. const unsigned int intersected_geometry_id() const { return geomID; } // Id of the intersected primitive. // Default value: kInvalidPrimitiveId. const unsigned int intersected_primitive_id() const { return primID; } // Ray energies for all frequency bands. const std::array<float, kNumReverbOctaveBands>& energies() const { return energies_; } void set_energies(const std::array<float, kNumReverbOctaveBands>& energies) { energies_ = energies; } // Ray type. const RayType type() const { return type_; } void set_type(const RayType type) { type_ = type; } // Prior distance. const float prior_distance() const { return prior_distance_; } void set_prior_distance(float prior_distance) { prior_distance_ = prior_distance; } // Finds the first intersection between this ray and a scene. Some fields // will be filled/mutated, which can be examined by the following functions: // - t_far() // - intersected_geometry_normal() // - intersected_geometry_id() // - intersected_primitive_id() // // @param scene An RTCScene to test the intersection. // @return True if an intersection is found. bool Intersect(RTCScene scene) { rtcIntersect(scene, *this); return geomID != RTC_INVALID_GEOMETRY_ID; } private: // Used to determine early-termination of rays. May also be used to model // source strength. std::array<float, kNumReverbOctaveBands> energies_; // Ray type. RayType type_ = RayType::kSpecular; // Accumulated distance traveled on the same path before this ray starts. float prior_distance_ = 0.0f; }; } // namespace vraudio #endif // RESONANCE_AUDIO_GEOMETRICAL_ACOUSTICS_ACOUSTIC_RAY_H_
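AcousticRay carries per-band energies and an accumulated prior_distance precisely so that a reflected continuation ray can be spawned from a hit point. A Python sketch of that bookkeeping (NUM_BANDS is a stand-in for kNumReverbOctaveBands, and the absorption model is an assumption for illustration, not Resonance Audio's reflection code):

from dataclasses import dataclass, field

NUM_BANDS = 9  # stand-in for kNumReverbOctaveBands

@dataclass
class Ray:
    origin: tuple = (0.0, 0.0, 0.0)
    direction: tuple = (1.0, 0.0, 0.0)
    energies: list = field(default_factory=lambda: [1.0] * NUM_BANDS)
    prior_distance: float = 0.0

def reflect(ray, hit_point, hit_distance, new_direction, absorption):
    # Spawn the continuation ray: attenuate each band by the surface's
    # per-band absorption and accumulate the distance traveled so far,
    # which is what the energies_/prior_distance_ fields are for.
    energies = [e * (1.0 - a) for e, a in zip(ray.energies, absorption)]
    return Ray(origin=hit_point, direction=new_direction,
               energies=energies,
               prior_distance=ray.prior_distance + hit_distance)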
""" Limited dependent variable and qualitative variables. Includes binary outcomes, count data, (ordered) ordinal data and limited dependent variables. General References -------------------- <NAME> and <NAME>. `Regression Analysis of Count Data`. Cambridge, 1998 <NAME>. `Limited-Dependent and Qualitative Variables in Econometrics`. Cambridge, 1983. <NAME>. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003. """ __all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial", "GeneralizedPoisson", "NegativeBinomialP", "CountModel"] from statsmodels.compat.pandas import Appender import warnings import numpy as np from pandas import MultiIndex, get_dummies from scipy import special, stats from scipy.special import digamma, gammaln, loggamma, polygamma from scipy.stats import nbinom from statsmodels.base.data import handle_data # for mnlogit from statsmodels.base.l1_slsqp import fit_l1_slsqp import statsmodels.base.model as base import statsmodels.base.wrapper as wrap from statsmodels.distributions import genpoisson_p import statsmodels.regression.linear_model as lm from statsmodels.tools import data as data_tools, tools from statsmodels.tools.decorators import cache_readonly from statsmodels.tools.numdiff import approx_fprime_cs from statsmodels.tools.sm_exceptions import ( PerfectSeparationError, SpecificationWarning, ) try: import cvxopt # noqa:F401 have_cvxopt = True except ImportError: have_cvxopt = False # TODO: When we eventually get user-settable precision, we need to change # this FLOAT_EPS = np.finfo(float).eps # Limit for exponentials to avoid overflow EXP_UPPER_LIMIT = np.log(np.finfo(np.float64).max) - 1.0 # TODO: add options for the parameter covariance/variance # ie., OIM, EIM, and BHHH see Green 21.4 _discrete_models_docs = """ """ _discrete_results_docs = """ %(one_line_description)s Parameters ---------- model : A DiscreteModel instance params : array_like The parameters of a fitted model. hessian : array_like The hessian of the fitted model. scale : float A scale parameter for the covariance matrix. Attributes ---------- df_resid : float See model definition. df_model : float See model definition. llf : float Value of the loglikelihood %(extra_attr)s""" _l1_results_attr = """ nnz_params : int The number of nonzero parameters in the model. Train with trim_params == True or else numerical error will distort this. trimmed : bool array trimmed[i] == True if the ith parameter was trimmed from the model.""" _get_start_params_null_docs = """ Compute one-step moment estimator for null (constant-only) model This is a preliminary estimator used as start_params. Returns ------- params : ndarray parameter estimate based one one-step moment matching """ _check_rank_doc = """ check_rank : bool Check exog rank to determine model degrees of freedom. Default is True. Setting to False reduces model initialization time when exog.shape[1] is large. 
""" # helper for MNLogit (will be generally useful later) def _numpy_to_dummies(endog): if endog.ndim == 2 and endog.dtype.kind not in ["S", "O"]: endog_dummies = endog ynames = range(endog.shape[1]) else: dummies = get_dummies(endog, drop_first=False) ynames = {i: dummies.columns[i] for i in range(dummies.shape[1])} endog_dummies = np.asarray(dummies, dtype=float) return endog_dummies, ynames return endog_dummies, ynames def _pandas_to_dummies(endog): if endog.ndim == 2: if endog.shape[1] == 1: yname = endog.columns[0] endog_dummies = get_dummies(endog.iloc[:, 0]) else: # series yname = 'y' endog_dummies = endog else: yname = endog.name endog_dummies = get_dummies(endog) ynames = endog_dummies.columns.tolist() return endog_dummies, ynames, yname def _validate_l1_method(method): """ As of 0.10.0, the supported values for `method` in `fit_regularized` are "l1" and "l1_cvxopt_cp". If an invalid value is passed, raise with a helpful error message Parameters ---------- method : str Raises ------ ValueError """ if method not in ['l1', 'l1_cvxopt_cp']: raise ValueError('`method` = {method} is not supported, use either ' '"l1" or "l1_cvxopt_cp"'.format(method=method)) #### Private Model Classes #### class DiscreteModel(base.LikelihoodModel): """ Abstract class for discrete choice models. This class does not do anything itself but lays out the methods and call signature expected of child classes in addition to those of statsmodels.model.LikelihoodModel. """ def __init__(self, endog, exog, check_rank=True, **kwargs): self._check_rank = check_rank super().__init__(endog, exog, **kwargs) self.raise_on_perfect_prediction = True def initialize(self): """ Initialize is called by statsmodels.model.LikelihoodModel.__init__ and should contain any preprocessing that needs to be done for a model. """ if self._check_rank: # assumes constant rank = tools.matrix_rank(self.exog, method="qr") else: # If rank check is skipped, assume full rank = self.exog.shape[1] self.df_model = float(rank - 1) self.df_resid = float(self.exog.shape[0] - rank) def cdf(self, X): """ The cumulative distribution function of the model. """ raise NotImplementedError def pdf(self, X): """ The probability density (mass) function of the model. """ raise NotImplementedError def _check_perfect_pred(self, params, *args): endog = self.endog fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]])) if (self.raise_on_perfect_prediction and np.allclose(fittedvalues - endog, 0)): msg = "Perfect separation detected, results not available" raise PerfectSeparationError(msg) @Appender(base.LikelihoodModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): """ Fit the model using maximum likelihood. The rest of the docstring is from statsmodels.base.model.LikelihoodModel.fit """ if callback is None: callback = self._check_perfect_pred else: pass # TODO: make a function factory to have multiple call-backs mlefit = super().fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) return mlefit # It is up to subclasses to wrap results def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=True, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, qc_verbose=False, **kwargs): """ Fit the model using a regularized maximum likelihood. 
The regularization method AND the solver used is determined by the argument method. Parameters ---------- start_params : array_like, optional Initial guess of the solution for the loglikelihood maximization. The default is an array of zeros. method : 'l1' or 'l1_cvxopt_cp' See notes for details. maxiter : {int, 'defined_by_method'} Maximum number of iterations to perform. If 'defined_by_method', then use method defaults (see notes). full_output : bool Set to True to have all available output in the Results object's mle_retvals attribute. The output is dependent on the solver. See LikelihoodModelResults notes section for more information. disp : bool Set to True to print convergence messages. fargs : tuple Extra arguments passed to the likelihood function, i.e., loglike(x,*args). callback : callable callback(xk) Called after each iteration, as callback(xk), where xk is the current parameter vector. retall : bool Set to True to return list of solutions at each iteration. Available in Results object's mle_retvals attribute. alpha : non-negative scalar or numpy array (same size as parameters) The weight multiplying the l1 penalty term. trim_mode : 'auto, 'size', or 'off' If not 'off', trim (set to zero) parameters that would have been zero if the solver reached the theoretical minimum. If 'auto', trim params using the Theory above. If 'size', trim params if they have very small absolute value. size_trim_tol : float or 'auto' (default = 'auto') Tolerance used when trim_mode == 'size'. auto_trim_tol : float Tolerance used when trim_mode == 'auto'. qc_tol : float Print warning and do not allow auto trim when (ii) (above) is violated by this much. qc_verbose : bool If true, print out a full QC report upon failure. **kwargs Additional keyword arguments used when fitting the model. Returns ------- Results A results instance. Notes ----- Using 'l1_cvxopt_cp' requires the cvxopt module. Extra parameters are not penalized if alpha is given as a scalar. An example is the shape parameter in NegativeBinomial `nb1` and `nb2`. Optional arguments for the solvers (available in Results.mle_settings):: 'l1' acc : float (default 1e-6) Requested accuracy as used by slsqp 'l1_cvxopt_cp' abstol : float absolute accuracy (default: 1e-7). reltol : float relative accuracy (default: 1e-6). feastol : float tolerance for feasibility conditions (default: 1e-7). refinement : int number of iterative refinement steps when solving KKT equations (default: 1). Optimization methodology With :math:`L` the negative log likelihood, we solve the convex but non-smooth problem .. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k| via the transformation to the smooth, convex, constrained problem in twice as many variables (adding the "added variables" :math:`u_k`) .. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k, subject to .. math:: -u_k \\leq \\beta_k \\leq u_k. With :math:`\\partial_k L` the derivative of :math:`L` in the :math:`k^{th}` parameter direction, theory dictates that, at the minimum, exactly one of two conditions holds: (i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0` (ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0` """ _validate_l1_method(method) # Set attributes based on method cov_params_func = self.cov_params_func_l1 ### Bundle up extra kwargs for the dictionary kwargs. 
These are ### passed through super(...).fit() as kwargs and unpacked at ### appropriate times alpha = np.array(alpha) assert alpha.min() >= 0 try: kwargs['alpha'] = alpha except TypeError: kwargs = dict(alpha=alpha) kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0]) kwargs['trim_mode'] = trim_mode kwargs['size_trim_tol'] = size_trim_tol kwargs['auto_trim_tol'] = auto_trim_tol kwargs['qc_tol'] = qc_tol kwargs['qc_verbose'] = qc_verbose ### Define default keyword arguments to be passed to super(...).fit() if maxiter == 'defined_by_method': if method == 'l1': maxiter = 1000 elif method == 'l1_cvxopt_cp': maxiter = 70 ## Parameters to pass to super(...).fit() # For the 'extra' parameters, pass all that are available, # even if we know (at this point) we will only use one. extra_fit_funcs = {'l1': fit_l1_slsqp} if have_cvxopt and method == 'l1_cvxopt_cp': from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp elif method.lower() == 'l1_cvxopt_cp': raise ValueError("Cannot use l1_cvxopt_cp as cvxopt " "was not found (install it, or use method='l1' instead)") if callback is None: callback = self._check_perfect_pred else: pass # make a function factory to have multiple call-backs mlefit = super().fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs, cov_params_func=cov_params_func, **kwargs) return mlefit # up to subclasses to wrap results def cov_params_func_l1(self, likelihood_model, xopt, retvals): """ Computes cov_params on a reduced parameter space corresponding to the nonzero parameters resulting from the l1 regularized fit. Returns a full cov_params matrix, with entries corresponding to zero'd values set to np.nan. """ H = likelihood_model.hessian(xopt) trimmed = retvals['trimmed'] nz_idx = np.nonzero(~trimmed)[0] nnz_params = (~trimmed).sum() if nnz_params > 0: H_restricted = H[nz_idx[:, None], nz_idx] # Covariance estimate for the nonzero params H_restricted_inv = np.linalg.inv(-H_restricted) else: H_restricted_inv = np.zeros(0) cov_params = np.nan * np.ones(H.shape) cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv return cov_params def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. 
""" raise NotImplementedError def _derivative_exog(self, params, exog=None, dummy_idx=None, count_idx=None): """ This should implement the derivative of the non-linear function """ raise NotImplementedError def _derivative_exog_helper(self, margeff, params, exog, dummy_idx, count_idx, transform): """ Helper for _derivative_exog to wrap results appropriately """ from .discrete_margins import _get_count_effects, _get_dummy_effects if count_idx is not None: margeff = _get_count_effects(margeff, exog, count_idx, transform, self, params) if dummy_idx is not None: margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform, self, params) return margeff class BinaryModel(DiscreteModel): _continuous_ok = False def __init__(self, endog, exog, check_rank=True, **kwargs): # unconditional check, requires no extra kwargs added by subclasses self._check_kwargs(kwargs) super().__init__(endog, exog, check_rank, **kwargs) if not issubclass(self.__class__, MultinomialModel): if not np.all((self.endog >= 0) & (self.endog <= 1)): raise ValueError("endog must be in the unit interval.") if (not self._continuous_ok and np.any(self.endog != np.round(self.endog))): raise ValueError("endog must be binary, either 0 or 1") def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. Parameters ---------- params : array_like Fitted parameters of the model. exog : array_like 1d or 2d array of exogenous values. If not supplied, the whole exog attribute of the model is used. linear : bool, optional If True, returns the linear predictor dot(exog,params). Else, returns the value of the cdf at the linear predictor. Returns ------- array Fitted values at exog. """ if exog is None: exog = self.exog if not linear: return self.cdf(np.dot(exog, params)) else: return np.dot(exog, params) @Appender(DiscreteModel.fit_regularized.__doc__) def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): _validate_l1_method(method) bnryfit = super().fit_regularized(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) discretefit = L1BinaryResults(self, bnryfit) return L1BinaryResultsWrapper(discretefit) def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predict. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. """ if exog is None: exog = self.exog dF = self.pdf(np.dot(exog, params))[:,None] * exog if 'ey' in transform: dF /= self.predict(params, exog)[:,None] return dF def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None): """ For computing marginal effects returns dF(XB) / dX where F(.) is the predicted probabilities transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. 
""" # Note: this form should be appropriate for # group 1 probit, logit, logistic, cloglog, heckprob, xtprobit if exog is None: exog = self.exog margeff = np.dot(self.pdf(np.dot(exog, params))[:, None], params[None, :]) if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:, None] return self._derivative_exog_helper(margeff, params, exog, dummy_idx, count_idx, transform) class MultinomialModel(BinaryModel): def _handle_data(self, endog, exog, missing, hasconst, **kwargs): if data_tools._is_using_ndarray_type(endog, None): endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' elif data_tools._is_using_pandas(endog, None): endog_dummies, ynames, yname = _pandas_to_dummies(endog) else: endog = np.asarray(endog) endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' if not isinstance(ynames, dict): ynames = dict(zip(range(endog_dummies.shape[1]), ynames)) self._ynames_map = ynames data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs) data.ynames = yname # overwrite this to single endog name data.orig_endog = endog self.wendog = data.endog # repeating from upstream... for key in kwargs: if key in ['design_info', 'formula']: # leave attached to data continue try: setattr(self, key, data.__dict__.pop(key)) except KeyError: pass return data def initialize(self): """ Preprocesses the data for MNLogit. """ super().initialize() # This is also a "whiten" method in other models (eg regression) self.endog = self.endog.argmax(1) # turn it into an array of col idx self.J = self.wendog.shape[1] self.K = self.exog.shape[1] self.df_model *= (self.J-1) # for each J - 1 equation. self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1) def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. Parameters ---------- params : array_like 2d array of fitted parameters of the model. Should be in the order returned from the model. exog : array_like 1d or 2d array of exogenous values. If not supplied, the whole exog attribute of the model is used. If a 1d array is given it assumed to be 1 row of exogenous variables. If you only have one regressor and would like to do prediction, you must provide a 2d array with shape[1] == 1. linear : bool, optional If True, returns the linear predictor dot(exog,params). Else, returns the value of the cdf at the linear predictor. Notes ----- Column 0 is the base case, the rest conform to the rows of params shifted up one for the base case. 
""" if exog is None: # do here to accommodate user-given exog exog = self.exog if exog.ndim == 1: exog = exog[None] pred = super().predict(params, exog, linear) if linear: pred = np.column_stack((np.zeros(len(exog)), pred)) return pred @Appender(DiscreteModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): if start_params is None: start_params = np.zeros((self.K * (self.J-1))) else: start_params = np.asarray(start_params) callback = lambda x : None # placeholder until check_perfect_pred # skip calling super to handle results from LikelihoodModel mnfit = base.LikelihoodModel.fit(self, start_params = start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) mnfit.params = mnfit.params.reshape(self.K, -1, order='F') mnfit = MultinomialResults(self, mnfit) return MultinomialResultsWrapper(mnfit) @Appender(DiscreteModel.fit_regularized.__doc__) def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): if start_params is None: start_params = np.zeros((self.K * (self.J-1))) else: start_params = np.asarray(start_params) mnfit = DiscreteModel.fit_regularized( self, start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) mnfit.params = mnfit.params.reshape(self.K, -1, order='F') mnfit = L1MultinomialResults(self, mnfit) return L1MultinomialResultsWrapper(mnfit) def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predicted probabilities for each choice. dFdparams is of shape nobs x (J*K) x (J-1)*K. The zero derivatives for the base category are not included. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. """ if exog is None: exog = self.exog if params.ndim == 1: # will get flatted from approx_fprime params = params.reshape(self.K, self.J-1, order='F') eXB = np.exp(np.dot(exog, params)) sum_eXB = (1 + eXB.sum(1))[:,None] J = int(self.J) K = int(self.K) repeat_eXB = np.repeat(eXB, J, axis=1) X = np.tile(exog, J-1) # this is the derivative wrt the base level F0 = -repeat_eXB * X / sum_eXB ** 2 # this is the derivative wrt the other levels when # dF_j / dParams_j (ie., own equation) #NOTE: this computes too much, any easy way to cut down? F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2) F1 = F1.transpose((1,0,2)) # put the nobs index first # other equation index other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool) F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \ (sum_eXB**2)).transpose((1,0,2))[:, other_idx] dFdX = np.concatenate((F0[:, None,:], F1), axis=1) if 'ey' in transform: dFdX /= self.predict(params, exog)[:, :, None] return dFdX def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None): """ For computing marginal effects returns dF(XB) / dX where F(.) is the predicted probabilities transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. 
        Not all of these make sense in the presence of discrete regressors,
        but checks are done in the results in get_margeff.

        For Multinomial models the marginal effects are

        P[j] * (params[j] - sum_k P[k]*params[k])

        It is returned unshaped, so that each row contains each of the J
        equations. This makes it easier to take derivatives of this for
        standard errors. If you want average marginal effects you can do
        margeff.reshape(nobs, K, J, order='F').mean(0) and the marginal
        effects for choice J are in column J
        """
        J = int(self.J)  # number of alternative choices
        K = int(self.K)  # number of variables
        # Note: this form should be appropriate for
        # group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
        if exog is None:
            exog = self.exog
        if params.ndim == 1:  # will get flattened from approx_fprime
            params = params.reshape(K, J-1, order='F')

        zeroparams = np.c_[np.zeros(K), params]  # add base in

        cdf = self.cdf(np.dot(exog, params))

        # TODO: meaningful interpretation for `iterm`?
        iterm = np.array([cdf[:, [i]] * zeroparams[:, i]
                          for i in range(int(J))]).sum(0)

        margeff = np.array([cdf[:, [j]] * (zeroparams[:, j] - iterm)
                            for j in range(J)])

        # swap the axes to make sure margeff are in order nobs, K, J
        margeff = np.transpose(margeff, (1, 2, 0))

        if 'ex' in transform:
            margeff *= exog
        if 'ey' in transform:
            margeff /= self.predict(params, exog)[:, None, :]

        margeff = self._derivative_exog_helper(margeff, params, exog,
                                               dummy_idx, count_idx,
                                               transform)
        return margeff.reshape(len(exog), -1, order='F')


class CountModel(DiscreteModel):
    def __init__(self, endog, exog, offset=None, exposure=None,
                 missing='none', check_rank=True, **kwargs):
        self._check_kwargs(kwargs)
        super().__init__(endog, exog, check_rank, missing=missing,
                         offset=offset, exposure=exposure, **kwargs)
        if exposure is not None:
            self.exposure = np.asarray(self.exposure)
            self.exposure = np.log(self.exposure)
        if offset is not None:
            self.offset = np.asarray(self.offset)
        self._check_inputs(self.offset, self.exposure, self.endog)
        if offset is None:
            delattr(self, 'offset')
        if exposure is None:
            delattr(self, 'exposure')

        # promote dtype to float64 if needed
        dt = np.promote_types(self.endog.dtype, np.float64)
        self.endog = np.asarray(self.endog, dt)
        dt = np.promote_types(self.exog.dtype, np.float64)
        self.exog = np.asarray(self.exog, dt)

    def _check_inputs(self, offset, exposure, endog):
        if offset is not None and offset.shape[0] != endog.shape[0]:
            raise ValueError("offset is not the same length as endog")
        if exposure is not None and exposure.shape[0] != endog.shape[0]:
            raise ValueError("exposure is not the same length as endog")

    def _get_init_kwds(self):
        # this is a temporary fixup because exposure has been transformed
        # see #1609
        kwds = super()._get_init_kwds()
        if 'exposure' in kwds and kwds['exposure'] is not None:
            kwds['exposure'] = np.exp(kwds['exposure'])
        return kwds

    def predict(self, params, exog=None, exposure=None, offset=None,
                linear=False):
        """
        Predict response variable of a count model given exogenous variables

        Parameters
        ----------
        params : array_like
            Model parameters
        exog : array_like, optional
            Design / exogenous data. If exog is None, model exog is used.
        exposure : array_like, optional
            Log(exposure) is added to the linear prediction with
            coefficient equal to 1. If exposure is not provided and exog
            is None, uses the model's exposure if present. If not, uses
            0 as the default value.
        offset : array_like, optional
            Offset is added to the linear prediction with coefficient
            equal to 1. If offset is not provided and exog
            is None, uses the model's offset if present.
If not, uses 0 as the default value. linear : bool If True, returns the linear predicted values. If False, returns the exponential of the linear predicted value. Notes ----- If exposure is specified, then it will be logged by the method. The user does not need to log it first. """ # the following is copied from GLM predict (without family/link check) # Use fit offset if appropriate if offset is None and exog is None and hasattr(self, 'offset'): offset = self.offset elif offset is None: offset = 0. # Use fit exposure if appropriate if exposure is None and exog is None and hasattr(self, 'exposure'): # Already logged exposure = self.exposure elif exposure is None: exposure = 0. else: exposure = np.log(exposure) if exog is None: exog = self.exog fitted = np.dot(exog, params[:exog.shape[1]]) linpred = fitted + exposure + offset if not linear: return np.exp(linpred) # not cdf else: return linpred def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predict. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. """ if exog is None: exog = self.exog #NOTE: this handles offset and exposure dF = self.predict(params, exog)[:,None] * exog if 'ey' in transform: dF /= self.predict(params, exog)[:,None] return dF def _derivative_exog(self, params, exog=None, transform="dydx", dummy_idx=None, count_idx=None): """ For computing marginal effects. These are the marginal effects d F(XB) / dX For the Poisson model F(XB) is the predicted counts rather than the probabilities. transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. 
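        For example, in the Poisson case the conditional mean is
        :math:`E[y|x]=\\exp(x\\beta)`, so the 'dydx' effect of regressor
        :math:`j` is :math:`\\exp(x\\beta)\\beta_{j}`, i.e.
        ``predict(params, exog)[:, None] * params[None, :]`` (excluding
        any extra parameters such as a dispersion parameter).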
""" # group 3 poisson, nbreg, zip, zinb if exog is None: exog = self.exog k_extra = getattr(self, 'k_extra', 0) params_exog = params if k_extra == 0 else params[:-k_extra] margeff = self.predict(params, exog)[:,None] * params_exog[None,:] if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:,None] return self._derivative_exog_helper(margeff, params, exog, dummy_idx, count_idx, transform) @Appender(DiscreteModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): cntfit = super().fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = CountResults(self, cntfit) return CountResultsWrapper(discretefit) @Appender(DiscreteModel.fit_regularized.__doc__) def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): _validate_l1_method(method) cntfit = super().fit_regularized(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) discretefit = L1CountResults(self, cntfit) return L1CountResultsWrapper(discretefit) # Public Model Classes class Poisson(CountModel): __doc__ = """ Poisson Model %(params)s %(extra_params)s Attributes ---------- endog : ndarray A reference to the endogenous response variable exog : ndarray A reference to the exogenous design. """ % {'params': base._model_params_doc, 'extra_params': """offset : array_like Offset is added to the linear prediction with coefficient equal to 1. exposure : array_like Log(exposure) is added to the linear prediction with coefficient equal to 1. """ + base._missing_param_doc + _check_rank_doc} @property def family(self): from statsmodels.genmod import families return families.Poisson() def cdf(self, X): """ Poisson model cumulative distribution function Parameters ---------- X : array_like `X` is the linear predictor of the model. See notes. Returns ------- The value of the Poisson CDF at each point. Notes ----- The CDF is defined as .. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!} where :math:`\\lambda` assumes the loglinear model. I.e., .. math:: \\ln\\lambda_{i}=X\\beta The parameter `X` is :math:`X\\beta` in the above formula. """ y = self.endog return stats.poisson.cdf(y, np.exp(X)) def pdf(self, X): """ Poisson model probability mass function Parameters ---------- X : array_like `X` is the linear predictor of the model. See notes. Returns ------- pdf : ndarray The value of the Poisson probability mass function, PMF, for each point of X. Notes -------- The PMF is defined as .. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!} where :math:`\\lambda` assumes the loglinear model. I.e., .. math:: \\ln\\lambda_{i}=x_{i}\\beta The parameter `X` is :math:`x_{i}\\beta` in the above formula. """ y = self.endog return np.exp(stats.poisson.logpmf(y, np.exp(X))) def loglike(self, params): """ Loglikelihood of Poisson model Parameters ---------- params : array_like The parameters of the model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes -------- .. 
math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) XB = np.dot(self.exog, params) + offset + exposure endog = self.endog return np.sum( -np.exp(np.clip(XB, None, EXP_UPPER_LIMIT)) + endog * XB - gammaln(endog + 1) ) def loglikeobs(self, params): """ Loglikelihood for observations of Poisson model Parameters ---------- params : array_like The parameters of the model. Returns ------- loglike : array_like The log likelihood for each observation of the model evaluated at `params`. See Notes Notes -------- .. math:: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] for observations :math:`i=1,...,n` """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) XB = np.dot(self.exog, params) + offset + exposure endog = self.endog #np.sum(stats.poisson.logpmf(endog, np.exp(XB))) return -np.exp(XB) + endog*XB - gammaln(endog+1) @Appender(_get_start_params_null_docs) def _get_start_params_null(self): offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) const = (self.endog / np.exp(offset + exposure)).mean() params = [np.log(const)] return params @Appender(DiscreteModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): if start_params is None and self.data.const_idx is not None: # k_params or k_exog not available? start_params = 0.001 * np.ones(self.exog.shape[1]) start_params[self.data.const_idx] = self._get_start_params_null()[0] kwds = {} if kwargs.get('cov_type') is not None: kwds['cov_type'] = kwargs.get('cov_type') kwds['cov_kwds'] = kwargs.get('cov_kwds', {}) cntfit = super(CountModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = PoissonResults(self, cntfit, **kwds) return PoissonResultsWrapper(discretefit) @Appender(DiscreteModel.fit_regularized.__doc__) def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): _validate_l1_method(method) cntfit = super(CountModel, self).fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) discretefit = L1PoissonResults(self, cntfit) return L1PoissonResultsWrapper(discretefit) def fit_constrained(self, constraints, start_params=None, **fit_kwds): """fit the model subject to linear equality constraints The constraints are of the form `R params = q` where R is the constraint_matrix and q is the vector of constraint_values. The estimation creates a new model with transformed design matrix, exog, and converts the results back to the original parameterization. Parameters ---------- constraints : formula expression or tuple If it is a tuple, then the constraint needs to be given by two arrays (constraint_matrix, constraint_value), i.e. (R, q). Otherwise, the constraints can be given as strings or list of strings. see t_test for details start_params : None or array_like starting values for the optimization. `start_params` needs to be given in the original parameter space and are internally transformed. 
**fit_kwds : keyword arguments fit_kwds are used in the optimization of the transformed model. Returns ------- results : Results instance """ #constraints = (R, q) # TODO: temporary trailing underscore to not overwrite the monkey # patched version # TODO: decide whether to move the imports from patsy import DesignInfo from statsmodels.base._constraints import (fit_constrained, LinearConstraints) # same pattern as in base.LikelihoodModel.t_test lc = DesignInfo(self.exog_names).linear_constraint(constraints) R, q = lc.coefs, lc.constants # TODO: add start_params option, need access to tranformation # fit_constrained needs to do the transformation params, cov, res_constr = fit_constrained(self, R, q, start_params=start_params, fit_kwds=fit_kwds) #create dummy results Instance, TODO: wire up properly res = self.fit(maxiter=0, method='nm', disp=0, warn_convergence=False) # we get a wrapper back res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan) res.mle_retvals['iterations'] = res_constr.mle_retvals.get( 'iterations', np.nan) res.mle_retvals['converged'] = res_constr.mle_retvals['converged'] res._results.params = params res._results.cov_params_default = cov cov_type = fit_kwds.get('cov_type', 'nonrobust') if cov_type != 'nonrobust': res._results.normalized_cov_params = cov # assume scale=1 else: res._results.normalized_cov_params = None k_constr = len(q) res._results.df_resid += k_constr res._results.df_model -= k_constr res._results.constraints = LinearConstraints.from_patsy(lc) res._results.k_constr = k_constr res._results.results_constrained = res_constr return res def score(self, params): """ Poisson model score (gradient) vector of the log-likelihood Parameters ---------- params : array_like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i} where the loglinear model is assumed .. math:: \\ln\\lambda_{i}=x_{i}\\beta """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) X = self.exog L = np.exp(np.dot(X,params) + offset + exposure) return np.dot(self.endog - L, X) def score_obs(self, params): """ Poisson model Jacobian of the log-likelihood for each observation Parameters ---------- params : array_like The parameters of the model Returns ------- score : array_like The score vector (nobs, k_vars) of the model evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i} for observations :math:`i=1,...,n` where the loglinear model is assumed .. math:: \\ln\\lambda_{i}=x_{i}\\beta """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) X = self.exog L = np.exp(np.dot(X,params) + offset + exposure) return (self.endog - L)[:,None] * X def score_factor(self, params): """ Poisson model score_factor for each observation Parameters ---------- params : array_like The parameters of the model Returns ------- score : array_like The score factor (nobs, ) of the model evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right) for observations :math:`i=1,...,n` where the loglinear model is assumed .. 
math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X, params) + offset + exposure)
        return (self.endog - L)

    def hessian(self, params):
        """
        Poisson model Hessian matrix of the loglikelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime}

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X, params) + exposure + offset)
        return -np.dot(L*X.T, X)

    def hessian_factor(self, params):
        """
        Poisson model Hessian factor

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (nobs,)
            The Hessian factor, second derivative of loglikelihood function
            with respect to the linear predictor evaluated at `params`

        Notes
        -----
        .. math:: \\frac{\\partial^{2}\\ln L_{i}}{\\partial(x_{i}\\beta)^{2}}=-\\lambda_{i}

        where the loglinear model is assumed

        .. math:: \\ln\\lambda_{i}=x_{i}\\beta

        The array returned is :math:`\\lambda_{i}`; the minus sign is
        applied where the factor is used.
        """
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        X = self.exog
        L = np.exp(np.dot(X, params) + exposure + offset)
        return L


class GeneralizedPoisson(CountModel):
    __doc__ = """
    Generalized Poisson Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    """ % {'params': base._model_params_doc,
           'extra_params': """
    p : scalar
        p denotes the parameterization for GP regression: p=1 for GP-1 and
        p=2 for GP-2. Default is p=1.
    offset : array_like
        Offset is added to the linear prediction with coefficient
        equal to 1.
    exposure : array_like
        Log(exposure) is added to the linear prediction with coefficient
        equal to 1.""" + base._missing_param_doc + _check_rank_doc}

    def __init__(self, endog, exog, p=1, offset=None, exposure=None,
                 missing='none', check_rank=True, **kwargs):
        super().__init__(endog, exog, offset=offset, exposure=exposure,
                         missing=missing, check_rank=check_rank, **kwargs)
        self.parameterization = p - 1
        self.exog_names.append('alpha')
        self.k_extra = 1
        self._transparams = False

    def _get_init_kwds(self):
        kwds = super()._get_init_kwds()
        kwds['p'] = self.parameterization + 1
        return kwds

    def loglike(self, params):
        """
        Loglikelihood of Generalized Poisson model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.

        Notes
        -----
        .. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\ln\\mu_{i}+(y_{i}-1)\\ln(\\mu_{i}+
            \\alpha\\mu_{i}^{p-1}y_{i})-y_{i}\\ln(1+\\alpha\\mu_{i}^{p-1})-
            \\ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha\\mu_{i}^{p-1}y_{i}}{1+\\alpha
            \\mu_{i}^{p-1}}\\right]
        """
        return np.sum(self.loglikeobs(params))

    def loglikeobs(self, params):
        """
        Loglikelihood for observations of Generalized Poisson model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : ndarray
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes

        Notes
        -----
        ..
math:: \\ln L_{i}=\\ln\\mu_{i}+(y_{i}-1)\\ln(\\mu_{i}+
            \\alpha\\mu_{i}^{p-1}y_{i})-y_{i}\\ln(1+\\alpha\\mu_{i}^{p-1})-
            \\ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha\\mu_{i}^{p-1}y_{i}}{1+\\alpha
            \\mu_{i}^{p-1}}

        for observations :math:`i=1,...,n`
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]

        params = params[:-1]
        p = self.parameterization
        endog = self.endog
        mu = self.predict(params)
        mu_p = np.power(mu, p)
        a1 = 1 + alpha * mu_p
        a2 = mu + (a1 - 1) * endog
        return (np.log(mu) + (endog - 1) * np.log(a2) - endog *
                np.log(a1) - gammaln(endog + 1) - a2 / a1)

    @Appender(_get_start_params_null_docs)
    def _get_start_params_null(self):
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)
        const = (self.endog / np.exp(offset + exposure)).mean()
        params = [np.log(const)]
        mu = const * np.exp(offset + exposure)
        resid = self.endog - mu
        a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
        params.append(a)
        return np.array(params)

    def _estimate_dispersion(self, mu, resid, df_resid=None):
        q = self.parameterization
        if df_resid is None:
            df_resid = resid.shape[0]
        a = ((np.abs(resid) / np.sqrt(mu) - 1) * mu**(-q)).sum() / df_resid
        return a

    @Appender(
        """
        use_transparams : bool
            This parameter enables an internal transformation that imposes
            the no-underdispersion constraint (alpha > 0). True to enable.
            Default is False. If use_transparams=True and method is
            "newton" or "ncg", the transformation is ignored.
        """)
    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='bfgs', maxiter=35,
            full_output=1, disp=1, callback=None, use_transparams=False,
            cov_type='nonrobust', cov_kwds=None, use_t=None,
            optim_kwds_prelim=None, **kwargs):
        if use_transparams and method not in ['newton', 'ncg']:
            self._transparams = True
        else:
            if use_transparams:
                warnings.warn('Parameter "use_transparams" is ignored',
                              RuntimeWarning)
            self._transparams = False

        if start_params is None:
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            kwds_prelim = {'disp': 0, 'skip_hessian': True,
                           'warn_convergence': False}
            if optim_kwds_prelim is not None:
                kwds_prelim.update(optim_kwds_prelim)
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                res_poi = mod_poi.fit(**kwds_prelim)
            start_params = res_poi.params
            a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
                                          df_resid=res_poi.df_resid)
            start_params = np.append(start_params, max(-0.1, a))

        if callback is None:
            # work around perfect separation callback #3895
            callback = lambda *x: x

        mlefit = super().fit(start_params=start_params, maxiter=maxiter,
                             method=method, disp=disp,
                             full_output=full_output, callback=callback,
                             **kwargs)
        if optim_kwds_prelim is not None:
            mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim
        if use_transparams and method not in ["newton", "ncg"]:
            self._transparams = False
            mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])

        gpfit = GeneralizedPoissonResults(self, mlefit._results)
        result = GeneralizedPoissonResultsWrapper(gpfit)

        if cov_kwds is None:
            cov_kwds = {}
        result._get_robustcov_results(cov_type=cov_type, use_self=True,
                                      use_t=use_t, **cov_kwds)
        return result

    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
                        maxiter='defined_by_method', full_output=1, disp=1,
                        callback=None, alpha=0, trim_mode='auto',
                        auto_trim_tol=0.01,
                        size_trim_tol=1e-4, qc_tol=0.03, **kwargs):
        _validate_l1_method(method)

        if np.size(alpha) == 1 and alpha != 0:
            k_params = self.exog.shape[1] + self.k_extra
            alpha = alpha * np.ones(k_params)
            alpha[-1] = 0

        alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
        self._transparams = False
        if start_params is None:
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method,
                    maxiter=maxiter, full_output=full_output, disp=0,
                    callback=callback, alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            start_params = np.append(start_params, 0.1)

        cntfit = super(CountModel, self).fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=disp, callback=callback,
            alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)

        discretefit = L1GeneralizedPoissonResults(self, cntfit)
        return L1GeneralizedPoissonResultsWrapper(discretefit)

    def score_obs(self, params):
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]

        params = params[:-1]
        p = self.parameterization
        exog = self.exog
        y = self.endog[:, None]
        mu = self.predict(params)[:, None]
        mu_p = np.power(mu, p)
        a1 = 1 + alpha * mu_p
        a2 = mu + alpha * mu_p * y
        a3 = alpha * p * mu ** (p - 1)
        a4 = a3 * y
        dmudb = mu * exog

        dalpha = (mu_p * (y * ((y - 1) / a2 - 2 / a1) + a2 / a1**2))
        dparams = dmudb * (-a4 / a1 +
                           a3 * a2 / (a1 ** 2) +
                           (1 + a4) * ((y - 1) / a2 - 1 / a1) +
                           1 / mu)

        return np.concatenate((dparams, np.atleast_2d(dalpha)),
                              axis=1)

    def score(self, params):
        score = np.sum(self.score_obs(params), axis=0)
        if self._transparams:
            # during fitting params[-1] is ln(alpha), so apply the chain
            # rule: d loglike / d ln(alpha) = alpha * d loglike / d alpha
            score[-1] *= np.exp(params[-1])
        return score

    def _score_p(self, params):
        """
        Generalized Poisson model derivative of the log-likelihood with
        respect to the parameterization parameter p.

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        dldp : float
            The first derivative of the loglikelihood function with
            respect to p, evaluated at `params`.
""" if self._transparams: alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] p = self.parameterization exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] mu_p = np.power(mu, p) a1 = 1 + alpha * mu_p a2 = mu + alpha * mu_p * y dp = np.sum((np.log(mu) * ((a2 - mu) * ((y - 1) / a2 - 2 / a1) + (a1 - 1) * a2 / a1 ** 2))) return dp def hessian(self, params): """ Generalized Poisson model Hessian matrix of the loglikelihood Parameters ---------- params : array_like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` """ if self._transparams: alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] p = self.parameterization exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] mu_p = np.power(mu, p) a1 = 1 + alpha * mu_p a2 = mu + alpha * mu_p * y a3 = alpha * p * mu ** (p - 1) a4 = a3 * y a5 = p * mu ** (p - 1) dmudb = mu * exog # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim+1,dim+1)) for i in range(dim): for j in range(i + 1): hess_arr[i,j] = np.sum(mu * exog[:,i,None] * exog[:,j,None] * (mu * (a3 * a4 / a1**2 - 2 * a3**2 * a2 / a1**3 + 2 * a3 * (a4 + 1) / a1**2 - a4 * p / (mu * a1) + a3 * p * a2 / (mu * a1**2) + (y - 1) * a4 * (p - 1) / (a2 * mu) - (y - 1) * (1 + a4)**2 / a2**2 - a4 * (p - 1) / (a1 * mu)) + ((y - 1) * (1 + a4) / a2 - (1 + a4) / a1)), axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] # for dl/dparams dalpha dldpda = np.sum((2 * a4 * mu_p / a1**2 - 2 * a3 * mu_p * a2 / a1**3 - mu_p * y * (y - 1) * (1 + a4) / a2**2 + mu_p * (1 + a4) / a1**2 + a5 * y * (y - 1) / a2 - 2 * a5 * y / a1 + a5 * a2 / a1**2) * dmudb, axis=0) hess_arr[-1,:-1] = dldpda hess_arr[:-1,-1] = dldpda # for dl/dalpha dalpha dldada = mu_p**2 * (3 * y / a1**2 - (y / a2)**2. * (y - 1) - 2 * a2 / a1**3) hess_arr[-1,-1] = dldada.sum() return hess_arr def predict(self, params, exog=None, exposure=None, offset=None, which='mean'): """ Predict response variable of a count model given exogenous variables. Notes ----- If exposure is specified, then it will be logged by the method. The user does not need to log it first. """ if exog is None: exog = self.exog if exposure is None: exposure = getattr(self, 'exposure', 0) elif exposure != 0: exposure = np.log(exposure) if offset is None: offset = getattr(self, 'offset', 0) fitted = np.dot(exog, params[:exog.shape[1]]) linpred = fitted + exposure + offset if which == 'mean': return np.exp(linpred) elif which == 'linear': return linpred elif which =='prob': counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1)) mu = self.predict(params, exog=exog, exposure=exposure, offset=offset)[:,None] return genpoisson_p.pmf(counts, mu, params[-1], self.parameterization + 1) else: raise ValueError('keyword \'which\' not recognized') class Logit(BinaryModel): __doc__ = """ Logit Model %(params)s %(extra_params)s Attributes ---------- endog : ndarray A reference to the endogenous response variable exog : ndarray A reference to the exogenous design. """ % {'params': base._model_params_doc, 'extra_params': base._missing_param_doc + _check_rank_doc} _continuous_ok = True def cdf(self, X): """ The logistic cumulative distribution function Parameters ---------- X : array_like `X` is the linear predictor of the logit model. See notes. Returns ------- 1/(1 + exp(-X)) Notes ----- In the logit model, .. 
math:: \\Lambda\\left(x^{\\prime}\\beta\\right)= \\text{Prob}\\left(Y=1|x\\right)= \\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}} """ X = np.asarray(X) return 1/(1+np.exp(-X)) def pdf(self, X): """ The logistic probability density function Parameters ---------- X : array_like `X` is the linear predictor of the logit model. See notes. Returns ------- pdf : ndarray The value of the Logit probability mass function, PMF, for each point of X. ``np.exp(-x)/(1+np.exp(-X))**2`` Notes ----- In the logit model, .. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}} """ X = np.asarray(X) return np.exp(-X)/(1+np.exp(-X))**2 def loglike(self, params): """ Log-likelihood of logit model. Parameters ---------- params : array_like The parameters of the logit model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ----- .. math:: \\ln L=\\sum_{i}\\ln\\Lambda \\left(q_{i}x_{i}^{\\prime}\\beta\\right) Where :math:`q=2y-1`. This simplification comes from the fact that the logistic distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.sum(np.log(self.cdf(q*np.dot(X,params)))) def loglikeobs(self, params): """ Log-likelihood of logit model for each observation. Parameters ---------- params : array_like The parameters of the logit model. Returns ------- loglike : ndarray The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ----- .. math:: \\ln L=\\sum_{i}\\ln\\Lambda \\left(q_{i}x_{i}^{\\prime}\\beta\\right) for observations :math:`i=1,...,n` where :math:`q=2y-1`. This simplification comes from the fact that the logistic distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.log(self.cdf(q*np.dot(X,params))) def score(self, params): """ Logit model score (gradient) vector of the log-likelihood Parameters ---------- params : array_like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i} """ y = self.endog X = self.exog L = self.cdf(np.dot(X,params)) return np.dot(y - L,X) def score_obs(self, params): """ Logit model Jacobian of the log-likelihood for each observation Parameters ---------- params : array_like The parameters of the model Returns ------- jac : array_like The derivative of the loglikelihood for each observation evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i} for observations :math:`i=1,...,n` """ y = self.endog X = self.exog L = self.cdf(np.dot(X, params)) return (y - L)[:,None] * X def hessian(self, params): """ Logit model Hessian matrix of the log-likelihood Parameters ---------- params : array_like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` Notes ----- .. 
math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime} """ X = self.exog L = self.cdf(np.dot(X,params)) return -np.dot(L*(1-L)*X.T,X) @Appender(DiscreteModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): bnryfit = super().fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = LogitResults(self, bnryfit) return BinaryResultsWrapper(discretefit) class Probit(BinaryModel): __doc__ = """ Probit Model %(params)s %(extra_params)s Attributes ---------- endog : ndarray A reference to the endogenous response variable exog : ndarray A reference to the exogenous design. """ % {'params': base._model_params_doc, 'extra_params': base._missing_param_doc + _check_rank_doc} def cdf(self, X): """ Probit (Normal) cumulative distribution function Parameters ---------- X : array_like The linear predictor of the model (XB). Returns ------- cdf : ndarray The cdf evaluated at `X`. Notes ----- This function is just an alias for scipy.stats.norm.cdf """ return stats.norm._cdf(X) def pdf(self, X): """ Probit (Normal) probability density function Parameters ---------- X : array_like The linear predictor of the model (XB). Returns ------- pdf : ndarray The value of the normal density function for each point of X. Notes ----- This function is just an alias for scipy.stats.norm.pdf """ X = np.asarray(X) return stats.norm._pdf(X) def loglike(self, params): """ Log-likelihood of probit model (i.e., the normal distribution). Parameters ---------- params : array_like The parameters of the model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ----- .. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right) Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))) def loglikeobs(self, params): """ Log-likelihood of probit model for each observation Parameters ---------- params : array_like The parameters of the model. Returns ------- loglike : array_like The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ----- .. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right) for observations :math:`i=1,...,n` where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1)) def score(self, params): """ Probit model score (gradient) vector Parameters ---------- params : array_like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i} Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. 
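        Examples
        --------
        A minimal sketch (synthetic data; purely illustrative) checking the
        analytic score against a numerical gradient:

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> from statsmodels.tools.numdiff import approx_fprime
        >>> np.random.seed(0)
        >>> x = sm.add_constant(np.random.randn(100, 2))
        >>> y = (x.sum(1) + np.random.randn(100) > 0).astype(float)
        >>> mod = sm.Probit(y, x)
        >>> beta = np.zeros(x.shape[1])
        >>> np.allclose(mod.score(beta),
        ...             approx_fprime(beta, mod.loglike, centered=True),
        ...             atol=1e-4)
        True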
""" y = self.endog X = self.exog XB = np.dot(X,params) q = 2*y - 1 # clip to get rid of invalid divide complaint L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS) return np.dot(L,X) def score_obs(self, params): """ Probit model Jacobian for each observation Parameters ---------- params : array_like The parameters of the model Returns ------- jac : array_like The derivative of the loglikelihood for each observation evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i} for observations :math:`i=1,...,n` Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ y = self.endog X = self.exog XB = np.dot(X,params) q = 2*y - 1 # clip to get rid of invalid divide complaint L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS) return L[:,None] * X def hessian(self, params): """ Probit model Hessian matrix of the log-likelihood Parameters ---------- params : array_like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime} where .. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)} and :math:`q=2y-1` """ X = self.exog XB = np.dot(X,params) q = 2*self.endog - 1 L = q*self.pdf(q*XB)/self.cdf(q*XB) return np.dot(-L*(L+XB)*X.T,X) @Appender(DiscreteModel.fit.__doc__) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): bnryfit = super().fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = ProbitResults(self, bnryfit) return BinaryResultsWrapper(discretefit) class MNLogit(MultinomialModel): __doc__ = """ Multinomial Logit Model Parameters ---------- endog : array_like `endog` is an 1-d vector of the endogenous response. `endog` can contain strings, ints, or floats or may be a pandas Categorical Series. Note that if it contains strings, every distinct string will be a category. No stripping of whitespace is done. exog : array_like A nobs x k array where `nobs` is the number of observations and `k` is the number of regressors. An intercept is not included by default and should be added by the user. See `statsmodels.tools.add_constant`. %(extra_params)s Attributes ---------- endog : ndarray A reference to the endogenous response variable exog : ndarray A reference to the exogenous design. J : float The number of choices for the endogenous variable. Note that this is zero-indexed. K : float The actual number of parameters for the exogenous design. Includes the constant if the design has one. names : dict A dictionary mapping the column number in `wendog` to the variables in `endog`. wendog : ndarray An n x j array where j is the number of unique categories in `endog`. Each column of j is a dummy variable indicating the category of each observation. See `names` for a dictionary mapping each column to its category. Notes ----- See developer notes for further information on `MNLogit` internals. 
""" % {'extra_params': base._missing_param_doc + _check_rank_doc} def __init__(self, endog, exog, check_rank=True, **kwargs): super().__init__(endog, exog, check_rank=check_rank, **kwargs) # Override cov_names since multivariate model yname = self.endog_names ynames = self._ynames_map ynames = MultinomialResults._maybe_convert_ynames_int(ynames) # use range below to ensure sortedness ynames = [ynames[key] for key in range(int(self.J))] idx = MultiIndex.from_product((ynames[1:], self.data.xnames), names=(yname, None)) self.data.cov_names = idx def pdf(self, eXB): """ NotImplemented """ raise NotImplementedError def cdf(self, X): """ Multinomial logit cumulative distribution function. Parameters ---------- X : ndarray The linear predictor of the model XB. Returns ------- cdf : ndarray The cdf evaluated at `X`. Notes ----- In the multinomial logit model. .. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)} """ eXB = np.column_stack((np.ones(len(X)), np.exp(X))) return eXB/eXB.sum(1)[:,None] def loglike(self, params): """ Log-likelihood of the multinomial logit model. Parameters ---------- params : array_like The parameters of the multinomial logit model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ----- .. math:: \\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln \\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)} {\\sum_{k=0}^{J} \\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right) where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0 if not. """ params = params.reshape(self.K, -1, order='F') d = self.wendog logprob = np.log(self.cdf(np.dot(self.exog,params))) return np.sum(d * logprob) def loglikeobs(self, params): """ Log-likelihood of the multinomial logit model for each observation. Parameters ---------- params : array_like The parameters of the multinomial logit model. Returns ------- loglike : array_like The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ----- .. math:: \\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln \\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)} {\\sum_{k=0}^{J} \\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right) for observations :math:`i=1,...,n` where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0 if not. """ params = params.reshape(self.K, -1, order='F') d = self.wendog logprob = np.log(self.cdf(np.dot(self.exog,params))) return d * logprob def score(self, params): """ Score matrix for multinomial logit model log-likelihood Parameters ---------- params : ndarray The parameters of the multinomial logit model. Returns ------- score : ndarray, (K * (J-1),) The 2-d score vector, i.e. the first derivative of the loglikelihood function, of the multinomial logit model evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i} for :math:`j=1,...,J` In the multinomial model the score matrix is K x J-1 but is returned as a flattened array to work with the solvers. 
""" params = params.reshape(self.K, -1, order='F') firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog, params))[:,1:] #NOTE: might need to switch terms if params is reshaped return np.dot(firstterm.T, self.exog).flatten() def loglike_and_score(self, params): """ Returns log likelihood and score, efficiently reusing calculations. Note that both of these returned quantities will need to be negated before being minimized by the maximum likelihood fitting machinery. """ params = params.reshape(self.K, -1, order='F') cdf_dot_exog_params = self.cdf(np.dot(self.exog, params)) loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params)) firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:] score_array = np.dot(firstterm.T, self.exog).flatten() return loglike_value, score_array def score_obs(self, params): """ Jacobian matrix for multinomial logit model log-likelihood Parameters ---------- params : ndarray The parameters of the multinomial logit model. Returns ------- jac : array_like The derivative of the loglikelihood for each observation evaluated at `params` . Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i} for :math:`j=1,...,J`, for observations :math:`i=1,...,n` In the multinomial model the score vector is K x (J-1) but is returned as a flattened array. The Jacobian has the observations in rows and the flattened array of derivatives in columns. """ params = params.reshape(self.K, -1, order='F') firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog, params))[:,1:] #NOTE: might need to switch terms if params is reshaped return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1) def hessian(self, params): """ Multinomial logit Hessian matrix of the log-likelihood Parameters ---------- params : array_like The parameters of the model Returns ------- hess : ndarray, (J*K, J*K) The Hessian, second derivative of loglikelihood function with respect to the flattened parameters, evaluated at `params` Notes ----- .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime} where :math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0 otherwise. The actual Hessian matrix has J**2 * K x K elements. Our Hessian is reshaped to be square (J*K, J*K) so that the solvers can use it. This implementation does not take advantage of the symmetry of the Hessian and could probably be refactored for speed. """ params = params.reshape(self.K, -1, order='F') X = self.exog pr = self.cdf(np.dot(X,params)) partials = [] J = self.J K = self.K for i in range(J-1): for j in range(J-1): # this loop assumes we drop the first col. 
                if i == j:
                    partials.append(
                        -np.dot(((pr[:, i+1]*(1-pr[:, j+1]))[:, None]*X).T, X))
                else:
                    partials.append(
                        -np.dot(((pr[:, i+1]*-pr[:, j+1])[:, None]*X).T, X))
        H = np.array(partials)
        # the developer's notes on multinomial should clear this math up
        H = np.transpose(H.reshape(J-1, J-1, K, K),
                         (0, 2, 1, 3)).reshape((J-1)*K, (J-1)*K)
        return H


# TODO: Weibull can be replaced by a survival analysis function
# like Stata's streg (the Cox model as well)
#class Weibull(DiscreteModel):
#    """
#    Binary choice Weibull model
#
#    Notes
#    ------
#    This is unfinished and untested.
#    """
##TODO: add analytic hessian for Weibull
#    def initialize(self):
#        pass
#
#    def cdf(self, X):
#        """
#        Gumbel (Log Weibull) cumulative distribution function
#        """
##        return np.exp(-np.exp(-X))
#        return stats.gumbel_r.cdf(X)
#        # these two are equivalent.
#        # Greene table and discussion is incorrect.
#
#    def pdf(self, X):
#        """
#        Gumbel (Log Weibull) probability distribution function
#        """
#        return stats.gumbel_r.pdf(X)
#
#    def loglike(self, params):
#        """
#        Loglikelihood of Weibull distribution
#        """
#        X = self.exog
#        cdf = self.cdf(np.dot(X, params))
#        y = self.endog
#        return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))
#
#    def score(self, params):
#        y = self.endog
#        X = self.exog
#        F = self.cdf(np.dot(X, params))
#        f = self.pdf(np.dot(X, params))
#        term = (y*f/F + (1 - y)*-f/(1-F))
#        return np.dot(term, X)
#
#    def hessian(self, params):
#        hess = nd.Jacobian(self.score)
#        return hess(params)
#
#    def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
#        # The example had problems with all zero start values, Hessian = 0
#        if start_params is None:
#            start_params = OLS(self.endog, self.exog).fit().params
#        mlefit = super(Weibull, self).fit(start_params=start_params,
#                                          method=method, maxiter=maxiter,
#                                          tol=tol)
#        return mlefit
#


class NegativeBinomial(CountModel):
    __doc__ = """
    Negative Binomial Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.

    References
    ----------
    Greene, W. 2008. "Functional forms for the negative binomial model for
        count data". Economics Letters, Volume 99, Number 3, pp. 585-590.
    Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University
        Press.
    """ % {'params': base._model_params_doc,
           'extra_params':
           """loglike_method : str
        Log-likelihood type. 'nb2', 'nb1', or 'geometric'.
        Fitted value :math:`\\mu`
        Heterogeneity parameter :math:`\\alpha`

        - nb2: Variance equal to :math:`\\mu + \\alpha\\mu^2` (most common)
        - nb1: Variance equal to :math:`\\mu + \\alpha\\mu`
        - geometric: Variance equal to :math:`\\mu + \\mu^2`
    offset : array_like
        Offset is added to the linear prediction with coefficient
        equal to 1.
    exposure : array_like
        Log(exposure) is added to the linear prediction with coefficient
        equal to 1.
""" + base._missing_param_doc + _check_rank_doc} def __init__(self, endog, exog, loglike_method='nb2', offset=None, exposure=None, missing='none', check_rank=True, **kwargs): super().__init__(endog, exog, offset=offset, exposure=exposure, missing=missing, check_rank=check_rank, **kwargs) self.loglike_method = loglike_method self._initialize() if loglike_method in ['nb2', 'nb1']: self.exog_names.append('alpha') self.k_extra = 1 else: self.k_extra = 0 # store keys for extras if we need to recreate model instance # we need to append keys that do not go to super self._init_keys.append('loglike_method') def _initialize(self): if self.loglike_method == 'nb2': self.hessian = self._hessian_nb2 self.score = self._score_nbin self.loglikeobs = self._ll_nb2 self._transparams = True # transform lnalpha -> alpha in fit elif self.loglike_method == 'nb1': self.hessian = self._hessian_nb1 self.score = self._score_nb1 self.loglikeobs = self._ll_nb1 self._transparams = True # transform lnalpha -> alpha in fit elif self.loglike_method == 'geometric': self.hessian = self._hessian_geom self.score = self._score_geom self.loglikeobs = self._ll_geometric else: raise ValueError('Likelihood type must "nb1", "nb2" ' 'or "geometric"') # Workaround to pickle instance methods def __getstate__(self): odict = self.__dict__.copy() # copy the dict since we change it del odict['hessian'] del odict['score'] del odict['loglikeobs'] return odict def __setstate__(self, indict): self.__dict__.update(indict) self._initialize() def _ll_nbin(self, params, alpha, Q=0): if np.any(np.iscomplex(params)) or np.iscomplex(alpha): gamma_ln = loggamma else: gamma_ln = gammaln endog = self.endog mu = self.predict(params) size = 1/alpha * mu**Q prob = size/(size+mu) coeff = (gamma_ln(size+endog) - gamma_ln(endog+1) - gamma_ln(size)) llf = coeff + size*np.log(prob) + endog*np.log(1-prob) return llf def _ll_nb2(self, params): if self._transparams: # got lnalpha during fit alpha = np.exp(params[-1]) else: alpha = params[-1] return self._ll_nbin(params[:-1], alpha, Q=0) def _ll_nb1(self, params): if self._transparams: # got lnalpha during fit alpha = np.exp(params[-1]) else: alpha = params[-1] return self._ll_nbin(params[:-1], alpha, Q=1) def _ll_geometric(self, params): # we give alpha of 1 because it's actually log(alpha) where alpha=0 return self._ll_nbin(params, 1, 0) def loglike(self, params): r""" Loglikelihood for negative binomial model Parameters ---------- params : array_like The parameters of the model. If `loglike_method` is nb1 or nb2, then the ancillary parameter is expected to be the last element. Returns ------- llf : float The loglikelihood value at `params` Notes ----- Following notation in Greene (2008), with negative binomial heterogeneity parameter :math:`\alpha`: .. math:: \lambda_i &= exp(X\beta) \\ \theta &= 1 / \alpha \\ g_i &= \theta \lambda_i^Q \\ w_i &= g_i/(g_i + \lambda_i) \\ r_i &= \theta / (\theta+\lambda_i) \\ ln \mathcal{L}_i &= ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i) where :math`Q=0` for NB2 and geometric and :math:`Q=1` for NB1. For the geometric, :math:`\alpha=0` as well. 
""" llf = np.sum(self.loglikeobs(params)) return llf def _score_geom(self, params): exog = self.exog y = self.endog[:, None] mu = self.predict(params)[:, None] dparams = exog * (y-mu)/(mu+1) return dparams.sum(0) def _score_nbin(self, params, Q=0): """ Score vector for NB2 model """ if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] a1 = 1/alpha * mu**Q prob = a1 / (a1 + mu) # a1 aka "size" in _ll_nbin if Q == 1: # nb1 # Q == 1 --> a1 = mu / alpha --> prob = 1 / (alpha + 1) dgpart = digamma(y + a1) - digamma(a1) dparams = exog * a1 * (np.log(prob) + dgpart) dalpha = ((alpha * (y - mu * np.log(prob) - mu*(dgpart + 1)) - mu * (np.log(prob) + dgpart))/ (alpha**2*(alpha + 1))).sum() elif Q == 0: # nb2 dgpart = digamma(y + a1) - digamma(a1) dparams = exog*a1 * (y-mu)/(mu+a1) da1 = -alpha**-2 dalpha = (dgpart + np.log(a1) - np.log(a1+mu) - (y-mu)/(a1+mu)).sum() * da1 #multiply above by constant outside sum to reduce rounding error if self._transparams: return np.r_[dparams.sum(0), dalpha*alpha] else: return np.r_[dparams.sum(0), dalpha] def _score_nb1(self, params): return self._score_nbin(params, Q=1) def _hessian_geom(self, params): exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim, dim)) const_arr = mu*(1+y)/(mu+1)**2 for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] * const_arr, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] return hess_arr def _hessian_nb1(self, params): """ Hessian of NB1 model. """ if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] a1 = mu/alpha dgpart = digamma(y + a1) - digamma(a1) prob = 1 / (1 + alpha) # equiv: a1 / (a1 + mu) # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim+1,dim+1)) #const_arr = a1*mu*(a1+y)/(mu+a1)**2 # not all of dparams dparams = exog / alpha * (np.log(prob) + dgpart) dmudb = exog*mu xmu_alpha = exog * a1 trigamma = (special.polygamma(1, a1 + y) - special.polygamma(1, a1)) for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] + xmu_alpha[:,i,None] * xmu_alpha[:,j,None] * trigamma, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] # for dl/dparams dalpha da1 = -alpha**-2 dldpda = np.sum(-a1 * dparams + exog * a1 * (-trigamma*mu/alpha**2 - prob), axis=0) hess_arr[-1,:-1] = dldpda hess_arr[:-1,-1] = dldpda log_alpha = np.log(prob) alpha3 = alpha**3 alpha2 = alpha**2 mu2 = mu**2 dada = ((alpha3*mu*(2*log_alpha + 2*dgpart + 3) - 2*alpha3*y + 4*alpha2*mu*(log_alpha + dgpart) + alpha2 * (2*mu - y) + 2*alpha*mu2*trigamma + mu2 * trigamma + alpha2 * mu2 * trigamma + 2*alpha*mu*(log_alpha + dgpart) )/(alpha**4*(alpha2 + 2*alpha + 1))) hess_arr[-1,-1] = dada.sum() return hess_arr def _hessian_nb2(self, params): """ Hessian of NB2 model. 
""" if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] a1 = 1/alpha params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] prob = a1 / (a1 + mu) dgpart = digamma(a1 + y) - digamma(a1) # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim+1,dim+1)) const_arr = a1*mu*(a1+y)/(mu+a1)**2 for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] * const_arr, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] # for dl/dparams dalpha da1 = -alpha**-2 dldpda = -np.sum(mu*exog*(y-mu)*a1**2/(mu+a1)**2 , axis=0) hess_arr[-1,:-1] = dldpda hess_arr[:-1,-1] = dldpda # for dl/dalpha dalpha #NOTE: polygamma(1,x) is the trigamma function da2 = 2*alpha**-3 dalpha = da1 * (dgpart + np.log(prob) - (y - mu)/(a1+mu)) dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) - special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) + (y - mu)/(mu + a1)**2)).sum() hess_arr[-1,-1] = dada return hess_arr #TODO: replace this with analytic where is it used? def score_obs(self, params): sc = approx_fprime_cs(params, self.loglikeobs) return sc @Appender(_get_start_params_null_docs) def _get_start_params_null(self): offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) const = (self.endog / np.exp(offset + exposure)).mean() params = [np.log(const)] mu = const * np.exp(offset + exposure) resid = self.endog - mu a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1) params.append(a) return np.array(params) def _estimate_dispersion(self, mu, resid, df_resid=None): if df_resid is None: df_resid = resid.shape[0] if self.loglike_method == 'nb2': #params.append(np.linalg.pinv(mu[:,None]).dot(resid**2 / mu - 1)) a = ((resid**2 / mu - 1) / mu).sum() / df_resid else: #self.loglike_method == 'nb1': a = (resid**2 / mu - 1).sum() / df_resid return a def fit(self, start_params=None, method='bfgs', maxiter=35, full_output=1, disp=1, callback=None, cov_type='nonrobust', cov_kwds=None, use_t=None, optim_kwds_prelim=None, **kwargs): # Note: do not let super handle robust covariance because it has # transformed params self._transparams = False # always define attribute if self.loglike_method.startswith('nb') and method not in ['newton', 'ncg']: self._transparams = True # in case same Model instance is refit elif self.loglike_method.startswith('nb'): # method is newton/ncg self._transparams = False # because we need to step in alpha space if start_params is None: # Use poisson fit as first guess. 
#TODO, Warning: this assumes exposure is logged offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0) if np.size(offset) == 1 and offset == 0: offset = None kwds_prelim = {'disp': 0, 'skip_hessian': True, 'warn_convergence': False} if optim_kwds_prelim is not None: kwds_prelim.update(optim_kwds_prelim) mod_poi = Poisson(self.endog, self.exog, offset=offset) with warnings.catch_warnings(): warnings.simplefilter("always") res_poi = mod_poi.fit(**kwds_prelim) start_params = res_poi.params if self.loglike_method.startswith('nb'): a = self._estimate_dispersion(res_poi.predict(), res_poi.resid, df_resid=res_poi.df_resid) start_params = np.append(start_params, max(0.05, a)) else: if self._transparams is True: # transform user provided start_params dispersion, see #3918 start_params = np.array(start_params, copy=True) start_params[-1] = np.log(start_params[-1]) if callback is None: # work around perfect separation callback #3895 callback = lambda *x: x mlefit = super().fit(start_params=start_params, maxiter=maxiter, method=method, disp=disp, full_output=full_output, callback=callback, **kwargs) if optim_kwds_prelim is not None: mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim # TODO: Fix NBin _check_perfect_pred if self.loglike_method.startswith('nb'): # mlefit is a wrapped counts results self._transparams = False # do not need to transform anymore now # change from lnalpha to alpha if method not in ["newton", "ncg"]: mlefit._results.params[-1] = np.exp(mlefit._results.params[-1]) nbinfit = NegativeBinomialResults(self, mlefit._results) result = NegativeBinomialResultsWrapper(nbinfit) else: result = mlefit if cov_kwds is None: cov_kwds = {} #TODO: make this unnecessary ? result._get_robustcov_results(cov_type=cov_type, use_self=True, use_t=use_t, **cov_kwds) return result def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): _validate_l1_method(method) if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and alpha != 0): # do not penalize alpha if alpha is scalar k_params = self.exog.shape[1] + self.k_extra alpha = alpha * np.ones(k_params) alpha[-1] = 0 # alpha for regularized poisson to get starting values alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha self._transparams = False if start_params is None: # Use poisson fit as first guess. 
            #TODO, Warning: this assumes exposure is logged
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method, maxiter=maxiter,
                    full_output=full_output, disp=0, callback=callback,
                    alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            if self.loglike_method.startswith('nb'):
                start_params = np.append(start_params, 0.1)

        cntfit = super(CountModel, self).fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=disp, callback=callback,
            alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)

        discretefit = L1NegativeBinomialResults(self, cntfit)
        return L1NegativeBinomialResultsWrapper(discretefit)


class NegativeBinomialP(CountModel):
    __doc__ = """
    Generalized Negative Binomial (NB-P) Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    p : scalar
        P denotes parameterizations for NB-P regression. p=1 for NB-1 and
        p=2 for NB-2. Default is p=2.
    """ % {'params': base._model_params_doc,
           'extra_params': """p : scalar
        P denotes parameterizations for NB regression. p=1 for NB-1 and
        p=2 for NB-2. Default is p=2.
    offset : array_like
        Offset is added to the linear prediction with coefficient
        equal to 1.
    exposure : array_like
        Log(exposure) is added to the linear prediction with
        coefficient equal to 1.
    """ + base._missing_param_doc + _check_rank_doc}

    def __init__(self, endog, exog, p=2, offset=None, exposure=None,
                 missing='none', check_rank=True, **kwargs):
        super().__init__(endog, exog, offset=offset, exposure=exposure,
                         missing=missing, check_rank=check_rank, **kwargs)
        self.parameterization = p
        self.exog_names.append('alpha')
        self.k_extra = 1
        self._transparams = False

    def _get_init_kwds(self):
        kwds = super()._get_init_kwds()
        kwds['p'] = self.parameterization
        return kwds

    def loglike(self, params):
        """
        Loglikelihood of Generalized Negative Binomial (NB-P) model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : float
            The log-likelihood function of the model evaluated at `params`.
            See notes.
        """
        return np.sum(self.loglikeobs(params))

    def loglikeobs(self, params):
        """
        Loglikelihood for observations of Generalized Negative Binomial
        (NB-P) model

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : ndarray
            The log likelihood for each observation of the model evaluated
            at `params`. See Notes.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]

        params = params[:-1]
        p = self.parameterization
        y = self.endog

        mu = self.predict(params)
        mu_p = mu**(2 - p)
        a1 = mu_p / alpha
        a2 = mu + a1

        llf = (gammaln(y + a1) - gammaln(y + 1) - gammaln(a1) +
               a1 * np.log(a1) + y * np.log(mu) -
               (y + a1) * np.log(a2))

        return llf

    def score_obs(self, params):
        """
        Generalized Negative Binomial (NB-P) model score (gradient)
        vector of the log-likelihood for each observation.

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e.
            the first derivative of the loglikelihood function, evaluated at
            `params`
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]

        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog

        mu = self.predict(params)
        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu

        dgpart = digamma(a3) - digamma(a1)
        dgterm = dgpart + np.log(a1 / a2) + 1 - a3 / a2
        # TODO: better name/interpretation for dgterm?

        dparams = (a4 * dgterm -
                   a3 / a2 +
                   y / mu)
        dparams = (self.exog.T * mu * dparams).T
        dalpha = -a1 / alpha * dgterm

        return np.concatenate((dparams, np.atleast_2d(dalpha).T),
                              axis=1)

    def score(self, params):
        """
        Generalized Negative Binomial (NB-P) model score (gradient)
        vector of the log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`
        """
        score = np.sum(self.score_obs(params), axis=0)
        if self._transparams:
            # params[-1] is lnalpha during fitting, while score_obs
            # differentiates with respect to alpha, so apply the chain
            # rule: d/dlnalpha = alpha * d/dalpha
            score[-1] *= np.exp(params[-1])
        return score

    def hessian(self, params):
        """
        Generalized Negative Binomial (NB-P) model Hessian matrix of the
        log-likelihood

        Parameters
        ----------
        params : array_like
            The parameters of the model

        Returns
        -------
        hessian : ndarray, 2-D
            The hessian matrix of the model.
        """
        if self._transparams:
            alpha = np.exp(params[-1])
        else:
            alpha = params[-1]

        params = params[:-1]
        p = 2 - self.parameterization
        y = self.endog
        exog = self.exog
        mu = self.predict(params)

        mu_p = mu**p
        a1 = mu_p / alpha
        a2 = mu + a1
        a3 = y + a1
        a4 = p * a1 / mu

        prob = a1 / a2
        lprob = np.log(prob)
        dgpart = digamma(a3) - digamma(a1)
        pgpart = polygamma(1, a3) - polygamma(1, a1)

        dim = exog.shape[1]
        hess_arr = np.zeros((dim + 1, dim + 1))

        coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
                          a3 / a2 * (p - 1) * a4 / mu -
                          y / mu**2 -
                          2 * a4 * (1 + a4) / a2 +
                          p * a4 / mu * (lprob + dgpart + 2) -
                          a4 / mu * (lprob + dgpart + 1) +
                          a4**2 * pgpart) +
                         (-(1 + a4) * a3 / a2 +
                          y / mu +
                          a4 * (lprob + dgpart + 1)) / mu)

        for i in range(dim):
            hess_arr[i, :-1] = np.sum(self.exog[:, :].T * self.exog[:, i] *
                                      coeff, axis=1)

        hess_arr[-1, :-1] = (self.exog[:, :].T * mu * a1 *
                             ((1 + a4) * (1 - a3 / a2) / a2 -
                              p * (lprob + dgpart + 2) / mu +
                              p / mu * (a3 + p * a1) / a2 -
                              a4 * pgpart) / alpha).sum(axis=1)

        da2 = (a1 * (2 * lprob +
                     2 * dgpart + 3 -
                     2 * a3 / a2
                     + a1 * pgpart
                     - 2 * prob +
                     prob * a3 / a2) / alpha**2)

        hess_arr[-1, -1] = da2.sum()

        tri_idx = np.triu_indices(dim + 1, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]

        return hess_arr

    @Appender(_get_start_params_null_docs)
    def _get_start_params_null(self):
        offset = getattr(self, "offset", 0)
        exposure = getattr(self, "exposure", 0)

        const = (self.endog / np.exp(offset + exposure)).mean()
        params = [np.log(const)]
        mu = const * np.exp(offset + exposure)
        resid = self.endog - mu
        a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
        params.append(a)

        return np.array(params)

    def _estimate_dispersion(self, mu, resid, df_resid=None):
        q = self.parameterization - 1
        if df_resid is None:
            df_resid = resid.shape[0]
        a = ((resid**2 / mu - 1) * mu**(-q)).sum() / df_resid
        return a

    @Appender(DiscreteModel.fit.__doc__)
    def fit(self, start_params=None, method='bfgs', maxiter=35,
            full_output=1, disp=1, callback=None, use_transparams=False,
            cov_type='nonrobust', cov_kwds=None, use_t=None,
            optim_kwds_prelim=None, **kwargs):
        # TODO: Fix doc string
        """
        use_transparams : bool
            This parameter enables an internal transformation to impose
            non-negativity of alpha. True to enable. Default is False.
            use_transparams=True imposes the constraint alpha > 0
            (no underdispersion). If use_transparams=True and method is
            "newton" or "ncg", the transformation is ignored.
        """
        if use_transparams and method not in ['newton', 'ncg']:
            self._transparams = True
        else:
            if use_transparams:
                warnings.warn('Parameter "use_transparams" is ignored',
                              RuntimeWarning)
            self._transparams = False

        if start_params is None:
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None

            kwds_prelim = {'disp': 0, 'skip_hessian': True,
                           'warn_convergence': False}
            if optim_kwds_prelim is not None:
                kwds_prelim.update(optim_kwds_prelim)
            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                res_poi = mod_poi.fit(**kwds_prelim)
            start_params = res_poi.params
            a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
                                          df_resid=res_poi.df_resid)
            start_params = np.append(start_params, max(0.05, a))

        if callback is None:
            # work around perfect separation callback #3895
            callback = lambda *x: x

        mlefit = super(NegativeBinomialP, self).fit(
            start_params=start_params, maxiter=maxiter, method=method,
            disp=disp, full_output=full_output, callback=callback,
            **kwargs)

        if optim_kwds_prelim is not None:
            mlefit.mle_settings["optim_kwds_prelim"] = optim_kwds_prelim

        if use_transparams and method not in ["newton", "ncg"]:
            self._transparams = False
            mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])

        nbinfit = NegativeBinomialResults(self, mlefit._results)
        result = NegativeBinomialResultsWrapper(nbinfit)

        if cov_kwds is None:
            cov_kwds = {}
        result._get_robustcov_results(cov_type=cov_type,
                                      use_self=True, use_t=use_t, **cov_kwds)
        return result

    @Appender(DiscreteModel.fit_regularized.__doc__)
    def fit_regularized(self, start_params=None, method='l1',
                        maxiter='defined_by_method', full_output=1, disp=1,
                        callback=None, alpha=0, trim_mode='auto',
                        auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,
                        **kwargs):
        _validate_l1_method(method)

        if np.size(alpha) == 1 and alpha != 0:
            k_params = self.exog.shape[1] + self.k_extra
            alpha = alpha * np.ones(k_params)
            alpha[-1] = 0

        alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) \
            else alpha

        self._transparams = False
        if start_params is None:
            offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
            if np.size(offset) == 1 and offset == 0:
                offset = None

            mod_poi = Poisson(self.endog, self.exog, offset=offset)
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                start_params = mod_poi.fit_regularized(
                    start_params=start_params, method=method, maxiter=maxiter,
                    full_output=full_output, disp=0, callback=callback,
                    alpha=alpha_p, trim_mode=trim_mode,
                    auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol,
                    qc_tol=qc_tol, **kwargs).params
            start_params = np.append(start_params, 0.1)

        cntfit = super(CountModel, self).fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=disp, callback=callback,
            alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)

        discretefit = L1NegativeBinomialResults(self, cntfit)

        return L1NegativeBinomialResultsWrapper(discretefit)

    def predict(self, params, exog=None, exposure=None, offset=None,
                which='mean'):
        """
        Predict response variable of a model given exogenous variables.

        Parameters
        ----------
        params : array_like
            2d array of fitted parameters of the model.
            Should be in the order returned from the model.
        exog : array_like, optional
            1d or 2d array of exogenous values.  If not supplied, the
            whole exog attribute of the model is used.  If a 1d array is
            given, it is assumed to be 1 row of exogenous variables.  If
            you only have one regressor and would like to do prediction,
            you must provide a 2d array with shape[1] == 1.
        offset : array_like, optional
            Offset is added to the linear prediction with coefficient equal
            to 1.
        exposure : array_like, optional
            Log(exposure) is added to the linear prediction with coefficient
            equal to 1.
        which : 'mean', 'linear', 'prob', optional.
            'mean' returns the exp of linear predictor exp(dot(exog, params)).
            'linear' returns the linear predictor dot(exog, params).
            'prob' returns probabilities for counts from 0 to max(endog).
            Default is 'mean'.
        """
        if exog is None:
            exog = self.exog

        if exposure is None:
            exposure = getattr(self, 'exposure', 0)
        elif exposure != 0:
            exposure = np.log(exposure)

        if offset is None:
            offset = getattr(self, 'offset', 0)

        fitted = np.dot(exog, params[:exog.shape[1]])
        linpred = fitted + exposure + offset

        if which == 'mean':
            return np.exp(linpred)
        elif which == 'linear':
            return linpred
        elif which == 'prob':
            counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
            mu = self.predict(params, exog, exposure, offset)
            size, prob = self.convert_params(params, mu)
            return nbinom.pmf(counts, size[:,None], prob[:,None])
        else:
            raise ValueError('keyword "which" = %s not recognized' % which)

    def convert_params(self, params, mu):
        alpha = params[-1]
        p = 2 - self.parameterization

        size = 1. / alpha * mu**p
        prob = size / (size + mu)

        return (size, prob)


### Results Class ###

class DiscreteResults(base.LikelihoodModelResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
        "A results class for the discrete dependent variable models.",
        "extra_attr" : ""}

    def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,
                 use_t=None):
        #super(DiscreteResults, self).__init__(model, params,
        #        np.linalg.inv(-hessian), scale=1.)
        self.model = model
        self.df_model = model.df_model
        self.df_resid = model.df_resid
        self._cache = {}
        self.nobs = model.exog.shape[0]
        self.__dict__.update(mlefit.__dict__)

        if not hasattr(self, 'cov_type'):
            # do this only if super, i.e. mlefit did not already add cov_type
            # robust covariance
            if use_t is not None:
                self.use_t = use_t
            if cov_type == 'nonrobust':
                self.cov_type = 'nonrobust'
                self.cov_kwds = {'description' :
                                 'Standard Errors assume that the ' +
                                 'covariance matrix of the errors is ' +
                                 'correctly specified.'}
            else:
                if cov_kwds is None:
                    cov_kwds = {}
                from statsmodels.base.covtype import get_robustcov_results
                get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                      **cov_kwds)

    def __getstate__(self):
        # remove unpicklable methods
        mle_settings = getattr(self, 'mle_settings', None)
        if mle_settings is not None:
            if 'callback' in mle_settings:
                mle_settings['callback'] = None
            if 'cov_params_func' in mle_settings:
                mle_settings['cov_params_func'] = None
        return self.__dict__

    @cache_readonly
    def prsquared(self):
        """
        McFadden's pseudo-R-squared.
        `1 - (llf / llnull)`
        """
        return 1 - self.llf/self.llnull

    @cache_readonly
    def llr(self):
        """
        Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
        """
        return -2*(self.llnull - self.llf)

    @cache_readonly
    def llr_pvalue(self):
        """
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr.  llr has a chi-squared distribution
        with degrees of freedom `df_model`.
        """
        return stats.distributions.chi2.sf(self.llr, self.df_model)

    def set_null_options(self, llnull=None, attach_results=True, **kwargs):
        """
        Set the fit options for the Null (constant-only) model.

        This resets the cache for related attributes which is potentially
        fragile. This only sets the option, the null model is estimated
        when llnull is accessed, if llnull is not yet in cache.

        Parameters
        ----------
        llnull : {None, float}
            If llnull is not None, then the value will be directly assigned
            to the cached attribute "llnull".
        attach_results : bool
            Sets an internal flag whether the results instance of the null
            model should be attached. By default, without calling this
            method, the null model results are not attached and only the
            loglikelihood value llnull is stored.
        **kwargs
            Additional keyword arguments used as fit keyword arguments for
            the null model. These override the model default values.

        Notes
        -----
        Modifies attributes of this instance, and so has no return.
        """
        # reset cache, note we need to add here anything that depends on
        # llnull or the null model. If something is missing, then the
        # attribute might be incorrect.
        self._cache.pop('llnull', None)
        self._cache.pop('llr', None)
        self._cache.pop('llr_pvalue', None)
        self._cache.pop('prsquared', None)
        if hasattr(self, 'res_null'):
            del self.res_null

        if llnull is not None:
            self._cache['llnull'] = llnull
        self._attach_nullmodel = attach_results
        self._optim_kwds_null = kwargs

    @cache_readonly
    def llnull(self):
        """
        Value of the constant-only loglikelihood
        """
        model = self.model
        kwds = model._get_init_kwds().copy()
        for key in getattr(model, '_null_drop_keys', []):
            del kwds[key]
        # TODO: what parameters to pass to fit?
        mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
        # TODO: consider catching and warning on convergence failure?
        # in the meantime, try hard to converge. see
        # TestPoissonConstrained1a.test_smoke

        optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()

        if 'start_params' in optim_kwds:
            # user provided
            sp_null = optim_kwds.pop('start_params')
        elif hasattr(model, '_get_start_params_null'):
            # get moment estimates if available
            sp_null = model._get_start_params_null()
        else:
            sp_null = None

        opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
                        disp=0)
        opt_kwds.update(optim_kwds)

        if optim_kwds:
            res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
        else:
            # this should be a reasonably robust default across versions
            res_null = mod_null.fit(start_params=sp_null, method='nm',
                                    warn_convergence=False,
                                    maxiter=10000, disp=0)
            res_null = mod_null.fit(start_params=res_null.params,
                                    method='bfgs', warn_convergence=False,
                                    maxiter=10000, disp=0)

        if getattr(self, '_attach_nullmodel', False) is not False:
            self.res_null = res_null

        return res_null.llf

    @cache_readonly
    def fittedvalues(self):
        """
        Linear predictor XB.
        """
        return np.dot(self.model.exog,
                      self.params[:self.model.exog.shape[1]])

    @cache_readonly
    def resid_response(self):
        """
        Response residuals. The response residuals are defined as
        `endog - fittedvalues`
        """
        return self.model.endog - self.predict()

    @cache_readonly
    def aic(self):
        """
        Akaike information criterion.
        `-2*(llf - p)` where `p` is the number of regressors including the
        intercept.
        """
        return -2*(self.llf - (self.df_model+1))

    @cache_readonly
    def bic(self):
        """
        Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is
        the number of regressors including the intercept.
        """
        return -2*self.llf + np.log(self.nobs)*(self.df_model+1)

    def _get_endog_name(self, yname, yname_list):
        if yname is None:
            yname = self.model.endog_names
        if yname_list is None:
            yname_list = self.model.endog_names
        return yname, yname_list

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Parameters
        ----------
        at : str, optional
            Options are:

            - 'overall', The average of the marginal effects at each
              observation.
            - 'mean', The marginal effects at the mean of each regressor.
            - 'median', The marginal effects at the median of each
              regressor.
            - 'zero', The marginal effects at zero for each regressor.
            - 'all', The marginal effects at each observation. If `at` is
              'all', only margeff will be available from the returned
              object.

            Note that if `exog` is specified, then marginal effects for all
            variables not specified by `exog` are calculated using the `at`
            option.
        method : str, optional
            Options are:

            - 'dydx' - dy/dx - No transformation is made and marginal
              effects are returned.  This is the default.
            - 'eyex' - estimate elasticities of variables in `exog` --
              d(lny)/d(lnx)
            - 'dyex' - estimate semi-elasticity -- dy/d(lnx)
            - 'eydx' - estimate semi-elasticity -- d(lny)/dx

            Note that transformations are done after each observation is
            calculated.  Semi-elasticities for binary variables are
            computed using the midpoint method. 'dyex' and 'eyex' do not
            make sense for discrete variables. For interpretations of
            these methods see notes below.
        atexog : array_like, optional
            Optionally, you can provide the exogenous variables over which
            to get the marginal effects.  This should be a dictionary with
            the zero-indexed column number as the key and the value at
            which to fix that variable as the dictionary value.  Default is
            None for all independent variables less the constant.
        dummy : bool, optional
            If False, treats binary variables (if present) as continuous.
            This is the default.  Else if True, treats binary variables as
            changing from 0 to 1.  Note that any variable that is either 0
            or 1 is treated as binary.  Each binary variable is treated
            separately for now.
        count : bool, optional
            If False, treats count variables (if present) as continuous.
            This is the default.  Else if True, the marginal effect is the
            change in probabilities when each observation is increased by
            one.

        Returns
        -------
        DiscreteMargins : marginal effects instance
            Returns an object that holds the marginal effects, standard
            errors, confidence intervals, etc. See
            `statsmodels.discrete.discrete_margins.DiscreteMargins` for more
            information.

        Notes
        -----
        Interpretations of methods:

        - 'dydx' - change in `endog` for a change in `exog`.
        - 'eyex' - proportional change in `endog` for a proportional change
          in `exog`.
        - 'dyex' - change in `endog` for a proportional change in `exog`.
        - 'eydx' - proportional change in `endog` for a change in `exog`.

        When using after Poisson, returns the expected number of events per
        period, assuming that the model is loglinear.
        """
        from statsmodels.discrete.discrete_margins import DiscreteMargins
        return DiscreteMargins(self, (at, method, atexog, dummy, count))

    def summary(self, yname=None, xname=None, title=None, alpha=.05,
                yname_list=None):
        """
        Summarize the Regression Results.
Parameters ---------- yname : str, optional The name of the endog variable in the tables. The default is `y`. xname : list[str], optional The names for the exogenous variables, default is "var_xx". Must match the number of parameters in the model. title : str, optional Title for the top table. If not None, then this replaces the default title. alpha : float The significance level for the confidence intervals. Returns ------- Summary Class that holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : Class that hold summary results. """ top_left = [('Dep. Variable:', None), ('Model:', [self.model.__class__.__name__]), ('Method:', ['MLE']), ('Date:', None), ('Time:', None), ('converged:', ["%s" % self.mle_retvals['converged']]), ] top_right = [('No. Observations:', None), ('Df Residuals:', None), ('Df Model:', None), ('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]), ('Log-Likelihood:', None), ('LL-Null:', ["%#8.5g" % self.llnull]), ('LLR p-value:', ["%#6.4g" % self.llr_pvalue]) ] if hasattr(self, 'cov_type'): top_left.append(('Covariance Type:', [self.cov_type])) if title is None: title = self.model.__class__.__name__ + ' ' + "Regression Results" # boiler plate from statsmodels.iolib.summary import Summary smry = Summary() yname, yname_list = self._get_endog_name(yname, yname_list) # for top of table smry.add_table_2cols(self, gleft=top_left, gright=top_right, yname=yname, xname=xname, title=title) # for parameters, etc smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha, use_t=self.use_t) if hasattr(self, 'constraints'): smry.add_extra_txt(['Model has been estimated subject to linear ' 'equality constraints.']) return smry def summary2(self, yname=None, xname=None, title=None, alpha=.05, float_format="%.4f"): """ Experimental function to summarize regression results. Parameters ---------- yname : str Name of the dependent variable (optional). xname : list[str], optional List of strings of length equal to the number of parameters Names of the independent variables (optional). title : str, optional Title for the top table. If not None, then this replaces the default title. alpha : float The significance level for the confidence intervals. float_format : str The print format for floats in parameters summary. Returns ------- Summary Instance that contains the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary2.Summary : Class that holds summary results. """ from statsmodels.iolib import summary2 smry = summary2.Summary() smry.add_base(results=self, alpha=alpha, float_format=float_format, xname=xname, yname=yname, title=title) if hasattr(self, 'constraints'): smry.add_text('Model has been estimated subject to linear ' 'equality constraints.') return smry class CountResults(DiscreteResults): __doc__ = _discrete_results_docs % { "one_line_description": "A results class for count data", "extra_attr": ""} @cache_readonly def resid(self): """ Residuals Notes ----- The residuals for Count models are defined as .. math:: y - p where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables are also handled. 
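
        Examples
        --------
        Sketch (assumes ``res`` is a fitted count-model results
        instance)::

            import numpy as np
            resid = res.resid
            # equivalent, by definition:
            np.allclose(resid, res.model.endog - res.predict())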
""" return self.model.endog - self.predict() class NegativeBinomialResults(CountResults): __doc__ = _discrete_results_docs % { "one_line_description": "A results class for NegativeBinomial 1 and 2", "extra_attr": ""} @cache_readonly def lnalpha(self): """Natural log of alpha""" return np.log(self.params[-1]) @cache_readonly def lnalpha_std_err(self): """Natural log of standardized error""" return self.bse[-1] / self.params[-1] @cache_readonly def aic(self): # + 1 because we estimate alpha k_extra = getattr(self.model, 'k_extra', 0) return -2*(self.llf - (self.df_model + self.k_constant + k_extra)) @cache_readonly def bic(self): # + 1 because we estimate alpha k_extra = getattr(self.model, 'k_extra', 0) return -2*self.llf + np.log(self.nobs)*(self.df_model + self.k_constant + k_extra) class GeneralizedPoissonResults(NegativeBinomialResults): __doc__ = _discrete_results_docs % { "one_line_description": "A results class for Generalized Poisson", "extra_attr": ""} @cache_readonly def _dispersion_factor(self): p = getattr(self.model, 'parameterization', 0) mu = self.predict() return (1 + self.params[-1] * mu**p)**2 class L1CountResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for count data fit by l1 regularization", "extra_attr" : _l1_results_attr} def __init__(self, model, cntfit): super(L1CountResults, self).__init__(model, cntfit) # self.trimmed is a boolean array with T/F telling whether or not that # entry in params has been set zero'd out. self.trimmed = cntfit.mle_retvals['trimmed'] self.nnz_params = (~self.trimmed).sum() # Set degrees of freedom. In doing so, # adjust for extra parameter in NegativeBinomial nb1 and nb2 # extra parameter is not included in df_model k_extra = getattr(self.model, 'k_extra', 0) self.df_model = self.nnz_params - 1 - k_extra self.df_resid = float(self.model.endog.shape[0] - self.nnz_params) + k_extra class PoissonResults(CountResults): def predict_prob(self, n=None, exog=None, exposure=None, offset=None, transform=True): """ Return predicted probability of each count level for each observation Parameters ---------- n : array_like or int The counts for which you want the probabilities. If n is None then the probabilities for each count from 0 to max(y) are given. Returns ------- ndarray A nobs x n array where len(`n`) columns are indexed by the count n. If n is None, then column 0 is the probability that each observation is 0, column 1 is the probability that each observation is 1, etc. """ if n is not None: counts = np.atleast_2d(n) else: counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1)) mu = self.predict(exog=exog, exposure=exposure, offset=offset, transform=transform, linear=False)[:,None] # uses broadcasting return stats.poisson.pmf(counts, mu) @property def resid_pearson(self): """ Pearson residuals Notes ----- Pearson residuals are defined to be .. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}} where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of observations sharing the covariate pattern :math:`j`. For now :math:`M_j` is always set to 1. 
""" # Pearson residuals p = self.predict() # fittedvalues is still linear return (self.model.endog - p)/np.sqrt(p) class L1PoissonResults(L1CountResults, PoissonResults): pass class L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults): pass class L1GeneralizedPoissonResults(L1CountResults, GeneralizedPoissonResults): pass class OrderedResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for ordered discrete data." , "extra_attr" : ""} pass class BinaryResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for binary data", "extra_attr" : ""} def pred_table(self, threshold=.5): """ Prediction table Parameters ---------- threshold : scalar Number between 0 and 1. Threshold above which a prediction is considered 1 and below which a prediction is considered 0. Notes ----- pred_table[i,j] refers to the number of times "i" was observed and the model predicted "j". Correct predictions are along the diagonal. """ model = self.model actual = model.endog pred = np.array(self.predict() > threshold, dtype=float) bins = np.array([0, 0.5, 1]) return np.histogram2d(actual, pred, bins=bins)[0] @Appender(DiscreteResults.summary.__doc__) def summary(self, yname=None, xname=None, title=None, alpha=.05, yname_list=None): smry = super(BinaryResults, self).summary(yname, xname, title, alpha, yname_list) fittedvalues = self.model.cdf(self.fittedvalues) absprederror = np.abs(self.model.endog - fittedvalues) predclose_sum = (absprederror < 1e-4).sum() predclose_frac = predclose_sum / len(fittedvalues) # add warnings/notes etext = [] if predclose_sum == len(fittedvalues): # TODO: nobs? wstr = "Complete Separation: The results show that there is" wstr += "complete separation.\n" wstr += "In this case the Maximum Likelihood Estimator does " wstr += "not exist and the parameters\n" wstr += "are not identified." etext.append(wstr) elif predclose_frac > 0.1: # TODO: get better diagnosis wstr = "Possibly complete quasi-separation: A fraction " wstr += "%4.2f of observations can be\n" % predclose_frac wstr += "perfectly predicted. This might indicate that there " wstr += "is complete\nquasi-separation. In this case some " wstr += "parameters will not be identified." etext.append(wstr) if etext: smry.add_extra_txt(etext) return smry @cache_readonly def resid_dev(self): """ Deviance residuals Notes ----- Deviance residuals are defined .. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)} \\right) \\right] \\right)^{1/2} where :math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of observations sharing the covariate pattern :math:`j`. For now :math:`M_j` is always set to 1. """ #These are the deviance residuals #model = self.model endog = self.model.endog #exog = model.exog # M = # of individuals that share a covariate pattern # so M[i] = 2 for i = two share a covariate pattern M = 1 p = self.predict() #Y_0 = np.where(exog == 0) #Y_M = np.where(exog == M) #NOTE: Common covariate patterns are not yet handled res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \ endog*np.sqrt(2*M*np.abs(np.log(p))) return res @cache_readonly def resid_pearson(self): """ Pearson residuals Notes ----- Pearson residuals are defined to be .. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}} where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of observations sharing the covariate pattern :math:`j`. 
        For now :math:`M_j` is always set to 1.
        """
        # Pearson residuals
        #model = self.model
        endog = self.model.endog
        #exog = model.exog
        # M = # of individuals that share a covariate pattern
        # so M[i] = 2 for i = two share a covariate pattern
        # use unique row pattern?
        M = 1
        p = self.predict()
        return (endog - M*p)/np.sqrt(M*p*(1-p))

    @cache_readonly
    def resid_response(self):
        """
        The response residuals

        Notes
        -----
        Response residuals are defined to be

        .. math:: y - p

        where :math:`p=cdf(X\\beta)`.
        """
        return self.model.endog - self.predict()


class LogitResults(BinaryResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Logit Model",
        "extra_attr": ""}

    @cache_readonly
    def resid_generalized(self):
        """
        Generalized residuals

        Notes
        -----
        The generalized residuals for the Logit model are defined as

        .. math:: y - p

        where :math:`p=cdf(X\\beta)`. This is the same as the
        `resid_response` for the Logit model.
        """
        # Generalized residuals
        return self.model.endog - self.predict()


class ProbitResults(BinaryResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Probit Model",
        "extra_attr": ""}

    @cache_readonly
    def resid_generalized(self):
        """
        Generalized residuals

        Notes
        -----
        The generalized residuals for the Probit model are defined as

        .. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)}
        """
        # generalized residuals
        model = self.model
        endog = model.endog
        XB = self.predict(linear=True)
        pdf = model.pdf(XB)
        cdf = model.cdf(XB)
        return endog * pdf/cdf - (1-endog)*pdf/(1-cdf)


class L1BinaryResults(BinaryResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "Results instance for binary data fit by l1 regularization",
            "extra_attr" : _l1_results_attr}

    def __init__(self, model, bnryfit):
        super(L1BinaryResults, self).__init__(model, bnryfit)
        # self.trimmed is a boolean array with T/F telling whether or not
        # that entry in params has been zeroed out.
        self.trimmed = bnryfit.mle_retvals['trimmed']
        self.nnz_params = (~self.trimmed).sum()
        self.df_model = self.nnz_params - 1
        self.df_resid = float(self.model.endog.shape[0] - self.nnz_params)


class MultinomialResults(DiscreteResults):
    __doc__ = _discrete_results_docs % {"one_line_description" :
            "A results class for multinomial data", "extra_attr" : ""}

    def __init__(self, model, mlefit):
        super(MultinomialResults, self).__init__(model, mlefit)
        self.J = model.J
        self.K = model.K

    @staticmethod
    def _maybe_convert_ynames_int(ynames):
        # see if they're integers
        issue_warning = False
        msg = ('endog contains values that are not int-like. Uses string '
               'representation of value. 
Use integer-valued endog to ' 'suppress this warning.') for i in ynames: try: if ynames[i] % 1 == 0: ynames[i] = str(int(ynames[i])) else: issue_warning = True ynames[i] = str(ynames[i]) except TypeError: ynames[i] = str(ynames[i]) if issue_warning: import warnings warnings.warn(msg, SpecificationWarning) return ynames def _get_endog_name(self, yname, yname_list, all=False): """ If all is False, the first variable name is dropped """ model = self.model if yname is None: yname = model.endog_names if yname_list is None: ynames = model._ynames_map ynames = self._maybe_convert_ynames_int(ynames) # use range below to ensure sortedness ynames = [ynames[key] for key in range(int(model.J))] ynames = ['='.join([yname, name]) for name in ynames] if not all: yname_list = ynames[1:] # assumes first variable is dropped else: yname_list = ynames return yname, yname_list def pred_table(self): """ Returns the J x J prediction table. Notes ----- pred_table[i,j] refers to the number of times "i" was observed and the model predicted "j". Correct predictions are along the diagonal. """ ju = self.model.J - 1 # highest index # these are the actual, predicted indices #idx = lzip(self.model.endog, self.predict().argmax(1)) bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju])) return np.histogram2d(self.model.endog, self.predict().argmax(1), bins=bins)[0] @cache_readonly def bse(self): bse = np.sqrt(np.diag(self.cov_params())) return bse.reshape(self.params.shape, order='F') @cache_readonly def aic(self): return -2*(self.llf - (self.df_model+self.model.J-1)) @cache_readonly def bic(self): return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1) def conf_int(self, alpha=.05, cols=None): confint = super(DiscreteResults, self).conf_int(alpha=alpha, cols=cols) return confint.transpose(2,0,1) def margeff(self): raise NotImplementedError("Use get_margeff instead") @cache_readonly def resid_misclassified(self): """ Residuals indicating which observations are misclassified. Notes ----- The residuals for the multinomial model are defined as .. math:: argmax(y_i) \\neq argmax(p_i) where :math:`argmax(y_i)` is the index of the category for the endogenous variable and :math:`argmax(p_i)` is the index of the predicted probabilities for each category. That is, the residual is a binary indicator that is 0 if the category with the highest predicted probability is the same as that of the observed variable and 1 otherwise. """ # it's 0 or 1 - 0 for correct prediction and 1 for a missed one return (self.model.wendog.argmax(1) != self.predict().argmax(1)).astype(float) def summary2(self, alpha=0.05, float_format="%.4f"): """Experimental function to summarize regression results Parameters ---------- alpha : float significance level for the confidence intervals float_format : str print format for floats in parameters summary Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. 
See Also -------- statsmodels.iolib.summary2.Summary : class to hold summary results """ from statsmodels.iolib import summary2 smry = summary2.Summary() smry.add_dict(summary2.summary_model(self)) # One data frame per value of endog eqn = self.params.shape[1] confint = self.conf_int(alpha) for i in range(eqn): coefs = summary2.summary_params((self, self.params[:, i], self.bse[:, i], self.tvalues[:, i], self.pvalues[:, i], confint[i]), alpha=alpha) # Header must show value of endog level_str = self.model.endog_names + ' = ' + str(i) coefs[level_str] = coefs.index coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]] smry.add_df(coefs, index=False, header=True, float_format=float_format) smry.add_title(results=self) return smry class L1MultinomialResults(MultinomialResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for multinomial data fit by l1 regularization", "extra_attr" : _l1_results_attr} def __init__(self, model, mlefit): super(L1MultinomialResults, self).__init__(model, mlefit) # self.trimmed is a boolean array with T/F telling whether or not that # entry in params has been set zero'd out. self.trimmed = mlefit.mle_retvals['trimmed'] self.nnz_params = (~self.trimmed).sum() # Note: J-1 constants self.df_model = self.nnz_params - (self.model.J - 1) self.df_resid = float(self.model.endog.shape[0] - self.nnz_params) #### Results Wrappers #### class OrderedResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(OrderedResultsWrapper, OrderedResults) class CountResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(CountResultsWrapper, CountResults) class NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(NegativeBinomialResultsWrapper, NegativeBinomialResults) class GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(GeneralizedPoissonResultsWrapper, GeneralizedPoissonResults) class PoissonResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(PoissonResultsWrapper, PoissonResults) class L1CountResultsWrapper(lm.RegressionResultsWrapper): pass class L1PoissonResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults) class L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1NegativeBinomialResultsWrapper, L1NegativeBinomialResults) class L1GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1GeneralizedPoissonResultsWrapper, L1GeneralizedPoissonResults) class BinaryResultsWrapper(lm.RegressionResultsWrapper): _attrs = {"resid_dev": "rows", "resid_generalized": "rows", "resid_pearson": "rows", "resid_response": "rows" } _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs, _attrs) wrap.populate_wrapper(BinaryResultsWrapper, BinaryResults) class L1BinaryResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults) class MultinomialResultsWrapper(lm.RegressionResultsWrapper): _attrs = {"resid_misclassified": "rows"} _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs, _attrs) _methods = {'conf_int': 'multivariate_confint'} _wrap_methods = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_methods, _methods) wrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults) class L1MultinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults)
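

# Example usage (a minimal sketch, assuming the public statsmodels API;
# the data and variable names below are illustrative):
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(12345)
#     exog = sm.add_constant(rng.normal(size=(500, 2)))
#     endog = rng.poisson(np.exp(exog @ np.array([0.5, 0.2, -0.1])))
#
#     res = sm.NegativeBinomial(endog, exog, loglike_method='nb2').fit(disp=0)
#     print(res.summary())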
{
  "settings": {
    "createApp": {
      "url": "https://developers.store.qip.ru/applications/new",
      "image": "config.png"
    },
    "copyingKey": {
      "url": "https://developers.store.qip.ru/applications"
    },
    "install": {
      "href": {
        "provider": "https://qip.ru/",
        "docs": "https://store.qip.ru/docs/OpenApi.credentials"
      }
    }
  }
}
#include <elle/das/cli.hh>

#include <numeric>

#include <boost/range/numeric.hpp> // boost::accumulate

#include <elle/Duration.hh>
#include <elle/Exception.hh>
#include <elle/printf.hh>
#include <elle/test.hh>

using namespace std::literals;

ELLE_LOG_COMPONENT("tests.das.cli");

ELLE_DAS_SYMBOL(foo);
ELLE_DAS_SYMBOL(bar);
ELLE_DAS_SYMBOL(baz);
ELLE_DAS_SYMBOL(quux);

#define CHECK_THROW(Expression, Exception, Message)     \
  do {                                                  \
    BOOST_CHECK_THROW(Expression, Exception);           \
    try                                                 \
    {                                                   \
      Expression;                                       \
    }                                                   \
    catch (Exception const& e)                          \
    {                                                   \
      BOOST_TEST(elle::sprintf("%s", e) == Message);    \
    }                                                   \
  } while (false)

static
void
basics()
{
  auto const f =
    [] (std::string const& a, std::string const& b)
    {
      return a + b;
    };
  auto const proto = elle::das::named::prototype(foo, baz);
  namespace cli = elle::das::cli;

  BOOST_TEST(cli::call(proto, f, {"--foo", "bar", "--baz", "quux"})
             == "barquux");
  BOOST_TEST(cli::call(proto, f, {"--baz", "quux", "--foo", "bar"})
             == "barquux");
  BOOST_TEST(cli::call(proto, f, {"--foo=bar", "--baz", "quux"})
             == "barquux");
  BOOST_TEST(cli::call(proto, f, {"--foo=bar", "--baz=--quux"})
             == "bar--quux");
  BOOST_TEST(cli::call(proto, f, {"--foo=", "--baz="}) == "");
  CHECK_THROW(
    cli::call(proto, f, {"--foo", "foo", "--baz"}),
    cli::ValuelessOption,
    "option requires an argument: --baz");
  CHECK_THROW(
    cli::call(proto, f, {"--foo", "bar", "--baz", "x", "--bar", "quux"}),
    cli::UnknownOption,
    "unknown option: --bar");
  CHECK_THROW(
    cli::call(proto, f, {"--foo", "bar", "garbage", "--baz", "quux"}),
    cli::UnrecognizedValue,
    "extra unrecognized argument: garbage");
  CHECK_THROW(
    cli::call(proto, f, {"--foo", "bar"}),
    cli::MissingOption,
    "missing option: --baz");
  // Regression: at some point our algorithm could first look for
  // --baz and its argument, leaving --foo and foo together.  We don't
  // want to support that.
CHECK_THROW( cli::call(proto, f, {"--foo", "--baz", "baz", "foo"}), cli::ValuelessOption, "option requires an argument: --foo"); CHECK_THROW( cli::call(proto, f, {"--foo", "bar", "--baz", "quux", "--foo", "foo"}), cli::DuplicateOption, "duplicate option: --foo"); } namespace conversions { template <typename I> void check(std::string const& too_little, std::string const& too_big) { using elle::das::cli::call; ELLE_LOG("check %s: %s and %s", elle::type_info<I>(), too_little, too_big); auto constexpr max = std::numeric_limits<I>::max(); auto constexpr min = std::numeric_limits<I>::min(); auto const proto = elle::das::named::prototype(foo); auto const f = [] (I i) { return i; }; BOOST_TEST(call(proto, f, {"--foo", std::to_string(max)}) == max); BOOST_TEST(call(proto, f, {"--foo", std::to_string(min)}) == min); CHECK_THROW(call(proto, f, {"--foo", too_big}), elle::das::cli::ValueError, "invalid value \"" + too_big + "\" for option --foo: integer out of range"); CHECK_THROW(call(proto, f, {"--foo", too_little}), elle::das::cli::ValueError, "invalid value \"" + too_little + "\" for option --foo: integer out of range"); } template <typename I> void check() { static_assert(std::is_integral<I>::value, ""); using Min = int64_t; auto constexpr min = Min{std::numeric_limits<I>::min()}; using Max = std::conditional_t<std::is_signed<I>::value, int64_t, uint64_t>; auto constexpr max = Max{std::numeric_limits<I>::max()}; check<I>(std::to_string(min - 1), std::to_string(max + 1)); } static void integers() { check<int8_t>(); check<int16_t>(); check<int32_t>(); check<int64_t>("-9223372036854775809", "9223372036854775808"); check<uint8_t>(); check<uint16_t>(); check<uint32_t>(); check<uint64_t>("-1", "18446744073709551616"); { auto const proto = elle::das::named::prototype(foo); { auto const f = [] (int i) {return i;}; CHECK_THROW( elle::das::cli::call(proto, f, {"--foo", "lol"}), elle::das::cli::ValueError, "invalid value \"lol\" for option --foo: invalid integer"); } // Verify we check for trailing garbage. 
{ auto const f = [] (unsigned i) {return i;}; CHECK_THROW( elle::das::cli::call( proto, f, {"--foo", "007 james bond"}), elle::das::cli::ValueError, "invalid value \"007 james bond\" for option --foo: invalid integer"); } { auto const f = [] (signed i) {return i;}; CHECK_THROW( elle::das::cli::call( proto, f, {"--foo", "-666numberofthebeast"}), elle::das::cli::ValueError, "invalid value \"-666numberofthebeast\" for option --foo: invalid integer"); } } } static void multiple_strings() { using elle::das::cli::call; auto const f = [] (std::vector<std::string> const& strings) { return boost::accumulate( strings, std::string(""), [] (std::string const& a, std::string const& b) { return a + "-" + b; }); }; auto const proto = elle::das::named::prototype(foo); BOOST_TEST(call(proto, f, {}) == ""); BOOST_TEST(call(proto, f, {"--foo", "foo"}) == "-foo"); BOOST_TEST(call(proto, f, {"--foo", "foo", "--foo", "bar"}) == "-foo-bar"); } static void boolean() { using elle::das::cli::call; { auto const f = [] (int expected, bool enabled) { BOOST_TEST(bool(expected) == enabled); }; auto const named = elle::das::named::function(f, foo, bar = false); call(named, {"--foo", "0"}); call(named, {"--foo", "1", "--bar"}); call(named, {"--bar", "--foo", "1"}); call(named, {"--foo", "1", "--bar", "true"}); call(named, {"--foo", "0", "--bar", "false"}); } { auto const f = [] (bool b) { BOOST_CHECK(!b); }; call(elle::das::named::function(f, foo), {}); } { auto const f = [] (bool foo, int bar) { return elle::sprintf("%s %s", foo, bar); }; auto const named = elle::das::named::function(f, foo = false, bar = 20); BOOST_TEST(call(named, {}) == "false 20"); BOOST_TEST(call(named, {"--foo", "true", "--bar", "33"}) == "true 33"); BOOST_TEST(call(named, {"--bar", "33"}) == "false 33"); BOOST_TEST(call(named, {"--foo", "--bar", "33"}) == "true 33"); CHECK_THROW(call(named, {"--foo", "--bar", "33", "--BAD"}), elle::das::cli::UnknownOption, "unknown option: --BAD"); // Because of the way we parse the options and their argument, // we may extract "--bar 33" first, leaving "--foo BAD1 BAD2", // which results in a complaint about BAD1 not being valid for // --foo, instead of being an extra argument. // // FIXME: Of course this pairing of --foo with BAD1 is a // problem. try { call(named, {"--foo", "--bar", "33", "BAD1", "BAD2"}); BOOST_FAIL("did not raise an exception"); } catch (elle::das::cli::UnrecognizedValue const& e) { BOOST_TEST(elle::sprintf("%s", e) == "extra unrecognized argument: BAD1"); } catch (elle::das::cli::OptionValueError const& e) { BOOST_TEST(elle::sprintf("%s", e) == "invalid value \"BAD1\" for option --foo: invalid boolean"); } catch (...) 
    {
      BOOST_FAIL("unexpected exception: " + elle::exception_string());
    }
  }
}

static
void
multiple_integers()
{
  using elle::das::cli::call;
  auto const f = [] (std::vector<int> ints)
    {
      return ints;
    };
  auto const proto = elle::das::named::prototype(foo);
  BOOST_TEST(call(proto, f, {}) == std::vector<int>{});
  BOOST_TEST(call(proto, f, {"--foo", "2"}) == std::vector<int>{2});
  BOOST_TEST(call(proto, f, {"--foo", "2", "--foo", "3"})
             == std::vector<int>({2, 3}));
}

static
void
durations()
{
  using elle::das::cli::call;
  auto const f = [] (elle::Duration d)
    {
      return d;
    };
  auto const proto = elle::das::named::prototype(foo);
  BOOST_TEST_MESSAGE(call(proto, f, {"--foo", "10ms"}));
  BOOST_TEST(call(proto, f, {"--foo", "10ms"}) == 10ms);
  BOOST_TEST(call(proto, f, {"--foo", "123min"}) == 123min);
  BOOST_TEST(call(proto, f, {"--foo", "10h"}) == 10h);
}
}

static
void
defaults()
{
  using elle::das::cli::call;
  auto const f =
    [] (std::string const& foo, int baz)
    {
      return foo + std::to_string(baz);
    };
  auto const proto = elle::das::named::prototype(foo = "16", baz = 42);
  BOOST_TEST(call(proto, f, {}) == "1642");
  BOOST_TEST(call(proto, f, {"--baz", "64"}) == "1664");
  BOOST_TEST(call(proto, f, {"--foo", "51"}) == "5142");
  BOOST_TEST(call(proto, f, {"--baz", "23", "--foo", "01"}) == "0123");
}

static
void
defaulted()
{
  using elle::das::cli::call;
  auto const f = elle::das::named::function(
    [] (elle::Defaulted<bool> b)
    {
      return std::make_pair(bool(b), b.get());
    },
    foo = elle::defaulted(false));
  BOOST_TEST(call(f, {}) == std::make_pair(false, false));
  BOOST_TEST(call(f, {"--foo"}) == std::make_pair(true, true));
}

static
void
flag()
{
  auto const f =
    [] (std::string const& foo, int bar)
    {
      return foo + std::to_string(bar);
    };
  auto const proto = elle::das::named::prototype(foo, bar);
  // Beware that the order of parsing options and of conversion of
  // their argument values is undefined.  So be sure to have a single
  // type of error, to avoid error messages that differ depending on
  // the platform.
namespace cli = elle::das::cli; BOOST_CHECK_THROW(cli::call(proto, f, {"--foo", "--bar", "12"}), cli::ValuelessOption); BOOST_CHECK_THROW(cli::call(proto, f, {"--foo", "foo", "--bar"}), cli::ValuelessOption); BOOST_CHECK_THROW(cli::call(proto, f, {"--foo", "foo", "--bar", "bar"}), cli::OptionValueError); } ELLE_DAS_SYMBOL(composite_option); static void dash() { auto const f = [] (int foo) { return foo; }; auto const proto = elle::das::named::prototype(composite_option = 0); { BOOST_CHECK_EQUAL( elle::das::cli::call(proto, f, {"--composite-option", "193"}), 193); CHECK_THROW( elle::das::cli::call(proto, f, {"--composite_option", "193"}), elle::das::cli::UnknownOption, "unknown option: --composite_option"); } } namespace short_options_ct { ELLE_DAS_CLI_SYMBOL(foo, 'f', ""); ELLE_DAS_CLI_SYMBOL(bar, 'b', ""); static void compile_time() { using elle::das::cli::call; auto const f = [] (int foo, int bar) { return foo + bar; }; auto const proto = elle::das::named::prototype(foo, bar); BOOST_CHECK_EQUAL(call(proto, f, {"--foo", "1", "-b", "2"}), 3); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "3", "--bar", "4"}), 7); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "5", "-b", "6"}), 11); } } namespace short_options_rt { static void run_time() { using elle::das::cli::call; auto const f = [] (int foo, int bar) { return foo + bar; }; auto const proto = elle::das::named::prototype(foo, bar); { elle::das::cli::Options opts = { {"foo", {'f', ""}}, {"bar", {'b', ""}}, }; BOOST_CHECK_EQUAL(call(proto, f, {"--foo", "1", "-b", "2"}, opts), 3); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "3", "--bar", "4"}, opts), 7); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "5", "-b", "6"}, opts), 11); } } } namespace positional_ct { ELLE_DAS_CLI_SYMBOL(foo, ""); ELLE_DAS_CLI_SYMBOL(bar, 'b', "", true); ELLE_DAS_CLI_SYMBOL(quux, 'q', "", true); static void compile_time() { using elle::das::cli::call; auto const f = [] (int foo, int bar) { return foo + bar; }; { auto const proto = elle::das::named::prototype(foo, bar); BOOST_TEST(call(proto, f, {"--foo", "1", "-b", "2"}) == 3); BOOST_TEST(call(proto, f, {"--foo", "3", "4"}) == 7); BOOST_TEST(call(proto, f, {"6", "--foo", "5"}) == 11); } { auto const proto = elle::das::named::prototype(foo = 1, bar = 2); BOOST_TEST(call(proto, f, {"247"}) == 248); } { auto const f = [] (std::vector<int> const& ints) { return boost::accumulate(ints, 0); }; auto const proto = elle::das::named::prototype(bar); BOOST_TEST(call(proto, f, {"1", "2", "3"}) == 6); CHECK_THROW(call(proto, f, {"-b", "1", "2", "3"}), elle::das::cli::UnrecognizedValue, "extra unrecognized argument: 2"); } { auto const f = elle::das::named::function( [] (int b, int q) { return b - q; }, bar, quux); BOOST_TEST(call(f, {"-b", "1", "-q", "2"}) == -1); // Not handled yet. 
// BOOST_CHECK_EQUAL(call(f, {"1", "2"}), -1); } } } namespace positional_rt { static void run_time() { using elle::das::cli::call; auto const f = [] (int foo, int bar) { return foo + bar; }; elle::das::cli::Options opts = { {"foo", {'f', "", false}}, {"bar", {'b', "", true}}, }; { auto const proto = elle::das::named::prototype(foo, bar); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "1", "-b", "2"}, opts), 3); BOOST_CHECK_EQUAL(call(proto, f, {"-f", "3", "4"}, opts), 7); BOOST_CHECK_EQUAL(call(proto, f, {"6", "-f", "5"}, opts), 11); } { auto const proto = elle::das::named::prototype(foo = 1, bar = 2); BOOST_CHECK_EQUAL(call(proto, f, {"247"}, opts), 248); } { auto const f = [] (std::vector<int> const& ints) { return boost::accumulate(ints, 0); }; auto const proto = elle::das::named::prototype(bar); BOOST_CHECK_EQUAL(call(proto, f, {"1", "2", "3"}, opts), 6); CHECK_THROW(call(proto, f, {"-b", "1", "2", "3"}, opts), elle::das::cli::UnrecognizedValue, "extra unrecognized argument: 2"); } } } static void serialization() { using elle::das::cli::call; auto const f = [] (elle::Version v) { return v; }; auto const proto = elle::das::named::prototype(foo); BOOST_CHECK_EQUAL(call(proto, f, {"--foo", "0.1.2"}), elle::Version(0, 1, 2)); } static void help() { auto const help = [](auto&&... args) { return elle::sprintf("%s", elle::das::cli::help( elle::das::named::prototype( std::forward<decltype(args)>(args)...))); }; BOOST_TEST(help(foo) == " --foo arg \n"); BOOST_TEST(help(foo = 42) == " --foo arg (default: 42)\n"); BOOST_TEST(help(foo = false) == " --foo \n"); BOOST_TEST(help(foo = true) == " --foo \n"); } ELLE_TEST_SUITE() { auto& master = boost::unit_test::framework::master_test_suite(); master.add(BOOST_TEST_CASE(basics)); { auto conversions = BOOST_TEST_SUITE("conversions"); master.add(conversions); using namespace conversions; conversions->add(BOOST_TEST_CASE(integers)); conversions->add(BOOST_TEST_CASE(boolean)); conversions->add(BOOST_TEST_CASE(multiple_strings)); conversions->add(BOOST_TEST_CASE(multiple_integers)); conversions->add(BOOST_TEST_CASE(durations)); } master.add(BOOST_TEST_CASE(defaults)); master.add(BOOST_TEST_CASE(defaulted)); master.add(BOOST_TEST_CASE(flag)); { auto suite = BOOST_TEST_SUITE("short_options"); master.add(suite); using namespace short_options_ct; using namespace short_options_rt; suite->add(BOOST_TEST_CASE(compile_time)); suite->add(BOOST_TEST_CASE(run_time)); } master.add(BOOST_TEST_CASE(dash)); { auto suite = BOOST_TEST_SUITE("positional"); master.add(suite); using namespace positional_ct; using namespace positional_rt; suite->add(BOOST_TEST_CASE(compile_time)); suite->add(BOOST_TEST_CASE(run_time)); } master.add(BOOST_TEST_CASE(serialization)); master.add(BOOST_TEST_CASE(help)); }
{ "name": "APLIntroView", "version": "0.4.0", "summary": "This piece of information is pretty invalid", "description": "* APLIntroView uses a single ViewController to extend the launch screen by displaying the launch image \n* or showing a video with configurable options.", "homepage": "https://github.com/apploft/APLIntroView", "swift_versions": "5.0", "license": { "type": "MIT", "file": "LICENSE" }, "authors": "<NAME>", "platforms": { "ios": "10.0" }, "source": { "git": "https://github.com/apploft/APLIntroView.git", "tag": "0.4.0" }, "source_files": [ "APLIntroView", "APLIntroView/**/*.{swift,h}" ], "exclude_files": "APLIntroView/Exclude", "frameworks": "UIKit", "requires_arc": true, "dependencies": { "APLVideoPlayerView": [ "~> 0.0.6" ] } }
/* * Copyright 2019 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.camera.core.internal; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.RequiresApi; import androidx.camera.core.impl.ReadableConfig; import java.util.concurrent.Executor; /** * Configuration containing IO related options. */ @RequiresApi(21) // TODO(b/200306659): Remove and replace with annotation on package-info.java public interface IoConfig extends ReadableConfig { // Option Declarations: // ********************************************************************************************* /** * Option: camerax.core.io.ioExecutor */ Option<Executor> OPTION_IO_EXECUTOR = Option.create("camerax.core.io.ioExecutor", Executor.class); // ********************************************************************************************* /** * Returns the executor that will be used for IO tasks. * * @param valueIfMissing The value to return if this configuration option has not been set. * @return The stored value or <code>valueIfMissing</code> if the value does not exist in this * configuration. */ @Nullable default Executor getIoExecutor(@Nullable Executor valueIfMissing) { return retrieveOption(OPTION_IO_EXECUTOR, valueIfMissing); } /** * Returns the executor that will be used for IO tasks. * * @return The stored value, if it exists in this configuration. * @throws IllegalArgumentException if the option does not exist in this configuration. */ @NonNull default Executor getIoExecutor() { return retrieveOption(OPTION_IO_EXECUTOR); } /** * Builder for a {@link IoConfig}. * * @param <B> The top level builder type for which this builder is composed with. */ interface Builder<B> { /** * Sets the default executor that will be used for IO tasks. * * @param executor The executor which will be used for IO tasks. * @return the current Builder. */ @NonNull B setIoExecutor(@NonNull Executor executor); } }
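// Editor's note: a minimal usage sketch (added by the editor, not part of the original
// sources) showing how a consumer of IoConfig might retrieve the IO executor with a
// fallback. The config instance and the work submitted are assumptions for illustration.

import androidx.camera.core.internal.IoConfig;

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

class IoConfigUsageSketch {
    void runIoTask(IoConfig config) {
        // Fall back to a fresh single-thread executor if no IO executor was configured.
        Executor io = config.getIoExecutor(Executors.newSingleThreadExecutor());
        io.execute(() -> {
            // hypothetical IO work, e.g. writing a captured image to disk
        });
    }
}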
/******************************************************************************* * Copyright (c) 2008, 2011 Sonatype Inc. and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Sonatype Inc. - initial API and implementation *******************************************************************************/ package com.simpligility.maven.plugins.android.common; import org.apache.maven.artifact.Artifact; import org.apache.maven.artifact.repository.ArtifactRepository; import org.apache.maven.artifact.resolver.ArtifactResolutionRequest; import org.apache.maven.artifact.resolver.ArtifactResolutionResult; import org.apache.maven.artifact.resolver.ArtifactResolver; import org.apache.maven.plugin.MojoExecutionException; import org.codehaus.plexus.logging.Logger; import java.io.File; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; /** * Provides convenient functions for resolving artifacts. */ public final class ArtifactResolverHelper { /** * Which dependency scopes should be excluded when packing dependencies into the apk. */ public static final List<String> EXCLUDE_NON_PACKAGED_SCOPES = Arrays.asList( Artifact.SCOPE_PROVIDED, Artifact.SCOPE_IMPORT ); private final ArtifactResolver artifactResolver; private final Logger log; private final List<ArtifactRepository> remoteArtifactRepositories; /** * Creates an ArtifactResolver that has no remote repositories to resolve against. */ public ArtifactResolverHelper( ArtifactResolver artifactResolver, Logger log ) { this( artifactResolver, log, Collections.<ArtifactRepository>emptyList() ); } public ArtifactResolverHelper( ArtifactResolver artifactResolver, Logger log, final List<ArtifactRepository> remoteArtifactRepositories ) { this.artifactResolver = artifactResolver; this.log = log; this.remoteArtifactRepositories = remoteArtifactRepositories; } /** * Filters provided artifacts and selects only defined types based on {@code types} argument * or all types if {@code types} argument is empty. * * @param allArtifacts artifacts to be filtered * @param types artifact types to be selected * @return a {@code List} of all project dependencies. Never {@code null}. * This excludes artifacts of the {@code EXCLUDED_DEPENDENCY_SCOPES} scopes. * And this should maintain dependency order to comply with library project resource precedence. */ public Set<Artifact> getFilteredArtifacts( Iterable<Artifact> allArtifacts, String... types ) { return getFilteredArtifacts( EXCLUDE_NON_PACKAGED_SCOPES, allArtifacts, types ); } /** * Filters provided artifacts and selects only defined types based on {@code types} argument * or all types if {@code types} argument is empty. * * @param filteredScopes scopes to be filtered * @param allArtifacts artifacts to be filtered * @param types artifact types to be selected * @return a {@code List} of all project dependencies. Never {@code null}. * This should maintain dependency order to comply with library project resource precedence. */ public Set<Artifact> getFilteredArtifacts( List<String> filteredScopes, Iterable<Artifact> allArtifacts, String... 
types ) { final List<String> acceptTypeList = Arrays.asList( types ); boolean acceptAllArtifacts = acceptTypeList.isEmpty(); final Set<Artifact> results = new LinkedHashSet<Artifact>(); for ( Artifact artifact : allArtifacts ) { if ( artifact == null ) { continue; } if ( filteredScopes.contains( artifact.getScope() ) ) { continue; } if ( acceptAllArtifacts || acceptTypeList.contains( artifact.getType() ) ) { results.add( artifact ); } } return results; } /** * Attempts to resolve an {@link org.apache.maven.artifact.Artifact} to a {@link java.io.File}. * * @param artifact to resolve * @return a {@link java.io.File} to the resolved artifact, never <code>null</code>. * @throws org.apache.maven.plugin.MojoExecutionException if the artifact could not be resolved. */ public File resolveArtifactToFile( Artifact artifact ) throws MojoExecutionException { final Artifact resolvedArtifact = resolveArtifact( artifact ); final File jar = resolvedArtifact.getFile(); if ( jar == null ) { throw new MojoExecutionException( "Could not resolve artifact " + artifact.getId() + ". Please install it with \"mvn install:install-file ...\" or deploy it to a repository " + "with \"mvn deploy:deploy-file ...\"" ); } return jar; } public Set<Artifact> resolveArtifacts( Collection<Artifact> artifacts ) throws MojoExecutionException { final Set<Artifact> resolvedArtifacts = new LinkedHashSet<Artifact>(); for ( final Artifact artifact : artifacts ) { resolvedArtifacts.add( resolveArtifact( artifact ) ); } return resolvedArtifacts; } /** * Resolves an artifact to a particular repository. * * @param artifact Artifact to resolve * @return fully resolved artifact. */ private Artifact resolveArtifact( Artifact artifact ) throws MojoExecutionException { final ArtifactResolutionRequest artifactResolutionRequest = new ArtifactResolutionRequest(); artifactResolutionRequest.setArtifact( artifact ); if ( remoteArtifactRepositories != null && !remoteArtifactRepositories.isEmpty() ) { artifactResolutionRequest.setRemoteRepositories( remoteArtifactRepositories ); } final ArtifactResolutionResult resolutionResult = this.artifactResolver.resolve( artifactResolutionRequest ); log.debug( "Resolving : " + artifact ); if ( resolutionResult.getArtifacts().size() == 0 ) { throw new MojoExecutionException( "Could not resolve artifact " + artifact + ". Please install it with \"mvn install:install-file ...\" or deploy it to a repository " + "with \"mvn deploy:deploy-file ...\"" ); } if ( resolutionResult.getArtifacts().size() > 1 ) { log.debug( "Resolved artifacts : " + resolutionResult.getArtifacts() ); throw new MojoExecutionException( "Could not resolve artifact " + artifact + " to single target. Found the following possible options : " + resolutionResult.getArtifacts() ); } final Artifact resolvedArtifact = resolutionResult.getArtifacts().iterator().next(); log.debug( "Resolved : " + resolvedArtifact ); return resolvedArtifact; } }
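// Editor's note: a hedged usage sketch (not from the original sources) of the helper
// above: filter a project's dependencies down to AAR artifacts and resolve each one to
// a local file. The MavenProject instance is an assumption for illustration.

import java.io.File;
import java.util.Set;

import org.apache.maven.artifact.Artifact;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.project.MavenProject;

import com.simpligility.maven.plugins.android.common.ArtifactResolverHelper;

class ArtifactResolverHelperSketch {
    void resolveAars(ArtifactResolverHelper helper, MavenProject project)
            throws MojoExecutionException {
        // Non-packaged scopes (provided, import) are filtered out by default.
        Set<Artifact> aars = helper.getFilteredArtifacts(project.getArtifacts(), "aar");
        for (Artifact aar : aars) {
            // Never null; throws MojoExecutionException when resolution fails.
            File file = helper.resolveArtifactToFile(aar);
            // ... unpack or merge the AAR contents here
        }
    }
}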
/* * Copyright 2016 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.apps.forscience.ble; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import android.content.Intent; import android.content.IntentFilter; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; /** Tests for {@link BleEvents} */ @RunWith(RobolectricTestRunner.class) public class BleEventsTest { private IntentFilter intentFilter; private final String TEST_ACTION = BleEvents.GATT_CONNECT; private final String FILTER_ADDRESS = "AA:BB:CC:DD:EE:FF"; @Before public void setUp() { intentFilter = BleEvents.createIntentFilter(FILTER_ADDRESS); } @Test public void testIntentFilterMismatchAddress() { Intent mismatchAddressIntent = BleEvents.createIntent(TEST_ACTION, "FF:EE:DD:CC:BB:AA"); assertTrue(intentFilter.hasAction(mismatchAddressIntent.getAction())); assertFalse(intentFilter.hasDataAuthority(mismatchAddressIntent.getData())); } @Test public void testIntentFilterMismatchAction() { Intent mismatchActionIntent = BleEvents.createIntent("UNKNOWN", FILTER_ADDRESS); assertFalse(intentFilter.hasAction(mismatchActionIntent.getAction())); assertTrue(intentFilter.hasDataAuthority(mismatchActionIntent.getData())); } @Test public void testIntentFilterMatchAll() { Intent matchIntent = BleEvents.createIntent(TEST_ACTION, FILTER_ADDRESS); assertTrue(intentFilter.hasAction(matchIntent.getAction())); assertTrue(intentFilter.hasDataAuthority(matchIntent.getData())); } }
package io.confluent.developer; import io.micronaut.configuration.kafka.annotation.KafkaListener; import io.micronaut.configuration.kafka.annotation.OffsetReset; import jakarta.inject.Inject; import org.apache.kafka.clients.consumer.ConsumerRecord; import io.micronaut.configuration.kafka.annotation.Topic; import io.confluent.developer.avro.Pizza; @KafkaListener(offsetReset = OffsetReset.EARLIEST) public class PizzaConsumer { final String completedPizzaTopic = "pizza-with-veggies"; @Inject private PizzaService pizzaService; @Topic(completedPizzaTopic) public void receive(ConsumerRecord<String, Pizza> record) { Pizza pizza = record.value(); String orderId = record.key(); pizzaService.addToOrder(orderId, pizza); } }
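// Editor's note: a sketch of the producing side that would feed the consumer above,
// using Micronaut Kafka's declarative @KafkaClient. This interface is an editor's
// assumption added for illustration; it is not part of the original sources.

import io.micronaut.configuration.kafka.annotation.KafkaClient;
import io.micronaut.configuration.kafka.annotation.KafkaKey;
import io.micronaut.configuration.kafka.annotation.Topic;

import io.confluent.developer.avro.Pizza;

@KafkaClient
public interface PizzaProducer {

    // Keyed by order id so all pizzas for one order land in the same partition,
    // which is what lets the consumer above group them by key.
    @Topic("pizza-with-veggies")
    void send(@KafkaKey String orderId, Pizza pizza);
}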
from collections import namedtuple
from functools import partial
from typing import List

import torch
from torchaudio.datasets import LibriMix

from . import wsj0mix

Batch = namedtuple("Batch", ["mix", "src", "mask"])


def get_dataset(dataset_type, root_dir, num_speakers, sample_rate, task=None, librimix_tr_split=None):
    if dataset_type == "wsj0mix":
        train = wsj0mix.WSJ0Mix(root_dir / "tr", num_speakers, sample_rate)
        validation = wsj0mix.WSJ0Mix(root_dir / "cv", num_speakers, sample_rate)
        evaluation = wsj0mix.WSJ0Mix(root_dir / "tt", num_speakers, sample_rate)
    elif dataset_type == "librimix":
        train = LibriMix(root_dir, librimix_tr_split, num_speakers, sample_rate, task)
        validation = LibriMix(root_dir, "dev", num_speakers, sample_rate, task)
        evaluation = LibriMix(root_dir, "test", num_speakers, sample_rate, task)
    else:
        raise ValueError(f"Unexpected dataset: {dataset_type}")
    return train, validation, evaluation


def _fix_num_frames(sample: wsj0mix.SampleType, target_num_frames: int, sample_rate: int, random_start=False):
    """Ensure waveform has exact number of frames by slicing or padding"""
    mix = sample[1]  # [1, time]
    src = torch.cat(sample[2], 0)  # [num_sources, time]

    num_channels, num_frames = src.shape
    num_seconds = torch.div(num_frames, sample_rate, rounding_mode="floor")
    target_seconds = torch.div(target_num_frames, sample_rate, rounding_mode="floor")
    if num_frames >= target_num_frames:
        if random_start and num_frames > target_num_frames:
            start_frame = torch.randint(num_seconds - target_seconds + 1, [1]) * sample_rate
            mix = mix[:, start_frame:]
            src = src[:, start_frame:]
        mix = mix[:, :target_num_frames]
        src = src[:, :target_num_frames]
        mask = torch.ones_like(mix)
    else:
        num_padding = target_num_frames - num_frames
        pad = torch.zeros([1, num_padding], dtype=mix.dtype, device=mix.device)
        mix = torch.cat([mix, pad], 1)
        src = torch.cat([src, pad.expand(num_channels, -1)], 1)
        mask = torch.ones_like(mix)
        mask[..., num_frames:] = 0
    return mix, src, mask


def collate_fn_wsj0mix_train(samples: List[wsj0mix.SampleType], sample_rate, duration):
    target_num_frames = int(duration * sample_rate)

    mixes, srcs, masks = [], [], []
    for sample in samples:
        mix, src, mask = _fix_num_frames(sample, target_num_frames, sample_rate, random_start=True)
        mixes.append(mix)
        srcs.append(src)
        masks.append(mask)

    return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))


def collate_fn_wsj0mix_test(samples: List[wsj0mix.SampleType], sample_rate):
    max_num_frames = max(s[1].shape[-1] for s in samples)

    mixes, srcs, masks = [], [], []
    for sample in samples:
        mix, src, mask = _fix_num_frames(sample, max_num_frames, sample_rate, random_start=False)
        mixes.append(mix)
        srcs.append(src)
        masks.append(mask)

    return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))


def get_collate_fn(dataset_type, mode, sample_rate=None, duration=4):
    assert mode in ["train", "test"]
    if dataset_type in ["wsj0mix", "librimix"]:
        if mode == "train":
            if sample_rate is None:
                raise ValueError("sample_rate is not given.")
            return partial(collate_fn_wsj0mix_train, sample_rate=sample_rate, duration=duration)
        return partial(collate_fn_wsj0mix_test, sample_rate=sample_rate)
    raise ValueError(f"Unexpected dataset: {dataset_type}")
''' Students are asked to stand in non-decreasing order of heights for an annual photo. Return the minimum number of students not standing in the right positions. (This is the number of students that must move in order for all students to be standing in non-decreasing order of height.) Example 1: Input: [1,1,4,2,1,3] Output: 3 Explanation: Students with heights 4, 3 and the last 1 are not standing in the right positions. Note: 1 <= heights.length <= 100 1 <= heights[i] <= 100 ''' class Solution(object): def heightChecker(self, heights): """ :type heights: List[int] :rtype: int """ result = 0 for new_h, hei in zip(heights, sorted(heights)): if new_h != hei: result += 1 return result
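// Editor's note: an alternative sketch (added by the editor, not in the original) that
// avoids the full sort by exploiting the constraint 1 <= heights[i] <= 100: count the
// heights, then walk the expected non-decreasing order with a cursor. O(n + 100) time.

class HeightCheckerSketch {
    static int heightChecker(int[] heights) {
        int[] count = new int[101];
        for (int h : heights) {
            count[h]++;
        }
        int mismatches = 0;
        int expected = 0; // cursor over the heights in sorted order
        for (int h : heights) {
            while (count[expected] == 0) {
                expected++; // advance to the next height that should appear here
            }
            if (expected != h) {
                mismatches++;
            }
            count[expected]--;
        }
        return mismatches; // e.g. [1,1,4,2,1,3] -> 3
    }
}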
#include "api.h"

static DWORD
RpcSvcCreateDomainSocketPath(
    PCSTR pszPath
    );

static DWORD
RpcSvcCreateDirectory(
    PSTR pszDirName,
    mode_t DirMode
    );

static DWORD
RpcSvcInitServerBinding(
    rpc_binding_vector_p_t *ppSrvBinding,
    PENDPOINT pEndPoints
    );

static DWORD
RpcSvcCreateDomainSocketPath(
    PCSTR pszPath
    )
{
    const mode_t PathMode = 0655;
    const mode_t DirMode = 0755;
    DWORD dwError = 0;
    PSTR pszSocketPath = NULL;
    PSTR pszSocketName = NULL;
    PSTR pszDirName = NULL;

    dwError = LwAllocateString(pszPath, &pszSocketPath);
    BAIL_ON_LSA_ERROR(dwError);

    pszSocketName = strrchr(pszSocketPath, '/');
    if (!pszSocketName)
    {
        dwError = LW_ERROR_INVALID_PARAMETER;
        BAIL_ON_LSA_ERROR(dwError);
    }
    *(pszSocketName++) = '\0';
    pszDirName = pszSocketPath;

    dwError = RpcSvcCreateDirectory(pszDirName, PathMode);
    BAIL_ON_LSA_ERROR(dwError);

    if (chmod(pszDirName, DirMode))
    {
        dwError = LwMapErrnoToLwError(errno);
        BAIL_ON_LSA_ERROR(dwError);
    }

cleanup:
    if (pszSocketPath)
    {
        LW_SAFE_FREE_STRING(pszSocketPath);
    }
    return dwError;

error:
    goto cleanup;
}

static DWORD
RpcSvcCreateDirectory(
    PSTR pszDirPath,
    mode_t DirMode
    )
{
    DWORD dwError = 0;
    struct stat statbuf;
    PSTR pszSlash = NULL;

    /* Walk the path, creating each intermediate directory as needed */
    for (pszSlash = strchr(pszDirPath, '/');
         pszSlash != NULL;
         pszSlash = strchr(pszSlash + 1, '/'))
    {
        if (pszSlash == pszDirPath)
        {
            continue;
        }
        *pszSlash = '\0';

        if (stat(pszDirPath, &statbuf) == 0)
        {
            /* Make sure it's a directory */
            if (!S_ISDIR(statbuf.st_mode))
            {
                dwError = ERROR_FILE_NOT_FOUND;
                BAIL_ON_LSA_ERROR(dwError);
            }
        }
        else
        {
            /* Create it */
            if (mkdir(pszDirPath, DirMode))
            {
                dwError = LwMapErrnoToLwError(errno);
                BAIL_ON_LSA_ERROR(dwError);
            }
        }
        *pszSlash = '/';
    }

    if (stat(pszDirPath, &statbuf) == 0)
    {
        /* Make sure it's a directory */
        if (!S_ISDIR(statbuf.st_mode))
        {
            dwError = ERROR_FILE_NOT_FOUND;
            BAIL_ON_LSA_ERROR(dwError);
        }
    }
    else
    {
        /* Create it */
        if (mkdir(pszDirPath, DirMode))
        {
            dwError = LwMapErrnoToLwError(errno);
            BAIL_ON_LSA_ERROR(dwError);
        }
    }

error:
    if (pszSlash)
    {
        *pszSlash = '/';
    }
    return dwError;
}

DWORD
RpcSvcRegisterRpcInterface(
    rpc_if_handle_t SrvInterface
    )
{
    DWORD dwError = 0;
    unsigned32 rpcstatus = rpc_s_ok;

    DCETHREAD_TRY
    {
        rpc_server_register_if(SrvInterface, NULL, NULL, &rpcstatus);
    }
    DCETHREAD_CATCH_ALL(THIS_CATCH)
    {
        if (!rpcstatus)
        {
            rpcstatus = dcethread_exc_getstatus(THIS_CATCH);
        }
        if (!rpcstatus)
        {
            dwError = LW_ERROR_RPC_SERVER_REGISTRATION_ERROR;
        }
    }
    DCETHREAD_ENDTRY;

    BAIL_ON_DCERPC_ERROR(rpcstatus);
    BAIL_ON_LSA_ERROR(dwError);

cleanup:
    return dwError;

error:
    goto cleanup;
}

DWORD
RpcSvcBindRpcInterface(
    rpc_binding_vector_p_t *ppSrvBinding,
    rpc_if_handle_t SrvInterface,
    PENDPOINT pEndPoints,
    PCSTR pszSrvDescription
    )
{
    DWORD dwError = 0;
    unsigned32 rpcstatus = rpc_s_ok;

    DCETHREAD_TRY
    {
        dwError = RpcSvcInitServerBinding(ppSrvBinding, pEndPoints);
    }
    DCETHREAD_CATCH_ALL(THIS_CATCH)
    {
        if (!dwError)
        {
            rpcstatus = dcethread_exc_getstatus(THIS_CATCH);
        }
        if (!rpcstatus)
        {
            dwError = LW_ERROR_RPC_SERVER_REGISTRATION_ERROR;
        }
    }
    DCETHREAD_ENDTRY;

    BAIL_ON_DCERPC_ERROR(rpcstatus);
    BAIL_ON_LSA_ERROR(dwError);

    DCETHREAD_TRY
    {
        rpc_ep_register(SrvInterface,
                        *ppSrvBinding,
                        NULL,
                        (unsigned char*)pszSrvDescription,
                        &rpcstatus);
    }
    DCETHREAD_CATCH_ALL(THIS_CATCH)
    {
        if (!dwError)
        {
            rpcstatus = dcethread_exc_getstatus(THIS_CATCH);
        }
        if (!rpcstatus)
        {
            dwError = LW_ERROR_RPC_SERVER_REGISTRATION_ERROR;
        }
    }
    DCETHREAD_ENDTRY;

    BAIL_ON_DCERPC_ERROR(rpcstatus);
    BAIL_ON_LSA_ERROR(dwError);

cleanup:
    return dwError;

error:
    goto cleanup;
}

static DWORD
RpcSvcInitServerBinding(
    rpc_binding_vector_p_t *ppSrvBinding,
    PENDPOINT pEndPoints
    )
{
    DWORD dwError = 0;
    DWORD rpcstatus = rpc_s_ok;
    DWORD i = 0;
    BOOLEAN bIsLocalInterface = FALSE;

    for (i = 0; pEndPoints[i].pszProtocol != NULL; i++)
    {
        /* Guard against a NULL endpoint before dereferencing it; the branch
           below shows the endpoint may legitimately be absent */
        bIsLocalInterface =
            (pEndPoints[i].pszEndpoint != NULL) &&
            (!strcmp(pEndPoints[i].pszProtocol, "ncalrpc")) &&
            (pEndPoints[i].pszEndpoint[0] == '/');

        if (!pEndPoints[i].pszEndpoint)
        {
            rpc_server_use_protseq((unsigned char*) pEndPoints[i].pszProtocol,
                                   rpc_c_protseq_max_calls_default,
                                   (unsigned32*)&rpcstatus);
            BAIL_ON_DCERPC_ERROR(rpcstatus);
        }
        else
        {
            if (bIsLocalInterface)
            {
                dwError = RpcSvcCreateDomainSocketPath(pEndPoints[i].pszEndpoint);
                BAIL_ON_LSA_ERROR(dwError);
            }

            rpc_server_use_protseq_ep((unsigned char*)pEndPoints[i].pszProtocol,
                                      rpc_c_protseq_max_calls_default,
                                      (unsigned char*)pEndPoints[i].pszEndpoint,
                                      (unsigned32*)&rpcstatus);
            BAIL_ON_DCERPC_ERROR(rpcstatus);
        }
    }

    rpc_server_inq_bindings(ppSrvBinding, (unsigned32*)&rpcstatus);
    BAIL_ON_DCERPC_ERROR(rpcstatus);

error:
    return dwError;
}

/*
local variables:
mode: c
c-basic-offset: 4
indent-tabs-mode: nil
tab-width: 4
end:
*/
package com.alibaba.smart.framework.engine.extendsion.parser.engine; import java.util.Map; import javax.xml.namespace.QName; import com.alibaba.smart.framework.engine.common.util.MapUtil; import com.alibaba.smart.framework.engine.constant.ExtensionElementsConstant; import com.alibaba.smart.framework.engine.model.assembly.ExtensionDecorator; import com.alibaba.smart.framework.engine.model.assembly.ExtensionElements; import lombok.Data; /** * @author zilong.jiangzl * @create 2020-07-16 9:42 下午 * please use @com.alibaba.smart.framework.engine.smart.Property, Example: com.alibaba.smart.framework.engine.test.cases.extensions.CompositePropertiesTest */ @Data @Deprecated public class ProcessField implements ExtensionDecorator { static String PROCESS_NS ="http://test.com/process"; private static final long serialVersionUID = -5129848456612155165L; public final static QName qtype = new QName(PROCESS_NS, "field"); private String name; private String value; private String valueType; @Override public String getDecoratorType() { return ExtensionElementsConstant.PROPERTIES; } @Override public void decorate(ExtensionElements extensionElements) { Map map = (Map)extensionElements.getDecorationMap().get(getDecoratorType()); if (null == map) { map = MapUtil.newHashMap(); extensionElements.getDecorationMap().put(this.getDecoratorType(), map); } map.put(this.getName(), this.getValue()); } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.java.hints.spiimpl;

import java.io.File;
import javax.swing.text.Document;
import org.netbeans.api.java.lexer.JavaTokenId;
import org.netbeans.api.java.source.CompilationInfo;
import org.netbeans.api.java.source.JavaSource;
import org.netbeans.api.java.source.JavaSource.Phase;
import org.netbeans.api.java.source.SourceUtilsTestUtil;
import org.netbeans.api.java.source.SourceUtilsTestUtil2;
import org.netbeans.api.java.source.TestUtilities;
import org.netbeans.api.lexer.Language;
import org.netbeans.junit.NbTestCase;
import org.openide.cookies.EditorCookie;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.loaders.DataObject;

/**
 *
 * @author <NAME>
 */
public class TestBase extends NbTestCase {

    public TestBase(String name) {
        super(name);
    }

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        SourceUtilsTestUtil.prepareTest(new String[0], new Object[0]);
        SourceUtilsTestUtil2.disableConfinementTest();
        clearWorkDir();
        FileUtil.refreshFor(File.listRoots());
    }

    private int workDirPart = 0;

    protected void prepareTest(String fileName, String code) throws Exception {
        prepareTest(fileName, code, null);
    }

    protected void prepareTest(String fileName, String code, String sourceLevel) throws Exception {
        FileObject workFO = FileUtil.createFolder(new File(getWorkDir(), String.valueOf(workDirPart++)));

        assertNotNull(workFO);

        workFO.refresh();

        sourceRoot = workFO.createFolder("src");

        FileObject buildRoot = workFO.createFolder("build");
        FileObject cache = workFO.createFolder("cache");
        FileObject data = FileUtil.createData(sourceRoot, fileName);

        if (sourceLevel != null)
            SourceUtilsTestUtil.setSourceLevel(data, sourceLevel);

        File dataFile = FileUtil.toFile(data);

        assertNotNull(dataFile);

        TestUtilities.copyStringToFile(dataFile, code);

        SourceUtilsTestUtil.prepareTest(sourceRoot, buildRoot, cache);

        DataObject od = DataObject.find(data);
        EditorCookie ec = od.getLookup().lookup(EditorCookie.class);

        assertNotNull(ec);

        doc = ec.openDocument();
        doc.putProperty(Language.class, JavaTokenId.language());

        JavaSource js = JavaSource.forFileObject(data);

        assertNotNull(js);

        info = SourceUtilsTestUtil.getCompilationInfo(js, Phase.RESOLVED);

        assertNotNull(info);
    }

    protected FileObject sourceRoot;
    protected CompilationInfo info;
    protected Document doc;
}
#include <assert.h> // this is a gcc extension to allow various interfaces #ifdef __GNUC__ typedef unsigned int pid_t; union wait { int whatnot; }; typedef union { int *__ip; union wait *__up; } wait_status_ptr_t __attribute__ ((__transparent_union__)); pid_t wait(wait_status_ptr_t); int w1 () { int w; return wait(&w); } int w2 () { union wait w; return wait(&w); } pid_t wait(wait_status_ptr_t p) { assert(p.__ip!=0); } // alternative syntax union U { int *p; char *q; } __attribute__((transparent_union)); void f(union U u) { } int main() { int *p; char *q; f(p); f(q); } #else int main() { } #endif
package com.tv.ui.metro.idata;

import android.content.ContentProvider;
import android.content.ContentUris;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import android.database.sqlite.SQLiteQueryBuilder;
import android.net.Uri;
import android.text.TextUtils;

/**
 * Local database storage for settings and favorites.
 */
public class iDataProvider extends ContentProvider {
    private static final String TAG = "iDataProvider";

    public static final String DATABASE_NAME = "idata.db";
    public static final int DATABASE_VERSION = 1;
    public static final String AUTHORITY = iDataORM.AUTHORITY;
    public static SQLiteOpenHelper mOpenHelper;

    private static final String TABLE_SETTINGS = "settings";
    private static final String TABLE_Favor = "favor";

    public static final String _ID = "_id";
    public static final String NAME = "name";
    public static final String VALUE = "value";

    @Override
    public boolean onCreate() {
        mOpenHelper = new DatabaseHelper(getContext());
        return true;
    }

    private static class DatabaseHelper extends SQLiteOpenHelper {
        public DatabaseHelper(Context context) {
            super(context, DATABASE_NAME, null, DATABASE_VERSION);
        }

        public DatabaseHelper(Context context, String name, SQLiteDatabase.CursorFactory factory, int version) {
            super(context, name, factory, version);
        }

        @Override
        public void onCreate(SQLiteDatabase db) {
            try {
                db.execSQL("CREATE TABLE " + TABLE_SETTINGS + " (" +
                        " _id INTEGER PRIMARY KEY AUTOINCREMENT," +
                        " name TEXT," +
                        " value TEXT," +
                        " application TEXT," +
                        " date_time TEXT);");

                db.execSQL("CREATE TABLE " + TABLE_Favor + " (" +
                        " _id INTEGER PRIMARY KEY AUTOINCREMENT," +
                        " res_id TEXT, " +
                        " ns TEXT," +
                        " value TEXT," +
                        " date_time TEXT);");
            } catch (Exception ne) {
            }
        }

        private void dropTables(SQLiteDatabase db) {
            try {
                db.execSQL("DROP TABLE IF EXISTS " + TABLE_SETTINGS);
                db.execSQL("DROP TABLE IF EXISTS " + TABLE_Favor);
            } catch (Exception ne) {
            }
        }

        @Override
        public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
        }

        @Override
        public void onDowngrade(SQLiteDatabase db, int oldVersion, int newVersion) {
            try {
                try {
                    db.execSQL("DROP TABLE IF EXISTS " + TABLE_SETTINGS);
                    db.execSQL("DROP TABLE IF EXISTS " + TABLE_Favor);
                } catch (Exception ne) {
                }
                onCreate(db);
            } catch (Exception e) {
                dropTables(db);
                onCreate(db);
            }
        }
    }

    @Override
    public Cursor query(Uri uri, String[] projection, final String selection, final String[] selectionArgs, String sortOrder) {
        final SqlArguments args = new SqlArguments(uri, selection, selectionArgs);
        SQLiteQueryBuilder qb = new SQLiteQueryBuilder();
        qb.setTables(args.table);

        SQLiteDatabase db = mOpenHelper.getReadableDatabase();
        Cursor result = qb.query(db, projection, args.where, args.args, args.groupby, null, sortOrder);
        return result;
    }

    @Override
    public int update(Uri uri, ContentValues values, String selection, String[] selectionArgs) {
        int count = 0;
        SqlArguments args = new SqlArguments(uri, selection, selectionArgs);

        // always update local database
        SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        count = db.update(args.table, values, args.where, args.args);
        if (count > 0) {
            getContext().getContentResolver().notifyChange(uri, null);
        }
        return count;
    }

    @Override
    public int delete(Uri uri, String selection, String[] selectionArgs) {
        SqlArguments args = new SqlArguments(uri, selection, selectionArgs);
        SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        int count = db.delete(args.table, args.where, args.args);
        if (count > 0) {
            getContext().getContentResolver().notifyChange(uri, null);
        }
        return count;
    }

    @Override
    public String getType(Uri uri) {
        SqlArguments args = new SqlArguments(uri, null, null);
        if (TextUtils.isEmpty(args.where)) {
            return "vnd.android.cursor.dir/" + args.table;
        } else {
            return "vnd.android.cursor.item/" + args.table;
        }
    }

    @Override
    public Uri insert(Uri uri, ContentValues values) {
        SqlArguments args = new SqlArguments(uri);
        SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        final long rowId = db.insert(args.table, null, values);
        if (rowId <= 0) return null;
        else {
            getContext().getContentResolver().notifyChange(uri, null);
        }
        uri = ContentUris.withAppendedId(uri, rowId);
        return uri;
    }

    static class SqlArguments {
        public final String table;
        public final String where;
        public final String[] args;
        public String groupby = null;

        SqlArguments(Uri url, String where, String[] args) {
            if (url.getPathSegments().size() == 1) {
                this.table = url.getPathSegments().get(0);
                this.where = where;
                this.args = args;
            } else if (url.getPathSegments().size() != 2) {
                throw new IllegalArgumentException("Invalid URI: " + url);
            } else if (!TextUtils.isEmpty(where)) {
                throw new UnsupportedOperationException(
                        "WHERE clause not supported: " + url);
            } else {
                this.table = url.getPathSegments().get(0);
                this.where = "_id=" + ContentUris.parseId(url);
                this.args = null;
            }
        }

        SqlArguments(Uri url) {
            if (url.getPathSegments().size() == 1) {
                table = url.getPathSegments().get(0);
                where = null;
                args = null;
            } else {
                throw new IllegalArgumentException("Invalid URI: " + url);
            }
        }
    }
}
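// Editor's note: a hedged sketch (not part of the original sources) of querying the
// provider above through a ContentResolver. It assumes the same package as the
// provider so iDataORM.AUTHORITY resolves; the setting name passed in is illustrative.

import android.content.Context;
import android.database.Cursor;
import android.net.Uri;

class iDataProviderQuerySketch {
    String readSetting(Context context, String name) {
        // Single-segment path -> table "settings", selection passed through as-is.
        Uri settings = Uri.parse("content://" + iDataORM.AUTHORITY + "/settings");
        Cursor cursor = context.getContentResolver().query(
                settings, new String[] {"value"}, "name=?", new String[] {name}, null);
        try {
            return (cursor != null && cursor.moveToFirst()) ? cursor.getString(0) : null;
        } finally {
            if (cursor != null) {
                cursor.close();
            }
        }
    }
}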
/* * Copyright (C) 2018-2021 <NAME> <<EMAIL>> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package am.ik.yavi.constraint; import java.math.BigDecimal; import java.math.BigInteger; import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.text.Normalizer; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Set; import java.util.function.Function; import java.util.function.ToIntFunction; import java.util.regex.Pattern; import am.ik.yavi.constraint.base.ContainerConstraintBase; import am.ik.yavi.constraint.charsequence.ByteSizeConstraint; import am.ik.yavi.constraint.charsequence.CodePoints; import am.ik.yavi.constraint.charsequence.CodePoints.CodePointsRanges; import am.ik.yavi.constraint.charsequence.CodePoints.CodePointsSet; import am.ik.yavi.constraint.charsequence.CodePoints.Range; import am.ik.yavi.constraint.charsequence.CodePointsConstraint; import am.ik.yavi.constraint.charsequence.EmojiConstraint; import am.ik.yavi.constraint.charsequence.variant.VariantOptions; import am.ik.yavi.constraint.inetaddress.InetAddressUtils; import am.ik.yavi.constraint.password.CharSequencePasswordPoliciesBuilder; import am.ik.yavi.core.ConstraintPredicate; import am.ik.yavi.core.ViolationMessage; import static am.ik.yavi.core.NullAs.INVALID; import static am.ik.yavi.core.NullAs.VALID; import static am.ik.yavi.core.ViolationMessage.Default.*; public class CharSequenceConstraint<T, E extends CharSequence> extends ContainerConstraintBase<T, E, CharSequenceConstraint<T, E>> { private static final String EMAIL_PART = "[^\\x00-\\x1F()<>@,;:\\\\\".\\[\\]\\s]"; private static final String DOMAIN_PATTERN = EMAIL_PART + "+(\\." + EMAIL_PART + "+)*"; private static final Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern .compile( "^" + EMAIL_PART + "+(\\." 
+ EMAIL_PART + "+)*@(" + DOMAIN_PATTERN + "|" + InetAddressUtils.IPV4_REGEX + ")$", Pattern.CASE_INSENSITIVE); private static final Pattern VALID_UUID_REGEX = Pattern .compile("\\p{XDigit}{8}(-\\p{XDigit}{4}){4}\\p{XDigit}{8}"); protected final Normalizer.Form normalizerForm; protected final VariantOptions variantOptions; public CharSequenceConstraint() { this(Normalizer.Form.NFC, VariantOptions.builder().build()); } public CharSequenceConstraint(Normalizer.Form normalizerForm, VariantOptions variantOptions) { this.normalizerForm = normalizerForm; this.variantOptions = variantOptions; } public ByteSizeConstraint<T, E> asByteArray(Charset charset) { return new ByteSizeConstraint<>(this, charset); } public ByteSizeConstraint<T, E> asByteArray() { return this.asByteArray(StandardCharsets.UTF_8); } @Override public CharSequenceConstraint<T, E> cast() { return this; } public CodePointsConstraint.Builder<T, E> codePoints(CodePoints<E> codePoints) { return new CodePointsConstraint.Builder<>(this, codePoints); } public CodePointsConstraint.Builder<T, E> codePoints(Set<Integer> allowedCodePoints) { return this.codePoints((CodePointsSet<E>) () -> allowedCodePoints); } public CodePointsConstraint.Builder<T, E> codePoints(int begin, int end) { return this.codePoints(Range.of(begin, end)); } public CodePointsConstraint.Builder<T, E> codePoints(Range range, Range... ranges) { return this.codePoints((CodePointsRanges<E>) () -> { List<Range> list = new ArrayList<>(); list.add(range); list.addAll(Arrays.asList(ranges)); return list; }); } public CharSequenceConstraint<T, E> contains(CharSequence s) { this.predicates().add(ConstraintPredicate.of(x -> x.toString().contains(s), CHAR_SEQUENCE_CONTAINS, () -> new Object[] { s }, VALID)); return this; } /** * Does the given value start with the {@code prefix} * @param prefix the prefix the value has to start with * @since 0.10.0 */ public CharSequenceConstraint<T, E> startsWith(CharSequence prefix) { this.predicates() .add(ConstraintPredicate.of( x -> x.toString().startsWith(prefix.toString()), CHAR_SEQUENCE_STARTSWITH, () -> new Object[] { prefix }, VALID)); return this; } /** * Does the given value end with the {@code suffix} * @param suffix the suffix the value has to end with * @since 0.10.0 */ public CharSequenceConstraint<T, E> endsWith(CharSequence suffix) { this.predicates() .add(ConstraintPredicate.of(x -> x.toString().endsWith(suffix.toString()), CHAR_SEQUENCE_ENDSWITH, () -> new Object[] { suffix }, VALID)); return this; } public CharSequenceConstraint<T, E> email() { this.predicates().add(ConstraintPredicate.of(x -> { if (size().applyAsInt(x) == 0) { return true; } return VALID_EMAIL_ADDRESS_REGEX.matcher(x).matches(); }, CHAR_SEQUENCE_EMAIL, () -> new Object[] {}, VALID)); return this; } /** * @since 0.7.0 */ public CharSequenceConstraint<T, E> password( Function<CharSequencePasswordPoliciesBuilder<T, E>, List<ConstraintPredicate<E>>> builder) { final List<ConstraintPredicate<E>> predicates = builder .apply(new CharSequencePasswordPoliciesBuilder<>()); this.predicates().addAll(predicates); return this; } private <U> CharSequenceConstraint<T, E> isValidRepresentationOf( Function<String, U> converter, ViolationMessage message) { this.predicates().add(ConstraintPredicate.of(x -> { if (size().applyAsInt(x) == 0) { return true; } try { converter.apply(x.toString()); return true; } catch (NumberFormatException ignored) { return false; } }, message, () -> new Object[] {}, VALID)); return this; } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> 
isByte() { return this.isValidRepresentationOf(Byte::parseByte, CHAR_SEQUENCE_BYTE); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isShort() { return this.isValidRepresentationOf(Short::parseShort, CHAR_SEQUENCE_SHORT); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isInteger() { return this.isValidRepresentationOf(Integer::parseInt, CHAR_SEQUENCE_INTEGER); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isLong() { return this.isValidRepresentationOf(Long::parseLong, CHAR_SEQUENCE_LONG); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isFloat() { return this.isValidRepresentationOf(Float::parseFloat, CHAR_SEQUENCE_FLOAT); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isDouble() { return this.isValidRepresentationOf(Double::parseDouble, CHAR_SEQUENCE_DOUBLE); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isBigInteger() { return this.isValidRepresentationOf(BigInteger::new, CHAR_SEQUENCE_BIGINTEGER); } /** * @since 0.6.0 */ public CharSequenceConstraint<T, E> isBigDecimal() { return this.isValidRepresentationOf(BigDecimal::new, CHAR_SEQUENCE_BIGDECIMAL); } public EmojiConstraint<T, E> emoji() { return new EmojiConstraint<>(this, this.normalizerForm, this.variantOptions); } public CharSequenceConstraint<T, E> normalizer(Normalizer.Form normalizerForm) { CharSequenceConstraint<T, E> constraint = new CharSequenceConstraint<>( normalizerForm, this.variantOptions); constraint.predicates().addAll(this.predicates()); return constraint; } public CharSequenceConstraint<T, E> notBlank() { this.predicates() .add(ConstraintPredicate.of( x -> x != null && trim(x.toString()).length() != 0, CHAR_SEQUENCE_NOT_BLANK, () -> new Object[] {}, INVALID)); return this; } public CharSequenceConstraint<T, E> pattern(String regex) { this.predicates().add(ConstraintPredicate.of(x -> Pattern.matches(regex, x), CHAR_SEQUENCE_PATTERN, () -> new Object[] { regex }, VALID)); return this; } /** * @since 0.7.0 */ public CharSequenceConstraint<T, E> ipv4() { this.predicates() .add(ConstraintPredicate.of(x -> InetAddressUtils.isIpv4(x.toString()), CHAR_SEQUENCE_IPV4, () -> new Object[] {}, VALID)); return this; } /** * @since 0.7.0 */ public CharSequenceConstraint<T, E> ipv6() { this.predicates() .add(ConstraintPredicate.of(x -> InetAddressUtils.isIpv6(x.toString()), CHAR_SEQUENCE_IPV6, () -> new Object[] {}, VALID)); return this; } public CharSequenceConstraint<T, E> url() { this.predicates().add(ConstraintPredicate.of(x -> { if (size().applyAsInt(x) == 0) { return true; } try { new URL(x.toString()); return true; } catch (MalformedURLException e) { return false; } }, CHAR_SEQUENCE_URL, () -> new Object[] {}, VALID)); return this; } /** * @since 0.10.0 */ public CharSequenceConstraint<T, E> uuid() { this.predicates().add(ConstraintPredicate.of(x -> { if (size().applyAsInt(x) == 0) { return true; } return VALID_UUID_REGEX.matcher(x).matches(); }, CHAR_SEQUENCE_UUID, () -> new Object[] {}, VALID)); return this; } /** * @since 0.7.0 */ public CharSequenceConstraint<T, E> luhn() { this.predicates().add(ConstraintPredicate.of(CharSequenceConstraint::luhnCheck, CHAR_SEQUENCE_LUHN, () -> new Object[] {}, VALID)); return this; } // https://github.com/apache/commons-validator/blob/master/src/main/java/org/apache/commons/validator/CreditCardValidator.java static boolean luhnCheck(CharSequence cardNumber) { // number must be validated as 0..9 numeric first!! 
final int digits = cardNumber.length(); final int oddOrEven = digits & 1; long sum = 0; for (int count = 0; count < digits; count++) { int digit; try { digit = Integer.parseInt(cardNumber.charAt(count) + ""); } catch (NumberFormatException e) { return false; } if (((count & 1) ^ oddOrEven) == 0) { // not digit *= 2; if (digit > 9) { digit -= 9; } } sum += digit; } return sum != 0 && (sum % 10 == 0); } public CharSequenceConstraint<T, E> variant( Function<VariantOptions.Builder, VariantOptions.Builder> opts) { VariantOptions.Builder builder = VariantOptions.builder(); CharSequenceConstraint<T, E> constraint = new CharSequenceConstraint<>( this.normalizerForm, opts.apply(builder).build()); constraint.predicates().addAll(this.predicates()); return constraint; } protected String normalize(String s) { String str = this.variantOptions.ignored(s); return this.normalizerForm == null ? str : Normalizer.normalize(str, this.normalizerForm); } @Override protected ToIntFunction<E> size() { return cs -> { String s = this.normalize(cs.toString()); return s.codePointCount(0, s.length()); }; } private static String trim(String s) { if (s.length() == 0) { return s; } StringBuilder sb = new StringBuilder(s); while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) { sb.deleteCharAt(0); } while (sb.length() > 0 && Character.isWhitespace(sb.charAt(sb.length() - 1))) { sb.deleteCharAt(sb.length() - 1); } return sb.toString(); } }
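// Editor's note: a minimal usage sketch (added by the editor) wiring the constraint
// class above into yavi's builder API. The User type and its getters are assumptions
// for illustration; the builder calls follow yavi's documented style but are not taken
// from this file.

import am.ik.yavi.builder.ValidatorBuilder;
import am.ik.yavi.core.ConstraintViolations;
import am.ik.yavi.core.Validator;

class CharSequenceConstraintSketch {
    static final class User {
        final String email;
        final String cardNumber;

        User(String email, String cardNumber) {
            this.email = email;
            this.cardNumber = cardNumber;
        }

        String getEmail() { return email; }
        String getCardNumber() { return cardNumber; }
    }

    // Each lambda receives a CharSequenceConstraint and chains the checks shown above.
    static final Validator<User> VALIDATOR = ValidatorBuilder.<User>of()
            .constraint(User::getEmail, "email", c -> c.notBlank().email())
            .constraint(User::getCardNumber, "cardNumber", c -> c.notBlank().luhn())
            .build();

    static boolean isValid(User user) {
        ConstraintViolations violations = VALIDATOR.validate(user);
        return violations.isValid();
    }
}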
package com.tencent.angel.ps.server.data.handler; import com.tencent.angel.ps.PSContext; import com.tencent.angel.ps.server.data.request.GetRowSplitRequest; import com.tencent.angel.ps.server.data.request.RequestData; import com.tencent.angel.ps.server.data.request.RequestHeader; import com.tencent.angel.ps.server.data.response.GetRowSplitResponse; import com.tencent.angel.ps.server.data.response.ResponseData; import com.tencent.angel.ps.storage.vector.ServerRow; import com.tencent.angel.utils.MatrixUtils; import io.netty.buffer.ByteBuf; public class GetRowHandler extends Handler { public GetRowHandler(PSContext context) { super(context); } @Override public RequestData parseRequest(ByteBuf in) { GetRowSplitRequest request = new GetRowSplitRequest(); request.deserialize(in); return request; } @Override public ResponseData handle(RequestHeader header, RequestData data) throws Exception { GetRowSplitRequest request = (GetRowSplitRequest) data; ServerRow row = MatrixUtils.getRow(context.getMatrixStorageManager(), header.matrixId, header.partId, request.getRowId()); return new GetRowSplitResponse(row); } }
package com.alibaba.alink.common.sql.builtin.agg;

public class DenseRankUdaf extends BaseRankUdaf {

    public DenseRankUdaf() {
        super();
    }

    @Override
    public void accumulate(RankData rankData, Object... values) {
        accumulateTemp(rankData, values);
        rankData.updateDenseRank();
    }
}
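// Editor's note: an illustrative sketch (not from the original sources) of what dense
// ranking computes over a sorted window: ties share a rank and the next distinct value
// takes rank + 1 (1,1,2,...), whereas standard rank skips positions (1,1,3,...).

import java.util.Arrays;

class DenseRankSketch {
    static int[] denseRank(int[] sortedValues) {
        int[] ranks = new int[sortedValues.length];
        int rank = 0;
        for (int i = 0; i < sortedValues.length; i++) {
            if (i == 0 || sortedValues[i] != sortedValues[i - 1]) {
                rank++; // only distinct values advance the dense rank
            }
            ranks[i] = rank;
        }
        return ranks;
    }

    public static void main(String[] args) {
        // [10, 20, 20, 30] -> [1, 2, 2, 3]
        System.out.println(Arrays.toString(denseRank(new int[] {10, 20, 20, 30})));
    }
}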
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyNiworkflows(PythonPackage): """Common workflows for MRI (anatomical, functional, diffusion, etc)""" homepage = "https://github.com/nipreps/niworkflows" pypi = "niworkflows/niworkflows-1.4.0.tar.gz" version('1.4.0', sha256='d4e59070fde0290e0bfeece120ff1d2ff1f9573e3f2e6a719fe463c913af25ec') version('1.3.5', sha256='92e24f3462fb3ad4d8ee724506fba05da2b3ca0626850dd2e637a553e17d69b8') version('1.0.4', sha256='34bfa5561e6f872dbd85bb30a1b44c5e1be525167abe3932aee8ac06d15f6ed9') variant('fsl', default=False, description="Enable fsl support.") variant('ants', default=False, description="Enable ants support.") depends_on('[email protected]:', when='@1.3.3:', type=('build', 'run')) depends_on('[email protected]:', when='@1.2:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', when='@1.2.3:', type='build') depends_on('[email protected]:', type='build') depends_on('py-attrs', when='@1.1.4:', type=('build', 'run')) depends_on('py-jinja2', type=('build', 'run')) depends_on('[email protected]:', when='^[email protected]:', type=('build', 'run')) depends_on('[email protected]:3.1', when='^python@:3.5', type=('build', 'run')) depends_on('[email protected]:', when='@1.1.6:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:0.4,0.5.2:', type=('build', 'run')) depends_on('[email protected]:', when='@1.3:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('py-nitransforms@20:20.1', when='@1.2:', type=('build', 'run')) depends_on('py-numpy', when='@1.3.3:', type=('build', 'run')) depends_on('py-packaging', type=('build', 'run')) depends_on('py-pandas', type=('build', 'run')) depends_on('[email protected]:', when='@1.3:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('py-pyyaml', type=('build', 'run')) depends_on('py-scikit-image', type=('build', 'run')) depends_on('py-scikit-learn', when='@:1.3', type=('build', 'run')) depends_on('py-scipy', type=('build', 'run')) depends_on('py-seaborn', type=('build', 'run')) depends_on('[email protected]:', when='@1.4:', type=('build', 'run')) depends_on('[email protected]', when='@1.3.5', type=('build', 'run')) depends_on('py-svgutils', type=('build', 'run')) depends_on('py-transforms3d', type=('build', 'run')) depends_on('[email protected]:', when='@1.3:', type=('build', 'run')) depends_on('[email protected]:0.4', when='@:1.0', type=('build', 'run')) with when('+fsl'): depends_on('[email protected]:', type=('build', 'run')) with when('+ants'): depends_on('[email protected]:', type=('build', 'run')) # dependencies that are not yet in spack # depends_on('[email protected]:', type=('build', 'run')) # depends_on('c3d@1:', type=('build', 'run')) # depends_on('[email protected]:', type=('build', 'run'))
{
  "type": "form",
  "components": [
    {
      "label": "Number1",
      "mask": false,
      "spellcheck": true,
      "tableView": false,
      "delimiter": false,
      "requireDecimal": false,
      "inputFormat": "plain",
      "calculateServer": false,
      "validate": {
        "required": true
      },
      "key": "number1",
      "type": "number",
      "input": true
    },
    {
      "label": "Number2",
      "mask": false,
      "spellcheck": true,
      "tableView": false,
      "delimiter": false,
      "requireDecimal": false,
      "inputFormat": "plain",
      "calculateValue": {
        "_camelCase": {
          "var": "row.number1"
        }
      },
      "calculateServer": false,
      "allowCalculateOverride": true,
      "validate": {
        "required": true
      },
      "key": "number2",
      "type": "number",
      "input": true
    },
    {
      "label": "Submit",
      "showValidations": false,
      "tableView": false,
      "key": "submit",
      "type": "button",
      "input": true
    }
  ],
  "title": "manualOverrideTest",
  "display": "form",
  "name": "manualOverrideTest",
  "path": "manualoverridetest"
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.metrics;

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

public class FastCompassTest {

    @Test
    public void testFastCompass() {
        ManualClock clock = new ManualClock();
        FastCompass fastCompass = new FastCompassImpl(60, 10, clock, 10);
        fastCompass.record(10, "success");
        fastCompass.record(20, "error");
        fastCompass.record(15, "success");
        clock.addSeconds(60);

        // verify count
        assertThat(fastCompass.getMethodCountPerCategory()).containsKey("success");
        assertThat(fastCompass.getMethodCountPerCategory(0L).get("success").get(0L)).isEqualTo(2L);
        assertThat(fastCompass.getMethodCountPerCategory()).containsKey("error");
        assertThat(fastCompass.getMethodCountPerCategory(0L).get("error").get(0L)).isEqualTo(1L);

        // verify rt
        assertThat(fastCompass.getMethodRtPerCategory()).containsKey("success");
        assertThat(fastCompass.getMethodRtPerCategory(0L).get("success").get(0L)).isEqualTo(25L);
        assertThat(fastCompass.getMethodRtPerCategory()).containsKey("error");
        assertThat(fastCompass.getMethodRtPerCategory(0L).get("error").get(0L)).isEqualTo(20L);

        // total count
        long totalCount = fastCompass.getMethodCountPerCategory(0L).get("success").get(0L)
                + fastCompass.getMethodCountPerCategory(0L).get("error").get(0L);
        assertThat(totalCount).isEqualTo(3L);

        // average rt
        long avgRt = (fastCompass.getMethodRtPerCategory(0L).get("success").get(0L)
                + fastCompass.getMethodRtPerCategory(0L).get("error").get(0L)) / totalCount;
        assertThat(avgRt).isEqualTo(15L);

        // verify count and rt
        assertThat(fastCompass.getCountAndRtPerCategory()).containsKey("success");
        assertThat(fastCompass.getCountAndRtPerCategory(0L).get("success").get(0L)).isEqualTo((2L << 38) + 25);
        assertThat(fastCompass.getCountAndRtPerCategory()).containsKey("error");
        assertThat(fastCompass.getCountAndRtPerCategory(0L).get("error").get(0L)).isEqualTo((1L << 38) + 20);
    }

    @Test
    public void testBinaryAdd() {
        long a1 = (1L << 38) + 10;
        long a2 = (1L << 38) + 20;
        assertThat((a1 + a2) >> 38).isEqualTo(2);
    }

    @Test
    public void testMaxSubCategoryCount() {
        ManualClock clock = new ManualClock();
        FastCompass fastCompass = new FastCompassImpl(60, 10, clock, 2);
        fastCompass.record(10, "success");
        fastCompass.record(20, "error1");
        fastCompass.record(15, "error2");
        assertThat(fastCompass.getMethodRtPerCategory().keySet().size()).isEqualTo(2);
    }
}
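// Editor's note: a sketch (added by the editor) of the bit-packing invariant the tests
// above rely on: the invocation count and the total rt share one long, with the count
// in the bits above bit 38. The offset matches the tests; the helper is hypothetical.

class CountRtPackingSketch {
    static final int COUNT_OFFSET = 38;
    static final long RT_MASK = (1L << COUNT_OFFSET) - 1;

    static long pack(long count, long totalRt) {
        return (count << COUNT_OFFSET) | totalRt;
    }

    static long count(long packed) {
        return packed >>> COUNT_OFFSET; // counts are non-negative, so >>> and >> agree
    }

    static long totalRt(long packed) {
        return packed & RT_MASK;
    }

    public static void main(String[] args) {
        // Packed values are additive as long as the rt sum stays below 2^38,
        // which is why (a1 + a2) >> 38 == 2 holds in testBinaryAdd above.
        long a1 = pack(1, 10);
        long a2 = pack(1, 20);
        long sum = a1 + a2;
        System.out.println(count(sum) + " invocations, " + totalRt(sum) + "ms total");
    }
}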
#from http://rosettacode.org/wiki/Hailstone_sequence#Python
#pythran export hailstone(int)
#runas hailstone(27)
#runas max((len(hailstone(i)), i) for i in range(1,100000))
def hailstone(n):
    seq = [n]
    while n > 1:
        n = 3 * n + 1 if n & 1 else n // 2
        seq.append(n)
    return seq
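// Editor's note: the same search the second runas line performs -- the longest
// hailstone sequence for a start below 100000 -- sketched by the editor in Java.
// Intermediate values can exceed the starting bound, hence the long arithmetic.

class HailstoneSketch {
    static int hailstoneLength(long n) {
        int length = 1;
        while (n > 1) {
            n = (n & 1) == 1 ? 3 * n + 1 : n / 2;
            length++;
        }
        return length;
    }

    public static void main(String[] args) {
        int bestStart = 1;
        int bestLength = 1;
        for (int start = 1; start < 100_000; start++) {
            int length = hailstoneLength(start);
            if (length > bestLength) {
                bestLength = length;
                bestStart = start;
            }
        }
        // Commonly reported answer for this bound: start 77031, length 351.
        System.out.println(bestStart + " -> " + bestLength);
    }
}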
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_CHROMEOS_CHROMEBOX_FOR_MEETINGS_CFM_CHROME_SERVICES_H_
#define CHROME_BROWSER_CHROMEOS_CHROMEBOX_FOR_MEETINGS_CFM_CHROME_SERVICES_H_

#include "base/observer_list_types.h"

namespace chromeos {
namespace cfm {

// Registers the observers for service interface requests by the
// |CfmServiceContext| granting hotline access to services.
void InitializeCfmServices();

// Removes the observers for service interface requests by the
// |CfmServiceContext| removing hotline access to services.
void ShutdownCfmServices();

}  // namespace cfm
}  // namespace chromeos

#endif  // CHROME_BROWSER_CHROMEOS_CHROMEBOX_FOR_MEETINGS_CFM_CHROME_SERVICES_H_
281
634
/**************************************************************** * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, * * software distributed under the License is distributed on an * * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * * KIND, either express or implied. See the License for the * * specific language governing permissions and limitations * * under the License. * ****************************************************************/ package org.apache.james.imap.message; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; public class BytesBackedLiteral implements Literal { public static BytesBackedLiteral copy(InputStream stream) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); stream.transferTo(out); return of(out.toByteArray()); } public static BytesBackedLiteral of(byte[] bytes) { return new BytesBackedLiteral(bytes); } private final byte[] content; private BytesBackedLiteral(byte[] content) { this.content = content; } @Override public long size() { return content.length; } @Override public InputStream getInputStream() { return new ByteArrayInputStream(content); } }
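Because the literal buffers the whole stream up front (note that InputStream.transferTo in copy() requires Java 9+), size() is known immediately and getInputStream() can hand out a fresh stream on every call. A hypothetical usage sketch, assuming it runs in or imports the org.apache.james.imap.message package (the payload value is made up):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class BytesBackedLiteralSketch {
    public static void main(String[] args) throws Exception {
        byte[] payload = "a literal body".getBytes(StandardCharsets.US_ASCII);
        BytesBackedLiteral literal = BytesBackedLiteral.copy(new ByteArrayInputStream(payload));
        System.out.println(literal.size());            // 14: the buffered byte count
        InputStream replay = literal.getInputStream(); // a new stream over the same bytes each call
        System.out.println(replay.read() == 'a');      // true
    }
}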
757
711
<reponame>shachindrasingh/apiman<gh_stars>100-1000 /* * Copyright 2017 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.apiman.gateway.engine.vertx.shareddata; import io.apiman.gateway.engine.IEngineConfig; import io.apiman.gateway.engine.IRegistry; import io.apiman.gateway.engine.async.AsyncResultImpl; import io.apiman.gateway.engine.async.IAsyncResultHandler; import io.apiman.gateway.engine.beans.Api; import io.apiman.gateway.engine.beans.ApiContract; import io.apiman.gateway.engine.beans.Client; import io.apiman.gateway.engine.beans.Contract; import io.apiman.gateway.engine.beans.exceptions.ApiNotFoundException; import io.apiman.gateway.engine.beans.exceptions.ApiRetiredException; import io.apiman.gateway.engine.beans.exceptions.ClientNotFoundException; import io.apiman.gateway.engine.beans.exceptions.NoContractFoundException; import io.apiman.gateway.engine.i18n.Messages; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; import io.vertx.core.AsyncResult; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Handler; import io.vertx.core.Vertx; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; import io.vertx.core.shareddata.AsyncMap; /** * @author <NAME> {@literal <<EMAIL>>} */ @SuppressWarnings("nls") public class SharedGlobalDataRegistry implements IRegistry { private static final Logger LOGGER = LoggerFactory.getLogger(SharedGlobalDataRegistry.class); private AsyncMap<String, Object> objectMap; public SharedGlobalDataRegistry(Vertx vertx, IEngineConfig vxConfig, Map<String, String> options) { if (!vertx.isClustered()) { throw new IllegalStateException(SharedGlobalDataRegistry.class.getCanonicalName() + " only works when operating in clustered mode!"); } vertx.sharedData().<String, Object> getClusterWideMap("SharedGlobalDataRegistry-Shared", async -> { if (async.succeeded()) { objectMap = async.result(); } else { LOGGER.error("Problem getting cluster-wide Vert.x map: {}", async.cause(), async.cause().getMessage()); throw new IllegalStateException(async.cause()); } }); } @Override public void publishApi(Api api, IAsyncResultHandler<Void> handler) { objectMap.put(getApiIndex(api), api, handleResultVoid(handler)); } @Override public void retireApi(Api api, IAsyncResultHandler<Void> handler) { objectMap.remove(getApiIndex(api), handleSuccessfulResult(handler, deletedApi -> { if (deletedApi == null) { Exception ex = new ApiNotFoundException(Messages.i18n.format("InMemoryRegistry.ApiNotFound")); handler.handle(AsyncResultImpl.create(ex)); } else { handler.handle(AsyncResultImpl.create((Void) null)); } })); } private <T> Handler<AsyncResult<T>> handleSuccessfulResult(IAsyncResultHandler<Void> failureHandler, Handler<T> successHandler) { return result -> { if (result.succeeded()) { successHandler.handle(result.result()); } else { failureHandler.handle(AsyncResultImpl.create(result.cause())); } }; } @SuppressWarnings("rawtypes") // CompositeFuture.all(list) requires raw 
futures.
    @Override
    public void registerClient(Client client, IAsyncResultHandler<Void> resultHandler) {
        List<Future> futures = new ArrayList<>(client.getContracts().size());
        List<Contract> contracts = new ArrayList<>(client.getContracts());
        String clientIndex = getClientIndex(client);

        // Future for each contract and execute get.
        for (Contract contract : contracts) {
            Future future = Future.future();
            futures.add(future);
            String apiIndex = getApiIndex(contract.getApiOrgId(), contract.getApiId(), contract.getApiVersion());
            objectMap.get(apiIndex, future.completer());
        }

        CompositeFuture.all(futures).setHandler(compositeResult -> {
            if (compositeResult.succeeded()) {
                // If any contract didn't correspond to a stored API.
                Contract failedContract = null;
                for (int i = 0; i < futures.size(); i++) {
                    if (futures.get(i).result() == null) {
                        failedContract = contracts.get(i); // Use the contract at the failing index, not the first one.
                        break;
                    }
                }
                // If we found an invalid contract.
                if (failedContract != null) {
                    Exception ex = new ApiNotFoundException(Messages.i18n.format("InMemoryRegistry.ApiNotFoundInOrg",
                            failedContract.getApiId(), failedContract.getApiOrgId()));
                    resultHandler.handle(AsyncResultImpl.create(ex));
                } else {
                    Future<Object> putNewApiKeyFuture = Future.future();
                    Future<Object> endFuture = Future.future();

                    // Order: Create new API Key reference; Replace old ID -> API mapping; Delete old key reference.
                    // This should ensure no breaking/irreconcilable behaviour.
                    objectMap.putIfAbsent(client.getApiKey(), client, putNewApiKeyFuture.completer());

                    // Replace API Key reference
                    putNewApiKeyFuture.compose(clientWithSameApiKey -> {
                        Future<Object> replaceClientFuture = Future.future();
                        // There's a small chance the same key will replace the old one, usually
                        // only in hard-coded tests. Generally clientWithSameApiKey will be null.
                        if (clientWithSameApiKey != null) {
                            objectMap.replace(client.getApiKey(), client, replaceClientFuture.completer());
                        } else {
                            objectMap.putIfAbsent(clientIndex, client, replaceClientFuture.completer());
                        }
                        return replaceClientFuture;
                        // Remove old API key reference
                    }).compose(oldClientRaw -> {
                        Client oldClient = (Client) oldClientRaw;
                        if (oldClientRaw != null && !oldClient.getApiKey().equals(client.getApiKey())) {
                            objectMap.remove(oldClient.getApiKey(), endFuture.completer());
                        } else {
                            endFuture.complete();
                        }
                    }, endFuture)
                    // When finished, call this handler and then resultHandler
                    .setHandler(handleResult(resultHandler));
                }
            } else {
                resultHandler.handle(AsyncResultImpl.create(compositeResult.cause()));
            }
        });
    }

    @Override
    public void unregisterClient(Client client, IAsyncResultHandler<Void> resultHandler) {
        String clientIndex = getClientIndex(client);
        objectMap.get(clientIndex, handleSuccessfulResult(resultHandler, oldClientRaw -> {
            Client oldClient = (Client) oldClientRaw;
            if (oldClient == null) {
                Exception ex = new ClientNotFoundException(Messages.i18n.format("InMemoryRegistry.ClientNotFound"));
                resultHandler.handle(AsyncResultImpl.create(ex));
            } else {
                Future<Object> future1 = Future.future();
                Future<Object> future2 = Future.future();
                objectMap.remove(clientIndex, future1.completer());
                objectMap.remove(oldClient.getApiKey(), future2.completer());
                CompositeFuture.all(future1, future2).setHandler(handleCompositeResult(resultHandler));
            }
        }));
    }

    @Override
    public void getApi(String organizationId, String apiId, String apiVersion, IAsyncResultHandler<Api> handler) {
        objectMap.get(getApiIndex(organizationId, apiId, apiVersion), handleResult(handler));
    }

    @Override
    public void getClient(String apiKey, IAsyncResultHandler<Client> handler) {
        objectMap.get(apiKey, handleResult(handler));
    }

    @Override
    public void getClient(String organizationId, String clientId, String clientVersion, IAsyncResultHandler<Client> handler) {
        String idx = getClientIndex(organizationId, clientId, clientVersion);
        objectMap.get(idx, handleResult(handler));
    }

    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    public void getContract(String apiOrganizationId, String apiId, String apiVersion, String apiKey, IAsyncResultHandler<ApiContract> handler) {
        String apiIndex = getApiIndex(apiOrganizationId, apiId, apiVersion);
        Future apiFuture = Future.future();
        Future clientFuture = Future.future();
        objectMap.get(apiIndex, apiFuture.completer());
        objectMap.get(apiKey, clientFuture.completer());

        CompositeFuture.all(apiFuture, clientFuture).setHandler(compositeResult -> {
            if (compositeResult.succeeded()) {
                Api api = (Api) apiFuture.result();
                Client client = (Client) clientFuture.result();

                if (client == null) { // The API key resolved to no client.
                    Exception error = new ClientNotFoundException(Messages.i18n.format("InMemoryRegistry.NoClientForAPIKey", apiKey));
                    handler.handle(AsyncResultImpl.create(error, ApiContract.class));
                } else if (api == null) { // The API was never published or has been retired.
                    Exception error = new ApiRetiredException(Messages.i18n.format("InMemoryRegistry.ApiWasRetired", apiId, apiOrganizationId));
                    handler.handle(AsyncResultImpl.create(error, ApiContract.class));
                } else {
                    Optional<Contract> matchedOpt = client.getContracts().stream()
                            .filter(contract -> contract.matches(apiOrganizationId, apiId, apiVersion))
                            .findFirst();
                    if (matchedOpt.isPresent()) {
                        Contract contract = matchedOpt.get();
                        ApiContract apiContract = new ApiContract(api, client, contract.getPlan(), contract.getPolicies());
                        handler.handle(AsyncResultImpl.create(apiContract));
                    } else {
                        Exception error = new
NoContractFoundException(Messages.i18n.format("InMemoryRegistry.NoContractFound", //$NON-NLS-1$ client.getClientId(), api.getApiId())); handler.handle(AsyncResultImpl.create(error, ApiContract.class)); } } } else { handler.handle(AsyncResultImpl.create(compositeResult.cause())); } }); } @Override public void listApis(String organizationId, int page, int pageSize, IAsyncResultHandler<List<String>> handler) { throw new UnsupportedOperationException("Vert.x AsyncMap does not yet support iteration"); // TODO 1.5.x supports iteration. } @Override public void listOrgs(IAsyncResultHandler<List<String>> handler) { throw new UnsupportedOperationException("Vert.x AsyncMap does not yet support iteration"); // TODO 1.5.x supports iteration. } @Override public void listApiVersions(String organizationId, String apiId, int page, int pageSize, IAsyncResultHandler<List<String>> handler) { throw new UnsupportedOperationException("Vert.x AsyncMap does not yet support iteration"); // TODO 1.5.x supports iteration. } @Override public void listClients(String organizationId, int page, int pageSize, IAsyncResultHandler<List<String>> handler) { throw new UnsupportedOperationException("Vert.x AsyncMap does not yet support iteration"); // TODO 1.5.x supports iteration. } @Override public void listClientVersions(String organizationId, String clientId, int page, int pageSize, IAsyncResultHandler<List<String>> handler) { throw new UnsupportedOperationException("Vert.x AsyncMap does not yet support iteration"); // TODO 1.5.x supports iteration. } private String getApiIndex(Api api) { return getApiIndex(api.getOrganizationId(), api.getApiId(), api.getVersion()); } private String getApiIndex(String orgId, String apiId, String version) { return "API::" + orgId + "|" + apiId + "|" + version; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ } private String getClientIndex(Client client) { return getClientIndex(client.getOrganizationId(), client.getClientId(), client.getVersion()); } private String getClientIndex(String orgId, String clientId, String version) { return "CLIENT::" + orgId + "|" + clientId + "|" + version; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ } @SuppressWarnings("unchecked") private <T> Handler<AsyncResult<CompositeFuture>> handleCompositeResult(IAsyncResultHandler<T> apimanResultHandler) { return result -> { if (result.succeeded()) { apimanResultHandler.handle(AsyncResultImpl.create((T) result.result())); } else { apimanResultHandler.handle(AsyncResultImpl.create(result.cause())); } }; } @SuppressWarnings("unchecked") private <T, Q> Handler<AsyncResult<Q>> handleResult(IAsyncResultHandler<T> apimanResultHandler) { return result -> { if (result.succeeded()) { apimanResultHandler.handle(AsyncResultImpl.create((T) result.result())); } else { apimanResultHandler.handle(AsyncResultImpl.create(result.cause())); } }; } private Handler<AsyncResult<Void>> handleResultVoid(IAsyncResultHandler<Void> apimanResultHandler) { return result -> { if (result.succeeded()) { apimanResultHandler.handle(AsyncResultImpl.create(result.result())); } else { apimanResultHandler.handle(AsyncResultImpl.create(result.cause())); } }; } }
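All of the entries above share one cluster-wide AsyncMap, so APIs, clients, and raw API keys are kept apart only by the string prefixes built in getApiIndex and getClientIndex. A tiny illustration of the key scheme, with made-up org, id, and version values:

// "API::" and "CLIENT::" namespaces keep the two entry kinds from colliding in the shared map.
String apiIndex    = "API::acme|orders|1.0";    // what getApiIndex("acme", "orders", "1.0") yields
String clientIndex = "CLIENT::acme|mobile|2.1"; // what getClientIndex("acme", "mobile", "2.1") yields
// Raw API keys are stored without a prefix, which implicitly assumes no key
// ever starts with "API::" or "CLIENT::".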
6,285
2,281
<reponame>guigzzz/simple-binary-encoding /* * Copyright 2013-2021 Real Logic Limited. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.co.real_logic.sbe.generation.rust; import org.agrona.Verify; import uk.co.real_logic.sbe.PrimitiveType; import uk.co.real_logic.sbe.generation.Generators; import uk.co.real_logic.sbe.generation.rust.RustGenerator.CodecType; import uk.co.real_logic.sbe.ir.Encoding; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.*; import static java.lang.Long.parseLong; import static java.lang.String.format; import static uk.co.real_logic.sbe.generation.Generators.toLowerFirstChar; import static uk.co.real_logic.sbe.generation.rust.RustGenerator.CodecType.Decoder; import static uk.co.real_logic.sbe.generation.rust.RustGenerator.CodecType.Encoder; /** * Utility method for Rust codec generation. */ public class RustUtil { static final String INDENT = " "; static final Map<PrimitiveType, String> TYPE_NAME_BY_PRIMITIVE_TYPE_MAP = new EnumMap<>(PrimitiveType.class); static { TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.CHAR, "u8"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.INT8, "i8"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.INT16, "i16"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.INT32, "i32"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.INT64, "i64"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.UINT8, "u8"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.UINT16, "u16"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.UINT32, "u32"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.UINT64, "u64"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.FLOAT, "f32"); TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.put(PrimitiveType.DOUBLE, "f64"); } /** * Map the name of a {@link PrimitiveType} to a Rust primitive type name. * * @param primitiveType to map. * @return the name of the Rust primitive that most closely maps. */ static String rustTypeName(final PrimitiveType primitiveType) { return TYPE_NAME_BY_PRIMITIVE_TYPE_MAP.get(primitiveType); } static String generateRustLiteral(final PrimitiveType type, final String value) { Verify.notNull(type, "type"); Verify.notNull(value, "value"); final String typeName = rustTypeName(type); if (typeName == null) { throw new IllegalArgumentException("Unknown Rust type name found for primitive " + type.primitiveName()); } switch (type) { case CHAR: case INT8: case INT16: case INT32: case INT64: return value + '_' + typeName; case UINT8: case UINT16: case UINT32: case UINT64: return "0x" + Long.toHexString(parseLong(value)) + '_' + typeName; case FLOAT: case DOUBLE: return value.endsWith("NaN") ? 
typeName + "::NAN" : value + '_' + typeName; default: throw new IllegalArgumentException("Unsupported literal generation for type: " + type.primitiveName()); } } static byte eightBitCharacter(final String asciiCharacter) { Verify.notNull(asciiCharacter, "asciiCharacter"); final byte[] bytes = asciiCharacter.getBytes(StandardCharsets.US_ASCII); if (bytes.length != 1) { throw new IllegalArgumentException( format("String value %s did not fit into a single 8-bit character", asciiCharacter)); } return bytes[0]; } /** * Format a struct name for the generated code. * * @param structName to be formatted. * @return the formatted struct name. */ static String formatStructName(final String structName) { return Generators.toUpperFirstChar(structName); } static String codecModName(final String prefix) { return toLowerSnakeCase(prefix + "Codec"); } static String codecName(final String structName, final CodecType codecType) { return formatStructName(structName + codecType.name()); } static String encoderName(final String structName) { return codecName(structName, Encoder); } static String decoderName(final String structName) { return codecName(structName, Decoder); } static String formatFunctionName(final String value) { if (value.isEmpty()) { return value; } return sanitizeMethodOrProperty(toLowerSnakeCase(value)); } static String cleanUpperAcronyms(final String value) { final int length = value.length(); for (int i = 0; i < length; i++) { final char c = value.charAt(i); if (!isUpperAlpha(c) && !isNumeric(c)) { if (c != '_' && i > 2) { final int index = i - 1; return value.substring(0, index).toLowerCase() + value.substring(index); } return value; } } return value; } static String characterEncoding(final Encoding encoding) { final String characterEncoding = encoding.characterEncoding(); if (characterEncoding == null) { return "None"; } return characterEncoding; } /** * Converts to 'snake_case' but will also handle when there are multiple * upper case characters in a row 'UPPERCase' => 'upper_case' * * @param value to be formatted * @return the string formatted to 'lower_snake_case' */ static String toLowerSnakeCase(final String value) { if (value.isEmpty()) { return value; } final String cleaned = cleanUpperAcronyms(value); final String s = toLowerFirstChar(cleaned); final int length = s.length(); final StringBuilder out = new StringBuilder(length + 4); char lastChar = '\0'; for (int i = 0, j = 0; j < length; j++) { final boolean wasUpper = isUpperAlpha(lastChar); final boolean wasNumeric = isNumeric(lastChar); final boolean wasUnderscore = lastChar == '_'; final char c = s.charAt(j); if (c == '_') { out.append(c); i = j + 1; } else if (isUpperAlpha(c)) { if (wasNumeric || (!wasUpper && j - i > 1 && !wasUnderscore)) { out.append('_'); out.append(toLowerSnakeCase(s.substring(j))); return out.toString(); } out.append(Character.toLowerCase(c)); } else if (isNumeric(c)) { if (!wasNumeric && j - i > 1 && !wasUnderscore) { out.append('_'); out.append(toLowerSnakeCase(s.substring(j))); return out.toString(); } out.append(c); } else { if ((wasUpper || wasNumeric) && j - i > 1 && !wasUnderscore) { out.append('_'); out.append(toLowerSnakeCase(s.substring(j))); return out.toString(); } out.append(c); } lastChar = c; } return out.toString(); } private static boolean isUpperAlpha(final char c) { return 'A' <= c && c <= 'Z'; } private static boolean isNumeric(final char c) { return '0' <= c && c <= '9'; } private static String sanitizeMethodOrProperty(final String name) { if (shadowsKeyword(name)) { return name + 
"_"; } else { return name; } } private static boolean shadowsKeyword(final String name) { return ReservedKeyword.anyMatch(name); } static Appendable indent(final Appendable appendable) throws IOException { return indent(appendable, 1); } static Appendable indent(final Appendable appendable, final int level) throws IOException { Appendable out = appendable; for (int i = 0; i < level; i++) { out = out.append(INDENT); } return out; } static Appendable indent(final Appendable appendable, final int level, final String f, final Object... args) throws IOException { return indent(appendable, level).append(format(f, args)); } private enum ReservedKeyword { Abstract, AlignOf, As, Async, Become, Box, Break, Const, Continue, Crate, Do, Else, Enum, Extern, False, Final, Fn, For, If, Impl, In, Let, Loop, Macro, Match, Mod, Move, Mut, OffsetOf, Override, Priv, Proc, Pub, Pure, Ref, Return, Self, Sizeof, Static, Struct, Super, Trait, True, Type, Typeof, Unsafe, Unsized, Use, Virtual, Where, While, Yield; private static final Set<String> LOWER_CASE_NAMES = new HashSet<>(); static { Arrays.stream(ReservedKeyword.values()) .map(java.lang.Enum::name) .map(String::toLowerCase) .forEach(LOWER_CASE_NAMES::add); } private static boolean anyMatch(final String v) { return LOWER_CASE_NAMES.contains(v.toLowerCase()); } } }
4,713
967
<filename>concurrency/src/main/java/com/javaedge/concurrency/example/furure/jdk/CompletionServiceDemo.java
package com.javaedge.concurrency.example.furure.jdk;

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

/**
 * @author JavaEdge
 * @date 2021/4/18
 */
public class CompletionServiceDemo {

//    public static void main(String[] args) throws InterruptedException, ExecutionException {
//        test1();
//        test2();
//        test3();
//    }

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        test0();
    }

    private static void test0() throws ExecutionException, InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        Future<Integer> f1 = executor.submit(() -> getPrice1());
        Future<Integer> f2 = executor.submit(() -> getPrice2());
        Future<Integer> f3 = executor.submit(() -> getPrice3());
        executor.execute(() -> {
            try {
                save(f1.get());
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        });
        executor.execute(() -> {
            try {
                save(f2.get());
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        });
        executor.execute(() -> {
            try {
                save(f3.get());
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        });
        f3.get();
    }

    private static AtomicReference<Integer> test3() {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);
        // Query the prices asynchronously
        completionService.submit(CompletionServiceDemo::getPrice1);
        completionService.submit(CompletionServiceDemo::getPrice2);
        completionService.submit(CompletionServiceDemo::getPrice3);
        // Save the query results asynchronously
        // and compute the lowest price
        AtomicReference<Integer> m = new AtomicReference<>(Integer.MAX_VALUE);
        for (int i = 0; i < 3; i++) {
            executor.execute(() -> {
                Integer r = null;
                try {
                    r = completionService.take().get();
                } catch (Exception e) {
                    e.printStackTrace();
                }
                save(r);
                m.set(Integer.min(m.get(), r));
            });
        }
        return m;
    }

    private static void test2() throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(executor);
        // Query asynchronously
        cs.submit(CompletionServiceDemo::getPrice1);
        cs.submit(CompletionServiceDemo::getPrice2);
        cs.submit(CompletionServiceDemo::getPrice3);
        // Save the results asynchronously
        for (int i = 0; i < 3; i++) {
            Integer r = cs.take().get();
            executor.execute(() -> save(r));
        }
    }

    private static void test1() throws ExecutionException, InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        // Query asynchronously
        Future<Integer> f1 = executor.submit(CompletionServiceDemo::getPrice1);
        Future<Integer> f2 = executor.submit(CompletionServiceDemo::getPrice2);
        Future<Integer> f3 = executor.submit(CompletionServiceDemo::getPrice3);
        Integer r;
        // Get each price and save it
        r = f1.get();
        Integer finalR = r;
        executor.execute(() -> save(finalR));
        r = f2.get();
        Integer finalR1 = r;
        executor.execute(() -> save(finalR1));
        r = f3.get();
        Integer finalR2 = r;
        executor.execute(() -> save(finalR2));
    }

    private static void save(Integer r) {
        System.out.println("r" + r);
    }

    private static Integer getPrice3() {
        return 3;
    }

    private static Integer getPrice2() {
        return 2;
    }

    private static Integer getPrice1() {
        return 1;
    }
}
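The point of the CompletionService variants above is that take() hands back futures in completion order, whereas test1()'s sequential f1.get() can block on the slowest task. A minimal standalone sketch of that ordering behaviour (timings and values are made up):

import java.util.concurrent.*;

public class CompletionOrderSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);
        cs.submit(() -> { Thread.sleep(300); return 3; });
        cs.submit(() -> { Thread.sleep(100); return 1; });
        cs.submit(() -> { Thread.sleep(200); return 2; });
        for (int i = 0; i < 3; i++) {
            System.out.println(cs.take().get()); // completion order: 1, 2, 3, regardless of submit order
        }
        pool.shutdown();
    }
}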
2,082
1,251
package cn.dblearn.blog.common.validator.group;

/**
 * Validation group for update operations
 */
public interface UpdateGroup {
}
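Marker interfaces like this drive Bean Validation groups: a constraint tagged with the group only fires when that group is requested. A hedged sketch of how such a group is typically wired up (the DTO and controller names are hypothetical, not from this project); note that requesting an explicit group replaces the Default group rather than adding to it:

import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;

public class ArticleForm {

    /** Only enforced when UpdateGroup is requested; create requests may omit the id. */
    @NotNull(groups = UpdateGroup.class)
    private Integer id;

    /** No groups attribute, so this constraint belongs to the Default group. */
    @NotBlank
    private String title;
}

// In a Spring MVC handler (hypothetical controller method):
// public R update(@Validated(UpdateGroup.class) @RequestBody ArticleForm form) { ... }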
47
302
<filename>others/SoftwareHEN/SoftwareHEN/main.cpp #include "tabladatos.h" #include "unidadesentrada.h" #include "ventanaprincipal.h" #include <QApplication> #include <QDesktopWidget> #include <QMainWindow> int main(int argc, char *argv[]) { QApplication a(argc, argv); a.setStyle("fusion"); QDesktopWidget dw; VentanaPrincipal first; int x = dw.width() * 0.8; int y = dw.height() * 0.8; first.setFixedSize(x, y); // TablaDatos second; // QObject::connect(&first,SIGNAL(actionNew(int)),&second,SLOT(actionNewAC())); // QObject::connect(&first,SIGNAL(on_SIS_toggled(bool)),&fourth,SLOT(OnnewSignalSIS(bool))); // QObject::connect(&fourth, SIGNAL(subwindow(int)),&second, // SLOT(subwindowVALUE(int))); QObject::connect(&fourth, // SIGNAL(newSignalSIS(bool)),&second, SLOT(OnnewSignalSIS(bool))); first.show(); return a.exec(); }
364
4,182
{ "Align center": "Allineato al centro", "Align left": "Allineato a sinistra", "Align right": "Allineato a destra", "Apply": "Applica", "Attributes": "Attributi", "Bold": "Grassetto", "Bullet list": "Lista puntata", "Cancel": "Annulla", "Clear": "Pulisci", "Code": "Codice", "Crop marks": "Ritaglia", "Enter a link": "Inserisci un link", "Image": "Immagine", "Heading": "Titolo", "Indent": "Indentazione", "Insert": "Inserisci", "Insert image": "Inserisci una immagine", "Insert table": "Inserisci una tabella", "Insert video": "Inserisci un video", "Italic": "Italico", "Line break": "Interruzione di linea", "Link": "Link", "List": "Lista", "List item": "Elemento della lista", "Name": "Nome", "No styles available for this tag": "Non ci sono stili per questo tag", "Numbers list": "Lista numerata", "Paste YouTube or Vimeo URL": "Incolla la url di YouTube o di Vimeo", "Paragraph": "Paragrafo", "Preformatted": "Preformattato", "Properties": "Proprietà", "Redo": "Rifare", "Remove": "Rimuovi", "Rotate": "Ruota", "Styles": "Stili", "Subheading": "Sottotitolo", "Table": "Tabella", "Table body (columns)": "Corpo della tabella (colonne)", "Table foot": "Piede della tabella", "Table head": "Intestazione della tabella", "Table row": "Riga della tabella", "Text": "Testo", "Undo": "Annullare", "Unindent": "Togli indentazione", "Update table": "Aggiorna tabella", "Upload": "Carica", "Value": "Valore", "Video": "Video", "Your changes have not been saved, do you really want to lose them?": "I tuoi cambiamenti non sono stati salvati, vuoi perderli realmente?" }
751
876
#pragma once #include <vector> #include "rapidcheck/detail/Results.h" #include "rapidcheck/Maybe.h" #include "rapidcheck/Shrinkable.h" #include "rapidcheck/detail/Property.h" #include "rapidcheck/detail/TestParams.h" #include "rapidcheck/detail/TestListener.h" namespace rc { namespace detail { struct SearchResult { enum class Type { Success, Failure, GaveUp }; /// Represents information about a failure. struct Failure { Failure(Shrinkable<CaseDescription> shr, int sz, const Random &rnd) : shrinkable(shr) , size(sz) , random(rnd) {} /// The shrinkable of the failing test case. Shrinkable<CaseDescription> shrinkable; /// The size at which the property failed. int size; /// The Random state which produced the failure. Random random; }; /// The type of the result. Type type; /// The number of successful test cases. int numSuccess; /// The number of discarded test cases. int numDiscarded; /// The tags of successful test cases. std::vector<Tags> tags; /// On Failure or GiveUp, contains failure information. Maybe<Failure> failure; }; /// Searches for a failure in the given property. /// /// @param property The property to search. /// @param params The test parameters to use. /// @param listener Listener that will receive callbacks on test progress. /// /// @return A `SearchResult` structure describing the result of the search. SearchResult searchProperty(const Property &property, const TestParams &params, TestListener &listener); /// Shrinks the given case description shrinkable. /// /// @param shrinkable The shrinkable to shrink. /// @param listener A test listener to report progress to. /// /// @return A pair of the final shrink as well as the path leading there. std::pair<Shrinkable<CaseDescription>, std::vector<std::size_t>> shrinkTestCase(const Shrinkable<CaseDescription> &shrinkable, TestListener &listener); /// Combined search and shrink. Returns a test result. /// /// @param property The property to test. /// @param metadata Metadata about the test. /// @param params The test parameters. /// @param listener A test listener to report progress to. TestResult testProperty(const Property &property, const TestMetadata &metadata, const TestParams &params, TestListener &listener); /// Reproduces a test result for the given property using a `Reproduce` value. TestResult reproduceProperty(const Property &property, const Reproduce &reproduce); } // namespace detail } // namespace rc
908
331
package org.fordes.subview; import cn.hutool.core.util.StrUtil; import de.felixroske.jfxsupport.AbstractJavaFxApplicationSupport; import javafx.scene.image.Image; import javafx.stage.Stage; import javafx.stage.StageStyle; import lombok.SneakyThrows; import org.fordes.subview.utils.constants.CommonConstants; import org.fordes.subview.view.SplashView; import org.fordes.subview.view.StartView; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.scheduling.annotation.EnableAsync; import org.springframework.scheduling.annotation.EnableScheduling; import java.util.Collection; import java.util.Collections; @SpringBootApplication @EnableScheduling @EnableAsync public class SubtitlesViewApplication extends AbstractJavaFxApplicationSupport { private static final Image logoImage = new Image(CommonConstants.APPLICATION_LOGO_ICON_URL.toExternalForm()); public static void main(String[] args) { launch(SubtitlesViewApplication.class, StartView.class, new SplashView(), args); } @SneakyThrows @Override public void beforeInitialView(Stage stage, ConfigurableApplicationContext ctx) { stage.initStyle(StageStyle.TRANSPARENT); stage.setTitle("SubView Alpha"); stage.setFullScreenExitHint(StrUtil.EMPTY); stage.getIcons().add(logoImage); } @Override public Collection<Image> loadDefaultIcons() { return Collections.singletonList(logoImage); } @Override public void stop() throws Exception { super.stop(); } }
556
778
#ifndef VEXCL_RANDOM_THREEFRY_HPP #define VEXCL_RANDOM_THREEFRY_HPP /* The MIT License Copyright (c) 2012-2018 <NAME> <<EMAIL>> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * \file vexcl/random/threefry.hpp * \author <NAME> <<EMAIL>> * \brief Threefry RNG. Threefry, based on the Threefish cipher, is a non cryptographic algorithm for pseudorandom number generation from the Random123 suite, see <http://www.deshawresearch.com/resources_random123.html> The original code came with the following copyright notice: \verbatim Copyright 2010-2011, <NAME>. Shaw Research. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of <NAME> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\endverbatim */ namespace vex { namespace random { namespace detail { template <size_t bits, size_t w> struct rotation_table; template<> struct rotation_table<32, 2> { static std::array<unsigned, 8> get() { static const std::array<unsigned, 8> R = {{ 13, 15, 26, 6, 17, 29, 16, 24 }}; return R; } }; template <> struct rotation_table<32, 4> { static std::array<unsigned, 16> get() { static const std::array<unsigned, 16> R = {{ 10, 26, 11, 21, 13, 27, 23, 5, 6, 20, 17, 11, 25, 10, 18, 20 }}; return R; } }; template <> struct rotation_table<64, 2> { static std::array<unsigned, 8> get() { static const std::array<unsigned, 8> R = {{ 16, 42, 12, 31, 16, 32, 24, 21 }}; return R; } }; template <> struct rotation_table<64, 4> { static std::array<unsigned, 16> get() { static const std::array<unsigned, 16> R = {{ 14, 16, 52, 57, 23, 40, 5, 37, 25, 33, 46, 12, 58, 22, 32, 32 }}; return R; } }; } /// Threefry random number generator. /** * Threefry, based on the Threefish cipher, is a non cryptographic algorithm * for pseudorandom number generation from the Random123 suite. * \see http://www.deshawresearch.com/resources_random123.html * \sa vex::Random * \sa vex::RandomNormal */ struct threefry { static std::string name() { return "threefry"; } // Generates function threefry(ctr, key). // ctr will be modified, containing the random output. // key will be preserved. template <class T, size_t N, size_t R = 20> struct function { static const size_t K = N; static_assert( N == 2 || N == 4, "Only supports vectors with 2 or 4 components." ); static_assert( std::is_same<T, cl_uint>::value || std::is_same<T, cl_ulong>::value, "Only supports 32 or 64 bit integers." ); static std::string name() { std::ostringstream s; s << "threefry_" << type_name<T>() << "_" << N << "_" << R; return s.str(); } static void define(backend::source_generator &src) { const size_t bits = sizeof(T) * 8; auto rot = detail::rotation_table<bits, N>::get(); src.begin_function<void>( name() ); src.begin_function_parameters(); src.template parameter< regstr_ptr<T> >("ctr"); src.template parameter< regstr_ptr<T> >("key"); src.end_function_parameters(); #if defined(VEXCL_BACKEND_CUDA) || defined(VEXCL_BACKEND_JIT) src.new_line() << "#define rotate(x, b) " "(((x) << (b)) | ((x) >> (sizeof(x)*8 - (b))))"; #endif src.new_line() << "const " << type_name<T>() << " p = " << (bits == 32 ? "0x1BD11BDA" : "0x1BD11BDAA9FC1A22"); for(size_t i = 0; i < N; ++i) src << " ^ key[" << i << "]"; src << ";"; // Insert initial key before round 0 for(size_t i = 0; i < N; ++i) src.new_line() << "ctr[" << i << "] += key[" << i << "];"; for(size_t round = 0; round < R; ++round) { if(N == 2) { src.new_line() << "ctr[0] += ctr[1]; " << "ctr[1] = rotate(ctr[1], " << rot[round % 8] << "u); " << "ctr[1] ^= ctr[0];"; } else { const size_t r = 2 * (round % 8), r0 = r + (round % 2), r1 = r + ((round + 1) % 2); src.new_line() << "ctr[0] += ctr[1]; " << "ctr[1] = rotate(ctr[1], " << rot[r0] << "u); " << "ctr[1] ^= ctr[0];"; src.new_line() << "ctr[2] += ctr[3]; " << "ctr[3] = rotate(ctr[3], " << rot[r1] << "u); " << "ctr[3] ^= ctr[2];"; } // inject key if((round + 1) % 4 == 0) { const size_t j = round / 4 + 1; for(size_t i = 0; i < N; ++i) { const size_t ii = ((j + i) % (N + 1)); src.new_line() << "ctr[" << i << "] += "; if(ii == N) src << "p; "; else src << "key[" << ii << "]; "; } src << "ctr[" << (N - 1) << "] += " << j << ";"; } } #ifdef VEXCL_BACKEND_CUDA src.new_line() << "#undef rotate"; #endif src.end_function(); } }; }; } // namespace random } // namespace vex #endif
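define() above emits this logic into kernel source at code-generation time. For intuition, here is a host-side Java sketch of the same 2x64 round structure, using the rotation table and key schedule shown, assuming the default R = 20 rounds; it is illustrative only and not part of VexCL:

public class Threefry2x64Sketch {
    private static final int[] ROT = {16, 42, 12, 31, 16, 32, 24, 21};
    private static final long PARITY = 0x1BD11BDAA9FC1A22L;

    static long rotl(long x, int b) {
        return (x << b) | (x >>> (64 - b));
    }

    /** 20-round threefry: ctr is modified in place, key is preserved. */
    static void threefry(long[] ctr, long[] key) {
        long[] k = {key[0], key[1], PARITY ^ key[0] ^ key[1]};
        ctr[0] += k[0];                     // initial key injection before round 0
        ctr[1] += k[1];
        for (int round = 0; round < 20; round++) {
            ctr[0] += ctr[1];
            ctr[1] = rotl(ctr[1], ROT[round % 8]);
            ctr[1] ^= ctr[0];
            if ((round + 1) % 4 == 0) {     // key injection every 4 rounds
                long j = round / 4 + 1;
                ctr[0] += k[(int) (j % 3)];
                ctr[1] += k[(int) ((j + 1) % 3)] + j;
            }
        }
    }
}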
3,524
5,250
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.flowable.eventregistry.rest.service.api.repository;

import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;
import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;

import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.flowable.common.engine.impl.util.ReflectUtil;
import org.flowable.eventregistry.api.EventDeployment;
import org.flowable.eventregistry.rest.service.BaseSpringRestTestCase;
import org.flowable.eventregistry.rest.service.HttpMultipartHelper;
import org.flowable.eventregistry.rest.service.api.EventRestUrls;
import org.flowable.eventregistry.test.EventDeploymentAnnotation;

import com.fasterxml.jackson.databind.JsonNode;

import net.javacrumbs.jsonunit.core.Option;

/**
 * Test for all REST-operations related to a single Deployment resource.
 *
 * @author <NAME>
 */
public class DeploymentResourceTest extends BaseSpringRestTestCase {

    /**
     * Test deploying a single event definition file. POST event-registry-repository/deployments
     */
    public void testPostNewDeploymentEventFile() throws Exception {
        try {
            HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT_COLLECTION));
            httpPost.setEntity(HttpMultipartHelper.getMultiPartEntity("simpleEvent.event", "application/json",
                    ReflectUtil.getResourceAsStream("org/flowable/eventregistry/rest/service/api/repository/simpleEvent.event"), null));
            CloseableHttpResponse response = executeBinaryRequest(httpPost, HttpStatus.SC_CREATED);

            // Check deployment
            JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
            closeResponse(response);
            String newDeploymentId = responseNode.get("id").textValue();
            assertThatJson(responseNode)
                    .when(Option.IGNORING_EXTRA_FIELDS)
                    .isEqualTo("{"
                            + "url: '" + SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, newDeploymentId) + "',"
                            + "name: 'simpleEvent',"
                            + "tenantId: \"\","
                            + "category: null"
                            + "}");
            assertThat(repositoryService.createDeploymentQuery().deploymentId(newDeploymentId).count()).isEqualTo(1L);

            // Check if process is actually deployed in the deployment
            List<String> resources = repositoryService.getDeploymentResourceNames(newDeploymentId);
            assertThat(resources)
                    .containsOnly("simpleEvent.event");
            assertThat(repositoryService.createEventDefinitionQuery().deploymentId(newDeploymentId).count()).isEqualTo(1L);
        } finally {
            // Always cleanup any created deployments, even if the test failed
            List<EventDeployment> deployments = repositoryService.createDeploymentQuery().list();
            for (EventDeployment deployment : deployments) {
                repositoryService.deleteDeployment(deployment.getId());
            }
        }
    }

    /**
     * Test deploying an invalid file. POST repository/deployments
     */
    public void testPostNewDeploymentInvalidFile() throws Exception {
        HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT_COLLECTION));
        httpPost.setEntity(HttpMultipartHelper.getMultiPartEntity("simpleEvent.invalidfile", "application/json",
                ReflectUtil.getResourceAsStream("org/flowable/eventregistry/rest/service/api/repository/simpleEvent.event"), null));
        closeResponse(executeBinaryRequest(httpPost, HttpStatus.SC_BAD_REQUEST));
    }

    /**
     * Test getting a single deployment. GET repository/deployments/{deploymentId}
     */
    @EventDeploymentAnnotation(resources = { "org/flowable/eventregistry/rest/service/api/repository/simpleEvent.event" })
    public void testGetDeployment() throws Exception {
        EventDeployment existingDeployment = repositoryService.createDeploymentQuery().singleResult();

        HttpGet httpGet = new HttpGet(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, existingDeployment.getId()));
        CloseableHttpResponse response = executeRequest(httpGet, HttpStatus.SC_OK);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);

        String deploymentId = existingDeployment.getId();
        assertThatJson(responseNode)
                .when(Option.IGNORING_EXTRA_FIELDS)
                .isEqualTo("{"
                        + "url: '" + SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, deploymentId) + "',"
                        + "name: '" + existingDeployment.getName() + "',"
                        + "tenantId: \"\","
                        + "category: " + existingDeployment.getCategory()
                        + "}");
    }

    /**
     * Test getting a nonexistent deployment. GET repository/deployments/{deploymentId}
     */
    public void testGetUnexistingDeployment() throws Exception {
        HttpGet httpGet = new HttpGet(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, "unexisting"));
        CloseableHttpResponse response = executeRequest(httpGet, HttpStatus.SC_NOT_FOUND);
        closeResponse(response);
    }

    /**
     * Test deleting a single deployment. DELETE repository/deployments/{deploymentId}
     */
    public void testDeleteDeployment() throws Exception {
        EventDeployment existingDeployment = repositoryService.createDeployment().name("Deployment 1").category("DEF")
                .addClasspathResource("org/flowable/eventregistry/rest/service/api/repository/simpleEvent.event")
                .deploy();

        // Delete the deployment
        HttpDelete httpDelete = new HttpDelete(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, existingDeployment.getId()));
        CloseableHttpResponse response = executeRequest(httpDelete, HttpStatus.SC_NO_CONTENT);
        closeResponse(response);

        existingDeployment = repositoryService.createDeploymentQuery().deploymentId(existingDeployment.getId()).singleResult();
        assertThat(existingDeployment).isNull();
    }

    /**
     * Test deleting a nonexistent deployment. DELETE repository/deployments/{deploymentId}
     */
    public void testDeleteUnexistingDeployment() throws Exception {
        HttpDelete httpDelete = new HttpDelete(SERVER_URL_PREFIX + EventRestUrls.createRelativeResourceUrl(EventRestUrls.URL_DEPLOYMENT, "unexisting"));
        CloseableHttpResponse response = executeRequest(httpDelete, HttpStatus.SC_NOT_FOUND);
        closeResponse(response);
    }
}
2,910