max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _IMAPDLG_HXX_ #define _IMAPDLG_HXX_ #include <svtools/inettbc.hxx> #include <sfx2/childwin.hxx> #include <sfx2/ctrlitem.hxx> #include <sfx2/basedlgs.hxx> #ifndef _FIXED_HXX //autogen #include <vcl/fixed.hxx> #endif #ifndef _COMBOBOX_HXX //autogen #include <vcl/combobox.hxx> #endif #ifndef _EDIT_HXX //autogen #include <vcl/edit.hxx> #endif #ifndef _TOOLBOX_HXX //autogen #include <vcl/toolbox.hxx> #endif #ifndef _STATUS_HXX //autogen #include <vcl/status.hxx> #endif #include "svx/svxdllapi.h" #ifndef _GOMISC_HXX class ImageMap; #endif /************************************************************************* |* |* Ableitung vom SfxChildWindow als "Behaelter" fuer Float |* \************************************************************************/ class Graphic; class TargetList; class SVX_DLLPUBLIC SvxIMapDlgChildWindow : public SfxChildWindow { public: SvxIMapDlgChildWindow( Window*, sal_uInt16, SfxBindings*, SfxChildWinInfo* ); SFX_DECL_CHILDWINDOW( SvxIMapDlgChildWindow ); static void UpdateIMapDlg( const Graphic& rGraphic, const ImageMap* pImageMap = NULL, const TargetList* pTargetList = NULL, void* pEditingObj = NULL ); }; #ifndef _REDUCED_IMAPDLG_HXX_ #define _REDUCED_IMAPDLG_HXX_ /************************************************************************* |* |* |* \************************************************************************/ class SvxIMapDlg; class SvxIMapDlgItem : public SfxControllerItem { SvxIMapDlg& rIMap; protected: virtual void StateChanged( sal_uInt16 nSID, SfxItemState eState, const SfxPoolItem* pState ); public: SvxIMapDlgItem( sal_uInt16 nId, SvxIMapDlg& rIMapDlg, SfxBindings& rBindings ); }; /************************************************************************* |* |* |* \************************************************************************/ class IMapOwnData; class IMapWindow; class SVX_DLLPUBLIC SvxIMapDlg : public SfxModelessDialog // SfxFloatingWindow { friend class IMapOwnData; friend class IMapWindow; using Window::Update; ToolBox aTbxIMapDlg1; FixedText aFtURL; SvtURLBox maURLBox; FixedText aFtText; Edit aEdtText; FixedText maFtTarget; ComboBox maCbbTarget; StatusBar aStbStatus; ImageList maImageList; ImageList maImageListH; Size aLastSize; IMapWindow* pIMapWnd; IMapOwnData* pOwnData; void* pCheckObj; SvxIMapDlgItem aIMapItem; virtual void Resize(); virtual sal_Bool Close(); #ifdef _IMAPDLG_PRIVATE DECL_LINK( TbxClickHdl, ToolBox* ); DECL_LINK( InfoHdl, IMapWindow* ); DECL_LINK( MousePosHdl, IMapWindow* ); DECL_LINK( GraphSizeHdl, IMapWindow* ); DECL_LINK( URLModifyHdl, void* ); DECL_LINK( URLLoseFocusHdl, void* ); DECL_LINK( UpdateHdl, Timer* ); DECL_LINK( TbxUpdateHdl, Timer* ); DECL_LINK( StateHdl, IMapWindow* ); DECL_LINK( MiscHdl, void* ); void DoOpen(); sal_Bool DoSave(); #endif public: SvxIMapDlg( SfxBindings *pBindings, SfxChildWindow *pCW, Window* pParent, const ResId& rResId ); ~SvxIMapDlg(); void SetExecState( sal_Bool bEnable ); void SetGraphic( const Graphic& rGraphic ); void SetEditingObject( void* pObj ) { pCheckObj = pObj; } const void* GetEditingObject() const { return pCheckObj; } void SetImageMap( const ImageMap& rImageMap ); const ImageMap& GetImageMap() const; void SetTargetList( const TargetList& rTargetList ); const TargetList& GetTargetList() const; void Update( const Graphic& rGraphic, const ImageMap* pImageMap = NULL, const TargetList* pTargetList = NULL, void* pEditingObj = NULL ); virtual void KeyInput( const KeyEvent& rKEvt ); virtual void DataChanged( const DataChangedEvent& rDCEvt ); void ApplyImageList(); }; /************************************************************************* |* |* Defines |* \************************************************************************/ #define SVXIMAPDLG() ( (SvxIMapDlg*) ( SfxViewFrame::Current()->GetChildWindow( \ SvxIMapDlgChildWindow::GetChildWindowId() )-> \ GetWindow() ) ) #endif // _REDUCED_IMAPDLG_HXX_ #endif // _IMAPDLG_HXX_
1,913
416
// // ws_options.h // libnetcore // // Copyright (c) 2018-2019 Apple Inc. All rights reserved. // #ifndef __NW_WS_OPTIONS_H__ #define __NW_WS_OPTIONS_H__ #ifndef __NW_INDIRECT__ #error "Please include <Network/Network.h> instead of this file directly." #endif // __NW_INDIRECT__ #include <Network/error.h> #include <Network/protocol_options.h> __BEGIN_DECLS NW_ASSUME_NONNULL_BEGIN # pragma mark - WebSocket Constants /*! * @typedef nw_ws_opcode_t * @abstract * WebSocket opcodes that denote the type of frame sent or received by * a WebSocket endpoint. Opcodes define the interpretation of their * associated payload data. */ typedef enum { /*! * @const nw_ws_opcode_invalid Denotes an invalid frame. */ nw_ws_opcode_invalid = -1, /*! * @const nw_ws_opcode_cont Denotes a continuation frame. */ nw_ws_opcode_cont = 0x0, /*! * @const nw_ws_opcode_text Denotes a text frame. */ nw_ws_opcode_text = 0x1, /*! * @const nw_ws_opcode_text Denotes a binary frame. */ nw_ws_opcode_binary = 0x2, /*! * @const nw_ws_opcode_text Denotes a close frame. */ nw_ws_opcode_close = 0x8, /*! * @const nw_ws_opcode_text Denotes a ping frame. */ nw_ws_opcode_ping = 0x9, /*! * @const nw_ws_opcode_text Denotes a pong frame. */ nw_ws_opcode_pong = 0xA, } nw_ws_opcode_t; /*! * @typedef nw_ws_close_code_t * @abstract * WebSocket close codes that describe the reason for closing a WebSocket * connection. Endpoints MAY use the following pre-defined status codes * when sending a Close frame. */ typedef enum { /*! * @const nw_ws_close_code_normal_closure Indicates a normal closure, * meaning that the purpose for which the connection was established * has been fulfilled. */ nw_ws_close_code_normal_closure = 1000, /*! * @const nw_ws_close_code_normal_going_away Indicates that an endpoint is * "going away", such as a server going down or a browser having * navigated away from a page. */ nw_ws_close_code_going_away = 1001, /*! * @const nw_ws_close_code_protocol_error Indicates that an endpoint is * terminating the connection due to a protocol error. */ nw_ws_close_code_protocol_error = 1002, /*! * @const nw_ws_close_code_normal_closure Indicates that an endpoint is * terminating the connection because it has received a type of data * it cannot accept (e.g., an endpoint that understands only text data * MAY send this if it receives a binary message). */ nw_ws_close_code_unsupported_data = 1003, /*! * @const nw_ws_close_code_no_status_received A reserved value and MUST NOT * be set as a status code in a Close control frame by an endpoint. It * is designated for use in applications expecting a status code to * indicate that no status code was actually present. */ nw_ws_close_code_no_status_received = 1005, /*! * @const nw_ws_close_code_abnormal_closure A reserved value and MUST NOT * be set as a status code in a Close control frame by an endpoint. * It is designated for use in applications expecting a status code to * indicate that the connection was closed abnormally, e.g., without * sending or receiving a Close control frame. */ nw_ws_close_code_abnormal_closure = 1006, /*! * @const nw_ws_close_code_invalid_frame_payload_data Indicates that an * endpoint is terminating the connection because it has received data * within a message that was not consistent with the type of the * message (e.g., non-UTF-8 [RFC3629] data within a text message). */ nw_ws_close_code_invalid_frame_payload_data = 1007, /*! * @const nw_ws_close_code_policy_violation Indicates that an endpoint is * terminating the connection because it has received a message that * violates its policy. This is a generic status code that can be * returned when there is no other more suitable status code (e.g., * 1003 or 1009) or if there is a need to hide specific details about * the policy. */ nw_ws_close_code_policy_violation = 1008, /*! * @const nw_ws_close_code_message_too_big Indicates that an endpoint is * terminating the connection because it has received a message that * is too big for it to process. */ nw_ws_close_code_message_too_big = 1009, /*! * @const nw_ws_close_code_mandatory_extension Indicates that an endpoint * (client) is terminating the connection because it has expected the * server to negotiate one or more extensions, but the server didn't * return them in the response message of the WebSocket handshake. The * list of extensions that are needed SHOULD appear in the /reason/ * part of the Close frame. Note that this status code is not used by * the server, because it can fail the WebSocket handshake instead. */ nw_ws_close_code_mandatory_extension = 1010, /*! * @const nw_ws_close_code_internal_server_error Indicates that a server is * terminating the connection because it encountered an unexpected * condition that prevented it from fulfilling the request. */ nw_ws_close_code_internal_server_error = 1011, /*! * @const nw_ws_close_code_tls_handshake A reserved value and MUST NOT * be set as a status code in a Close control frame by an endpoint. It * is designated for use in applications expecting a status code to * indicate that the connection was closed due to a failure to perform * a TLS handshake (e.g., the server certificate can't be verified). */ nw_ws_close_code_tls_handshake = 1015, } nw_ws_close_code_t; /*! * @typedef nw_ws_version_t * @abstract * The WebSocket Protocol version. */ typedef enum { /*! @const nw_ws_version_invalid An invalid WebSocket version */ nw_ws_version_invalid = 0, /*! @const nw_ws_version_13 WebSocket v13 as defined in RFC 6455 */ nw_ws_version_13 = 1, } nw_ws_version_t; # pragma mark - WebSocket Definition /*! * @function nw_protocol_copy_ws_definition * * @abstract * Access the definition of the default system implementation of the * WebSocket protocol. This protocol can be appended to a connection's * protocol stack. * * @result * Returns a retained protocol definition object. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) NW_RETURNS_RETAINED nw_protocol_definition_t nw_protocol_copy_ws_definition(void); # pragma mark - WebSocket Options /*! * @function nw_ws_create_options * * @abstract * Create an instance of WebSocket protocol options. This object can be * added to an nw_protocol_stack_t to be used in an nw_connection_t or * an nw_listener_t. * * @result * Returns a retained protocol options object. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) NW_RETURNS_RETAINED nw_protocol_options_t nw_ws_create_options(nw_ws_version_t version); /*! * @function nw_ws_options_add_additional_header * * @abstract * Set additional HTTP headers to be sent by the client during the * WebSocket handshake. * * @param options * The WebSocket protocol options object. * * @param name * The HTTP header name. * * @param value * The HTTP header value. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_add_additional_header(nw_protocol_options_t options, const char *name, const char *value); /*! * @function nw_ws_options_add_subprotocol * * @abstract * Add to the list of subprotocols that will be presented to a * WebSocket server during connection establishment. * * @param options * The WebSocket protocol options object. * * @param subprotocol * The subprotocol supported by the client. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_add_subprotocol(nw_protocol_options_t options, const char *subprotocol); /*! * @function nw_ws_options_set_auto_reply_ping * * @abstract * Set whether the WebSocket connection should automatically reply to all * incoming pings. * * @param options * The WebSocket protocol options object. * * @param auto_reply_ping * Whether the WebSocket connection should automatically reply to all * incoming pings. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_set_auto_reply_ping(nw_protocol_options_t options, bool auto_reply_ping); /*! * @function nw_ws_options_set_skip_handshake * * @abstract * Set whether the WebSocket protocol should skip the opening handshake * and begin framing data as soon as a connection is established. * * @param options * The WebSocket protocol options object. * * @param skip_handshake * Whether the WebSocket connection should skip the opening handshake. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_set_skip_handshake(nw_protocol_options_t options, bool skip_handshake); /*! * @function nw_ws_options_set_maximum_message_size * * @abstract * Set the maximum allowed message size to be received by the WebSocket * connection. This does not limit the sending message size. * * @param options * The WebSocket protocol options object. * * @param maximum_message_size * The maximum message size in bytes. A maximum message size of 0 means * there is no receive limit. The default maximum message size is 0. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_set_maximum_message_size(nw_protocol_options_t options, size_t maximum_message_size); # pragma mark - WebSocket Metadata /*! * @function nw_protocol_metadata_is_ws * * @abstract * Checks if a protocol metadata object is compatible with the * accessors defined in this file for the default system * implementation of WebSocket. * * @result * Returns true if the metadata is for the default system * implementation of WebSocket, false otherwise. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) bool nw_protocol_metadata_is_ws(nw_protocol_metadata_t metadata); /*! * @function nw_ws_create_metadata * * @abstract * Creates a protocol metadata object that can be used to define the * content context of messages sent over a WebSocket connection. * * @param opcode * Set the opcode on a WebSocket frame. This must be defined on create. * * @result * Returns a retained metadata object representing the WebSocket frame. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) NW_RETURNS_RETAINED nw_protocol_metadata_t nw_ws_create_metadata(nw_ws_opcode_t opcode); /*! * @function nw_ws_metadata_get_opcode * * @abstract * Get the opcode on a WebSocket frame. * * @param metadata * The metadata object representing the WebSocket frame. * * @result * The opcode on the WebSocket frame. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) nw_ws_opcode_t nw_ws_metadata_get_opcode(nw_protocol_metadata_t metadata); /*! * @function nw_ws_metadata_set_close_code * * @abstract * Set the close code on a WebSocket frame. The WebSocket frame's opcode * should be nw_ws_opcode_close. * * @param metadata * The metadata object representing the WebSocket frame. * * @param close_code * The close code on the WebSocket frame. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_metadata_set_close_code(nw_protocol_metadata_t metadata, nw_ws_close_code_t close_code); /*! * @function nw_ws_metadata_get_close_code * * @abstract * Get the close code from a WebSocket frame. If the close code is equal * to nw_ws_close_code_no_status_received, it means that a close code * was not actually present in the WebSocket frame. * * @param metadata * The metadata object representing the WebSocket frame. * * @result * The close code on the WebSocket frame. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) nw_ws_close_code_t nw_ws_metadata_get_close_code(nw_protocol_metadata_t metadata); #ifdef __BLOCKS__ /*! * @typedef nw_ws_pong_handler_t * * @abstract * A block to be invoked when a pong reply is received after sending a ping * message, or if the WebSocket connection has closed or failed. * * @param error * An optional error if the WebSocket connection fails or closes before a * pong reply is received. */ typedef void (^nw_ws_pong_handler_t)(nw_error_t _Nullable error); /*! * @function nw_ws_metadata_set_pong_handler * * @abstract * Set a callback that will notify the client when a pong message has been * received for a ping message sent. The metadata object's associated * opcode should be nw_ws_opcode_ping. * * @param metadata * The WebSocket metadata object. * * @param client_queue * The queue on which the pong handler will be delivered. * * @param pong_handler * The handler that gets called when a pong reply is received. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_metadata_set_pong_handler(nw_protocol_metadata_t metadata, dispatch_queue_t client_queue, nw_ws_pong_handler_t pong_handler); #endif // __BLOCKS__ # pragma mark - WebSocket Request /*! * @typedef nw_ws_request_t * @abstract * A WebSocket request represents a client's request to connect to a WebSocket * server. * * This type supports ARC and the -[description] method. In non-ARC files, use * nw_retain() and nw_release() to retain and release the object. */ #ifndef NW_WS_REQUEST_IMPL NW_OBJECT_DECL(nw_ws_request); #endif // NW_WS_REQUEST_IMPL #ifdef __BLOCKS__ /*! * @typedef nw_ws_subprotocol_enumerator_t * * @abstract * A block that can be applied to every subprotocol in a client's WebSocket * request. * * @param subprotocol * A subprotocol presented by the client. * * @result * A boolean value that indicating if enumeration should continue. */ typedef bool (^nw_ws_subprotocol_enumerator_t)(const char *subprotocol); /*! * @function nw_ws_request_enumerate_subprotocols * * @abstract * Enumerates the list of subprotocols on the client's request. * * @param request * The client request. * * @param enumerator * The enumerator block. * * @result * Whether the enumeration completed. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) bool nw_ws_request_enumerate_subprotocols(nw_ws_request_t request, NW_NOESCAPE nw_ws_subprotocol_enumerator_t enumerator); /*! * @typedef nw_ws_additional_header_enumerator_t * * @abstract * A block that can be applied to every additional header in a client's * WebSocket request. * * @param name * The HTTP name. * * @param value * The HTTP value. */ typedef bool (^nw_ws_additional_header_enumerator_t)(const char *name, const char *value); /*! * @function nw_ws_request_enumerate_additional_headers * * @abstract * Enumerates the list of additional headers on the client's request. * * @param request * The client request. * * @param enumerator * The enumerator block. * * @result * Whether the enumeration completed. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) bool nw_ws_request_enumerate_additional_headers(nw_ws_request_t request, NW_NOESCAPE nw_ws_additional_header_enumerator_t enumerator); #endif // __BLOCKS__ # pragma mark - WebSocket Response /*! * @typedef nw_ws_response_t * @abstract * A WebSocket response represents a server's response to a client's request * to connect. * * This type supports ARC and the -[description] method. In non-ARC files, use * nw_retain() and nw_release() to retain and release the object. */ #ifndef NW_WS_RESPONSE_IMPL NW_OBJECT_DECL(nw_ws_response); #endif // NW_WS_RESPONSE_IMPL /*! * @typedef nw_ws_response_status_t * @abstract * The status of a WebSocket server's response to a client's request to * connect. */ typedef enum { /*! * @const nw_ws_response_status_invalid The response is invalid. This should * be treated as an unexpected value. */ nw_ws_response_status_invalid = 0, /*! * @const nw_ws_response_status_accept Accept the request to connect. The * WebSocket connection will begin framing data. */ nw_ws_response_status_accept = 1, /*! * @const nw_ws_response_status_accept Reject the request to connect. The * WebSocket connection will be closed. */ nw_ws_response_status_reject = 2, } nw_ws_response_status_t; /*! * @function nw_ws_response_create * * @abstract * Create a server response to a WebSocket client's opening handshake. * * @param status * The status of the response. If the status is nw_ws_response_status_accept, * the server will accept the handshake and open the WebSocket connection. * If the status is nw_ws_response_status_reject, the server will reject the * handshake and respond with the HTTP error 400 Bad Request. * * @param selected_subprotocol * The server's selected protocol from the client's list of proposed * subprotocols. If the status of this response is nw_ws_response_status_reject, * this parameter is ignored. Pass NULL to indicate the server did not find * a suitable subprotocol, but has accepted the handshake anyways. * Passing an empty string is prohibited by the WebSocket protocol. * * @result * An instantiated WebSocket server response object. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) NW_RETURNS_RETAINED nw_ws_response_t nw_ws_response_create(nw_ws_response_status_t status, const char * _Nullable selected_subprotocol); /*! * @function nw_ws_response_get_status * * @abstract * Get the status from a WebSocket server's response. If the response is * nil, the return value will be nw_ws_response_status_invalid. * * @param response * The server response. * * @result * The status of the server's response. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) nw_ws_response_status_t nw_ws_response_get_status(nw_ws_response_t _Nullable response); /*! * @function nw_ws_response_get_selected_subprotocol * * @abstract * Get the selected subprotocol from a WebSocket server's response. * * @param response * The server response. * * @result * The status of the server's response. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) const char * _Nullable nw_ws_response_get_selected_subprotocol(nw_ws_response_t response); /*! * @function nw_ws_response_add_additional_header * * @abstract * Add additional HTTP headers to be sent back to the WebSocket client in * the server's response. * * @param response * The server response. * * @param name * The HTTP name. * * @param value * The HTTP value. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_response_add_additional_header(nw_ws_response_t response, const char *name, const char *value); /*! * @function nw_ws_metadata_copy_server_response * * @abstract * Copy the WebSocket server's response to a client's request to connect. * If this is called on a WebSocket server, the response object will contain * the server's own response to the client. * * @param metadata * The metadata object representing the WebSocket connection. * * @result * The server response. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) NW_RETURNS_RETAINED nw_ws_response_t nw_ws_metadata_copy_server_response(nw_protocol_metadata_t metadata); #ifdef __BLOCKS__ /*! * @function nw_ws_response_enumerate_additional_headers * * @abstract * Enumerates the list of additional headers on the server's response. * * @param response * The server response. * * @param enumerator * The enumerator block. * * @result * Whether the enumeration completed. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) bool nw_ws_response_enumerate_additional_headers(nw_ws_response_t response, NW_NOESCAPE nw_ws_additional_header_enumerator_t enumerator); /*! * @typedef nw_ws_client_request_handler_t * * @abstract * A block to be invoked when a WebSocket server receives a WebSocket * client's request to connect. * * @param request * The client request. * * @result * The server response. */ typedef _Nonnull nw_ws_response_t (^nw_ws_client_request_handler_t)(_Nonnull nw_ws_request_t request); /*! * @typedef nw_ws_options_set_client_request_handler * * @abstract * Set callback handler to be invoked when a WebSocket server receives a * WebSocket client's request to connect. * * @param options * The protocol options object. * * @param client_queue * The queue on which the client request handler will be delivered. * * @param handler * The callback handler. */ API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) void nw_ws_options_set_client_request_handler(nw_protocol_options_t options, dispatch_queue_t client_queue, nw_ws_client_request_handler_t handler); #endif // __BLOCKS__ NW_ASSUME_NONNULL_END __END_DECLS #endif /* __NW_WS_OPTIONS_H__ */
7,495
2,236
// // TABAnimatedProductImpl.h // AnimatedDemo // // Created by tigerAndBull on 2020/4/1. // Copyright © 2020 tigerAndBull. All rights reserved. // #import <Foundation/Foundation.h> #import "TABAnimatedProductInterface.h" NS_ASSUME_NONNULL_BEGIN @interface TABAnimatedProductImpl : NSObject <TABAnimatedProductInterface> @end NS_ASSUME_NONNULL_END
128
777
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "core/loader/DocumentLoader.h" #include "core/page/Page.h" #include "platform/testing/URLTestHelpers.h" #include "public/platform/Platform.h" #include "public/platform/WebURLLoaderClient.h" #include "public/platform/WebURLLoaderMockFactory.h" #include "public/web/WebCache.h" #include "testing/gtest/include/gtest/gtest.h" #include "web/WebLocalFrameImpl.h" #include "web/tests/FrameTestHelpers.h" #include "wtf/AutoReset.h" #include <queue> namespace blink { // TODO(dcheng): Ideally, enough of FrameTestHelpers would be in core/ that // placing a test for a core/ class in web/ wouldn't be necessary. class DocumentLoaderTest : public ::testing::Test { protected: void SetUp() override { m_webViewHelper.initialize(); URLTestHelpers::registerMockedURLLoad( URLTestHelpers::toKURL("https://example.com/foo.html"), "foo.html"); } void TearDown() override { Platform::current()->getURLLoaderMockFactory()->unregisterAllURLs(); WebCache::clear(); } WebLocalFrameImpl* mainFrame() { return m_webViewHelper.webView()->mainFrameImpl(); } FrameTestHelpers::WebViewHelper m_webViewHelper; }; TEST_F(DocumentLoaderTest, SingleChunk) { class TestDelegate : public WebURLLoaderTestDelegate { public: void didReceiveData(WebURLLoaderClient* originalClient, const char* data, int dataLength) override { EXPECT_EQ(34, dataLength) << "foo.html was not served in a single chunk"; originalClient->didReceiveData(data, dataLength); } } delegate; Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(&delegate); FrameTestHelpers::loadFrame(mainFrame(), "https://example.com/foo.html"); Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(nullptr); // TODO(dcheng): How should the test verify that the original callback is // invoked? The test currently still passes even if the test delegate // forgets to invoke the callback. } // Test normal case of DocumentLoader::dataReceived(): data in multiple chunks, // with no reentrancy. TEST_F(DocumentLoaderTest, MultiChunkNoReentrancy) { class TestDelegate : public WebURLLoaderTestDelegate { public: void didReceiveData(WebURLLoaderClient* originalClient, const char* data, int dataLength) override { EXPECT_EQ(34, dataLength) << "foo.html was not served in a single chunk"; // Chunk the reply into one byte chunks. for (int i = 0; i < dataLength; ++i) originalClient->didReceiveData(&data[i], 1); } } delegate; Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(&delegate); FrameTestHelpers::loadFrame(mainFrame(), "https://example.com/foo.html"); Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(nullptr); } // Finally, test reentrant callbacks to DocumentLoader::dataReceived(). TEST_F(DocumentLoaderTest, MultiChunkWithReentrancy) { // This test delegate chunks the response stage into three distinct stages: // 1. The first dataReceived() callback, which triggers frame detach due to // commiting a provisional load. // 2. The middle part of the response, which is dispatched to // dataReceived() reentrantly. // 3. The final chunk, which is dispatched normally at the top-level. class TestDelegate : public WebURLLoaderTestDelegate, public FrameTestHelpers::TestWebFrameClient { public: TestDelegate() : m_loaderClient(nullptr), m_dispatchingDidReceiveData(false), m_servedReentrantly(false) {} // WebURLLoaderTestDelegate overrides: void didReceiveData(WebURLLoaderClient* originalClient, const char* data, int dataLength) override { EXPECT_EQ(34, dataLength) << "foo.html was not served in a single chunk"; m_loaderClient = originalClient; for (int i = 0; i < dataLength; ++i) m_data.push(data[i]); { // Serve the first byte to the real WebURLLoaderCLient, which // should trigger frameDetach() due to committing a provisional // load. AutoReset<bool> dispatching(&m_dispatchingDidReceiveData, true); dispatchOneByte(); } // Serve the remaining bytes to complete the load. EXPECT_FALSE(m_data.empty()); while (!m_data.empty()) dispatchOneByte(); } // WebFrameClient overrides: void frameDetached(WebLocalFrame* frame, DetachType detachType) override { if (m_dispatchingDidReceiveData) { // This should be called by the first didReceiveData() call, since // it should commit the provisional load. EXPECT_GT(m_data.size(), 10u); // Dispatch dataReceived() callbacks for part of the remaining // data, saving the rest to be dispatched at the top-level as // normal. while (m_data.size() > 10) dispatchOneByte(); m_servedReentrantly = true; } TestWebFrameClient::frameDetached(frame, detachType); } void dispatchOneByte() { char c = m_data.front(); m_data.pop(); m_loaderClient->didReceiveData(&c, 1); } bool servedReentrantly() const { return m_servedReentrantly; } private: WebURLLoaderClient* m_loaderClient; std::queue<char> m_data; bool m_dispatchingDidReceiveData; bool m_servedReentrantly; } delegate; m_webViewHelper.initialize(false, &delegate); // This doesn't go through the mocked URL load path: it's just intended to // setup a situation where didReceiveData() can be invoked reentrantly. FrameTestHelpers::loadHTMLString(mainFrame(), "<iframe></iframe>", URLTestHelpers::toKURL("about:blank")); Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(&delegate); FrameTestHelpers::loadFrame(mainFrame(), "https://example.com/foo.html"); Platform::current()->getURLLoaderMockFactory()->setLoaderDelegate(nullptr); EXPECT_TRUE(delegate.servedReentrantly()); // delegate is a WebFrameClient and stack-allocated, so manually reset() the // WebViewHelper here. m_webViewHelper.reset(); } TEST_F(DocumentLoaderTest, isCommittedButEmpty) { WebViewImpl* webViewImpl = m_webViewHelper.initializeAndLoad("about:blank", true); EXPECT_TRUE(toLocalFrame(webViewImpl->page()->mainFrame()) ->loader() .documentLoader() ->isCommittedButEmpty()); } } // namespace blink
2,489
378
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openejb.server.discovery; import org.apache.openejb.loader.Options; import org.apache.openejb.server.DiscoveryAgent; import org.apache.openejb.server.DiscoveryListener; import org.apache.openejb.server.SelfManaging; import org.apache.openejb.server.ServerService; import org.apache.openejb.server.ServiceException; import org.apache.openejb.util.LogCategory; import org.apache.openejb.util.Logger; import org.apache.openejb.util.OptionsLog; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.DatagramPacket; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.MulticastSocket; import java.net.Socket; import java.net.SocketTimeoutException; import java.net.URI; import java.util.Properties; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.atomic.AtomicBoolean; /** * @version $Rev$ $Date$ */ public class MulticastDiscoveryAgent implements DiscoveryAgent, ServerService, SelfManaging { private static final Logger LOGGER = Logger.getInstance(LogCategory.OPENEJB_SERVER.createChild("discovery").createChild("multicast"), MulticastDiscoveryAgent.class); private final AtomicBoolean running = new AtomicBoolean(false); private String host = "192.168.127.12"; private int port = 6142; private int timeToLive = 1; private boolean loopbackMode = false; private InetSocketAddress address; private long heartRate = 500; private Tracker tracker; private Multicast multicast; @Override public void init(final Properties props) { final Options options = new Options(props); options.setLogger(new OptionsLog(LOGGER)); host = props.getProperty("bind", host); loopbackMode = options.get("loopback_mode", loopbackMode); port = options.get("port", port); heartRate = options.get("heart_rate", heartRate); final Tracker.Builder builder = new Tracker.Builder(); builder.setGroup(props.getProperty("group", builder.getGroup())); builder.setHeartRate(heartRate); builder.setMaxMissedHeartbeats(options.get("max_missed_heartbeats", builder.getMaxMissedHeartbeats())); builder.setMaxReconnectDelay(options.get("max_reconnect_delay", builder.getMaxReconnectDelay())); builder.setReconnectDelay(options.get("reconnect_delay", builder.getReconnectDelay())); builder.setExponentialBackoff(options.get("exponential_backoff", builder.getExponentialBackoff())); builder.setMaxReconnectAttempts(options.get("max_reconnect_attempts", builder.getMaxReconnectAttempts())); tracker = builder.build(); } @Override public String getIP() { return host; } @Override public String getName() { return "multicast"; } @Override public int getPort() { return port; } @Override public void setDiscoveryListener(final DiscoveryListener listener) { this.tracker.setDiscoveryListener(listener); } @Override public void registerService(final URI serviceUri) throws IOException { tracker.registerService(serviceUri); } @Override public void unregisterService(final URI serviceUri) throws IOException { tracker.unregisterService(serviceUri); } @Override public void reportFailed(final URI serviceUri) { tracker.reportFailed(serviceUri); } public static void main(final String[] args) throws Exception { } /** * start the discovery agent * * @throws ServiceException */ @Override public void start() throws ServiceException { try { if (running.compareAndSet(false, true)) { final InetAddress inetAddress = InetAddress.getByName(host); this.address = new InetSocketAddress(inetAddress, port); multicast = new Multicast(tracker); } } catch (Exception e) { throw new ServiceException(e); } } /** * stop the channel * * @throws ServiceException */ @Override public void stop() throws ServiceException { if (running.compareAndSet(true, false)) { multicast.close(); } } @Override public void service(final InputStream in, final OutputStream out) throws ServiceException, IOException { } @Override public void service(final Socket socket) throws ServiceException, IOException { } class Multicast { private static final int BUFF_SIZE = 8192; private final Tracker tracker; private final MulticastSocket multicast; private final Timer timer; private final Thread listenerThread; Multicast(final Tracker tracker) throws IOException { this.tracker = tracker; multicast = new MulticastSocket(port); multicast.setLoopbackMode(loopbackMode); multicast.setTimeToLive(timeToLive); multicast.joinGroup(address.getAddress()); multicast.setSoTimeout((int) heartRate); listenerThread = new Thread(new Listener()); listenerThread.setName("MulticastDiscovery: Listener"); listenerThread.setDaemon(true); listenerThread.start(); final Broadcaster broadcaster = new Broadcaster(); timer = new Timer("MulticastDiscovery: Broadcaster", true); timer.scheduleAtFixedRate(broadcaster, 0, heartRate); } public void close() { timer.cancel(); } class Listener implements Runnable { @Override public void run() { final byte[] buf = new byte[BUFF_SIZE]; final DatagramPacket packet = new DatagramPacket(buf, 0, buf.length); while (running.get()) { tracker.checkServices(); try { multicast.receive(packet); if (packet.getLength() > 0) { final String str = new String(packet.getData(), packet.getOffset(), packet.getLength()); // System.out.println("read = " + str); tracker.processData(str); } } catch (SocketTimeoutException se) { // ignore } catch (IOException e) { if (running.get()) { LOGGER.error("failed to process packet: " + e); } } } } } class Broadcaster extends TimerTask { private IOException failed; @Override public void run() { if (running.get()) { heartbeat(); } } private void heartbeat() { for (final String uri : tracker.getRegisteredServices()) { try { final byte[] data = uri.getBytes(); final DatagramPacket packet = new DatagramPacket(data, 0, data.length, address); // System.out.println("ann = " + uri); multicast.send(packet); } catch (IOException e) { // If a send fails, chances are all subsequent sends will fail // too.. No need to keep reporting the // same error over and over. if (failed == null) { failed = e; LOGGER.error("Failed to advertise our service: " + uri, e); final String message = e.getMessage(); if (null != message && message.toLowerCase().contains("operation not permitted")) { LOGGER.error("The 'Operation not permitted' error has been know to be caused by improper firewall/network setup. " + "Please make sure that the OS is properly configured to allow multicast traffic over: " + multicast.getLocalAddress()); } } } } } } } public String getHost() { return host; } public void setHost(final String host) { this.host = host; } public boolean isLoopbackMode() { return loopbackMode; } public void setLoopbackMode(final boolean loopbackMode) { this.loopbackMode = loopbackMode; } public int getTimeToLive() { return timeToLive; } public void setTimeToLive(final int timeToLive) { this.timeToLive = timeToLive; } }
4,116
892
{ "schema_version": "1.2.0", "id": "GHSA-ggx4-p7x5-97cc", "modified": "2022-05-13T01:35:22Z", "published": "2022-05-13T01:35:22Z", "aliases": [ "CVE-2018-0342" ], "details": "A vulnerability in the configuration and monitoring service of the Cisco SD-WAN Solution could allow an authenticated, local attacker to execute arbitrary code with root privileges or cause a denial of service (DoS) condition on an affected device. The vulnerability is due to incomplete bounds checks for data that is provided by the configuration and monitoring service of the affected solution. An attacker could exploit this vulnerability by sending malicious data to the vDaemon listening service on an affected device. A successful exploit could allow the attacker to cause a buffer overflow condition on the affected device, which could allow the attacker to execute arbitrary code with root privileges on the device or cause the vDaemon listening service to reload and result in a DoS condition on the device. This vulnerability affects the following Cisco products if they are running a release of the Cisco SD-WAN Solution prior to Release 18.3.0: vBond Orchestrator Software, vEdge 100 Series Routers, vEdge 1000 Series Routers, vEdge 2000 Series Routers, vEdge 5000 Series Routers, vEdge Cloud Router Platform, vManage Network Management Software, vSmart Controller Software. Cisco Bug IDs: CSCvi70003.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:L/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-0342" }, { "type": "WEB", "url": "https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20180718-sd-wan-bo" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/104877" } ], "database_specific": { "cwe_ids": [ "CWE-119" ], "severity": "HIGH", "github_reviewed": false } }
686
764
{"symbol": "PAINT","address": "0x4C6eC08CF3fc987c6C4BEB03184D335A2dFc4042","overview":{"en": "MurAll is a 2048x1024px digital mural that anyone can draw on without restriction. MurAll requires PAINT - a fixed supply token synonymous with real-life paint, which is burned in the painting transaction. Painting on MurAll also mints a MURALL NFT of the artwork for the artist to keep."},"email": "<EMAIL>","website": "https://murall.art/","state": "NORMAL","links": {"blog": "https://murall.medium.com/","twitter": "https://twitter.com/MurAll_art","telegram": "","github": "https://github.com/MurAll-canvas/"}}
189
716
import unittest from test_case import PinocchioTestCase as TestCase import pinocchio as pin import numpy as np import pickle class TestStdMap(TestCase): def setUp(self): pass def test_pickle(self): map = pin.StdMap_String_VectorXd() keys = [] for k in range(100): key_name = 'key_' + str(k+1) keys.append(key_name) map[key_name] = np.random.rand((10)) pickle.dump( map, open( "save_std_map.p", "wb" ) ) map_loaded = pickle.load( open( "save_std_map.p", "rb" ) ) for key in keys: self.assertApprox(map[key],map_loaded[key]) if __name__ == '__main__': unittest.main()
329
14,668
<filename>third_party/android_crazy_linker/src/src/crazy_linker_elf_loader.h // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CRAZY_LINKER_ELF_LOADER_H #define CRAZY_LINKER_ELF_LOADER_H #include "crazy_linker_error.h" #include "crazy_linker_load_params.h" #include "crazy_linker_memory_mapping.h" #include "crazy_linker_system.h" // For ScopedFileDescriptor #include "elf_traits.h" namespace crazy { // Helper class used to load an ELF binary in memory. // // Note that this doesn't not perform any relocation, the purpose // of this class is strictly to map all loadable segments from the // file to their correct location. // class ElfLoader { public: // Result of the LoadAt method. In case of failure, an invalid instance // will be returned. struct Result { ELF::Addr load_start = 0; ELF::Addr load_size = 0; ELF::Addr load_bias = 0; const ELF::Phdr* phdr = nullptr; size_t phdr_count = 0; MemoryMapping reserved_mapping; constexpr bool IsValid() const { return this->load_start != 0; } }; // Try to load a library at a given address. On failure, return an // invalid Result instance, and sets |*error|. static Result LoadAt(const LoadParams& params, Error* error); }; } // namespace crazy #endif // CRAZY_LINKER_ELF_LOADER_H
479
330
<gh_stars>100-1000 { "acno": "N02296", "acquisitionYear": 1908, "all_artists": "<NAME>", "catalogueGroup": {}, "classification": "on paper, unique", "contributorCount": 1, "contributors": [ { "birthYear": 1845, "date": "1845\u20131932", "displayOrder": 1, "fc": "<NAME>", "gender": "Male", "id": 215, "mda": "<NAME>", "role": "artist", "startLetter": "G" } ], "creditLine": "Presented by <NAME> 1908", "dateRange": null, "dateText": "date not known", "depth": "", "dimensions": "support: 257 x 365 mm", "foreignTitle": null, "groupTitle": null, "height": "365", "id": 5507, "inscription": null, "medium": "Gouache on paper", "movementCount": 0, "subjectCount": 0, "thumbnailCopyright": null, "thumbnailUrl": null, "title": "The Gate of the Pass, Maloja", "units": "mm", "url": "http://www.tate.org.uk/art/artworks/goodwin-the-gate-of-the-pass-maloja-n02296", "width": "257" }
464
315
<gh_stars>100-1000 /*! * \file * \brief Class module::Decoder_RSC_BCJR_seq. */ #ifndef DECODER_RSC_BCJR_SEQ_HPP_ #define DECODER_RSC_BCJR_SEQ_HPP_ #include <vector> #include <mipp.h> #include "Module/Decoder/RSC/BCJR/Decoder_RSC_BCJR.hpp" namespace aff3ct { namespace module { template <typename B = int, typename R = float> class Decoder_RSC_BCJR_seq : public Decoder_RSC_BCJR<B,R> { protected: mipp::vector<R> alpha[8]; // node metric (left to right) mipp::vector<R> beta [8]; // node metric (right to left) mipp::vector<R> gamma[2]; // edge metric Decoder_RSC_BCJR_seq(const int &K, const std::vector<std::vector<int>> &trellis, const bool buffered_encoding = true); virtual ~Decoder_RSC_BCJR_seq() = default; virtual Decoder_RSC_BCJR_seq<B,R>* clone() const; }; } } #ifndef DOXYGEN_SHOULD_SKIP_THIS #include "Module/Decoder/RSC/BCJR/Seq/Decoder_RSC_BCJR_seq.hxx" #endif #endif /* DECODER_RSC_BCJR_SEQ_HPP_ */
448
4,640
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file topi/einsum.h * \brief Einstein summation op */ #ifndef TVM_TOPI_EINSUM_H_ #define TVM_TOPI_EINSUM_H_ #define LABELRANGE 128 #define NPY_MAXDIMS 16 #define NPY_MAXARGS 16 #include <tvm/te/operation.h> #include <tvm/tir/data_layout.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/detail/ravel_unravel.h> #include <tvm/topi/detail/tensor_utils.h> #include <tvm/topi/tags.h> #include <algorithm> #include <bitset> #include <iterator> #include <string> #include <tuple> #include <unordered_set> #include <vector> namespace tvm { namespace topi { using namespace tvm::te; using namespace topi::detail; /*! * \brief Compute the stride of the given shape. * * \param shape for the operation. * * \return the stride of the shape. */ inline Array<PrimExpr> GetStride(const Array<PrimExpr> shape) { size_t ndim = shape.size(); int prod = 1; Array<PrimExpr> stride = Array<PrimExpr>(ndim, -1); for (int i = ndim - 1; i >= 0; i--) { stride.Set(i, if_then_else(shape[i] > 1, prod, 0)); prod = prod * GetConstInt(shape[i]); } return stride; } /*! * \brief Pad the shape with 1. * * \param shape the input shape to be padded * \param odim the padding size of the objective shape. * * \return the padded shape. */ inline Array<PrimExpr> Pad(const Array<PrimExpr> shape, int odim) { int ndim = shape.size(); CHECK_GE(odim, ndim); Array<PrimExpr> ret(static_cast<size_t>(odim), 1); for (int idim = 0; idim < ndim; ++idim) { ret.Set(idim, shape[idim]); } return ret; } /*! * \brief Parse the subscripts for one operand into an output of 'ndim' labels. * * \param subscripts the subscripts for to be parsed. * \param length subscripts[0: length] represents the current operand. * \param ndim the ndim of current operand. * \param iop the index of the operand. * \param op_labels the parsing result. * For Example: * subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2]. * subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99]. * \param label_counts Count the number the label appears. * \param min_label Save the minimal label according to ASCII. * \param max_label Save the maximal label according to ASCII. * * \return 0. */ inline int ParseOperandSubscripts(const char* subscripts, int length, int ndim, int iop, char* op_labels, char* label_counts, int* min_label, int* max_label) { int i; int idim = 0; int ellipsis = -1; /* Process all labels for this operand */ for (i = 0; i < length; ++i) { int label = subscripts[i]; /* A proper label for an axis. */ if (label > 0 && isalpha(label)) { /* Check we don't exceed the operator dimensions. */ CHECK(idim < ndim) << "einstein sum subscripts string contains " << "too many subscripts for operand " << iop; op_labels[idim++] = label; if (label < *min_label) { *min_label = label; } if (label > *max_label) { *max_label = label; } label_counts[label]++; } else if (label == '.') { /* The beginning of the ellipsis. */ /* Check it's a proper ellipsis. */ CHECK( !(ellipsis != -1 || i + 2 >= length || subscripts[++i] != '.' || subscripts[++i] != '.')) << "einstein sum subscripts string contains a " << "'.' that is not part of an ellipsis ('...') " << "in operand " << iop; ellipsis = idim; } else { CHECK(label == ' ') << "invalid subscript '" << static_cast<char>(label) << "' in einstein sum " << "subscripts string, subscripts must " << "be letters"; } } /* No ellipsis found, labels must match dimensions exactly. */ if (ellipsis == -1) { CHECK(idim == ndim) << "operand has more dimensions than subscripts " << "given in einstein sum, but no '...' ellipsis " << "provided to broadcast the extra dimensions."; } else if (idim < ndim) { /* Ellipsis found, may have to add broadcast dimensions. */ /* Move labels after ellipsis to the end. */ for (i = 0; i < idim - ellipsis; ++i) { op_labels[ndim - i - 1] = op_labels[idim - i - 1]; } /* Set all broadcast dimensions to zero. */ for (i = 0; i < ndim - idim; ++i) { op_labels[ellipsis + i] = 0; } } /* * Find any labels duplicated for this operand, and turn them * into negative offsets to the axis to merge with. * * In C, the char type may be signed or unsigned, but with * twos complement arithmetic the char is ok either way here, and * later where it matters the char is cast to a signed char. */ for (idim = 0; idim < ndim - 1; ++idim) { int label = op_labels[idim]; /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ char* next = reinterpret_cast<char*>(memchr(op_labels + idim + 1, label, ndim - idim - 1)); while (next != nullptr) { /* The offset from next to op_labels[idim] (negative). */ *next = static_cast<char>((op_labels + idim) - next); /* Search for the next matching label. */ next = reinterpret_cast<char*>(memchr(next + 1, label, op_labels + ndim - 1 - next)); } } } return 0; } /*! * \brief Parse the subscripts for the output into an output that includes 'ndim_broadcast' * unlabeled dimensions. * * \param subscripts the subscripts for to be parsed. * \param length subscripts[0: length] represents the output operand. * \param ndim_broadcast the broadcast dimension number. * \param label_counts Count the number the label appears. * \param out_labels similar to the op_labels in ParseOperandSubscripts, for each * dimension, the ASCII code of the corresponding label. zero for the broadcasting dim. * * \return the total number of output dimensions or -1 if there is an error. */ inline int ParseOutputSubscripts(const char* subscripts, int length, int ndim_broadcast, const char* label_counts, char* out_labels) { int i, bdim; int ndim = 0; int ellipsis = 0; /* Process all the output labels. */ for (i = 0; i < length; ++i) { int label = subscripts[i]; /* A proper label for an axis. */ if (label > 0 && isalpha(label)) { /* Check that it doesn't occur again. */ CHECK(memchr(subscripts + i + 1, label, length - i - 1) == nullptr) << "einstein sum subscripts string includes " << "output subscript '" << static_cast<char>(label) << "' multiple times"; /* Check that it was used in the inputs. */ CHECK(label_counts[label] != 0) << "einstein sum subscripts string included " << "output subscript '" << static_cast<char>(label) << "' which never appeared " << "in an input"; /* Check that there is room in out_labels for this label. */ CHECK(ndim < NPY_MAXDIMS) << "einstein sum subscripts string contains " << "too many subscripts in the output"; out_labels[ndim++] = label; } else if (label == '.') { /* The beginning of the ellipsis. */ /* Check it is a proper ellipsis. */ CHECK(!(ellipsis || i + 2 >= length || subscripts[++i] != '.' || subscripts[++i] != '.')) << "einstein sum subscripts string " << "contains a '.' that is not part of " << "an ellipsis ('...') in the output"; /* Check there is room in out_labels for broadcast dims. */ CHECK(ndim + ndim_broadcast <= NPY_MAXDIMS) << "einstein sum subscripts string contains " << "too many subscripts in the output"; ellipsis = 1; for (bdim = 0; bdim < ndim_broadcast; ++bdim) { out_labels[ndim++] = 0; } } else { CHECK(label == ' ') << "invalid subscript '" << static_cast<char>(label) << "' in einstein sum " << "subscripts string, subscripts must " << "be letters"; } } /* If no ellipsis was found there should be no broadcast dimensions. */ CHECK(!(!ellipsis && ndim_broadcast > 0)) << "output has more dimensions than subscripts " << "given in einstein sum, but no '...' ellipsis " << "provided to broadcast the extra dimensions."; return ndim; } /*! * \brief If any dimensions are combined, create a view that combines them. * Shows in newshape and newstride. * * \param op the operand tensor. * \param iop the index of the operand. * \param labels the op_labels fot the operand. Like [97, 98, -2] for "aba". * \param newshape The combined shape. * \param newstride The combined stride. * * For example: * "aba -> ab", shape = [2,3,2] stride = [6,2,1] * op_labels = [97, 98, -2], newshape = [2,3], newstride = [7,2] */ inline void GetCombinedDimsView(const Tensor& op, int iop, char* labels, Array<PrimExpr>* newshape, Array<PrimExpr>* newstride) { int idim, ndim, icombine, combineoffset; int icombinemap[NPY_MAXDIMS]; int newdim; Array<PrimExpr> shape = op->shape; Array<PrimExpr> stride = GetStride(shape); ndim = op.ndim(); newdim = newshape->size(); /* Initialize the dimensions and strides to zero */ for (idim = 0; idim < newdim; ++idim) { newshape->Set(idim, 0); newstride->Set(idim, 0); } /* Copy the dimensions and strides, except when collapsing */ icombine = 0; for (idim = 0; idim < ndim; ++idim) { /* * The char type may be either signed or unsigned, we * need it to be signed here. */ int label = (signed char)labels[idim]; /* If this label says to merge axes, get the actual label */ if (label < 0) { combineoffset = label; label = labels[idim + label]; } else { combineoffset = 0; if (icombine != idim) { labels[icombine] = labels[idim]; } icombinemap[idim] = icombine; } /* If the label is 0, it's an unlabeled broadcast dimension */ if (label == 0) { newshape->Set(icombine, shape[idim]); newstride->Set(icombine, stride[idim]); } else { /* Update the combined axis dimensions and strides */ int i = icombinemap[idim + combineoffset]; CHECK(!((combineoffset < 0) && GetConstInt((*newshape)[i] != 0 && (*newshape)[i] != shape[idim]))) << "dimensions in operand " << iop << " for collapsing index '" << label << "' don't match (" << GetConstInt((*newshape)[i]) << " != " << shape[idim] << ")"; newshape->Set(i, shape[idim]); newstride->Set(i, (*newstride)[i] + stride[idim]); } /* If the label didn't say to combine axes, increment dest i */ if (combineoffset == 0) { icombine++; } } } /*! * \brief Prepare the operand axes to match each stride or shape pair. * * \param ndim the ndim of the operand tensor. * \param iop the index of the operand. * \param labels the op_labels fot the operand. [97, 98, -1, 99, -3, -2] for "abbcbc". * \param axes The matched axes to be calculated. * \param ndim_iter the dimension of iterating. Subscripts "ab, bc -> ac" ndim_iter = 3. * \param iter_labels output_labels with the iterating label. ['a', 'c', 'b'] for the case above. */ inline static int PrepareOpAxes(int ndim, int iop, char* labels, int* axes, int ndim_iter, char* iter_labels) { int i, label, ibroadcast; ibroadcast = ndim - 1; for (i = ndim_iter - 1; i >= 0; --i) { label = iter_labels[i]; /* * If it's an unlabeled broadcast dimension, choose * the next broadcast dimension from the operand. */ if (label == 0) { while (ibroadcast >= 0 && labels[ibroadcast] != 0) { --ibroadcast; } /* * If we used up all the operand broadcast dimensions, * extend it with a "newaxis" */ if (ibroadcast < 0) { axes[i] = -1; } else { /* Otherwise map to the broadcast axis */ axes[i] = ibroadcast; --ibroadcast; } } else { /* It's a labeled dimension, find the matching one */ char* match = reinterpret_cast<char*>(memchr(labels, label, ndim)); /* If the op doesn't have the label, broadcast it */ if (match == nullptr) { axes[i] = -1; } else { /* Otherwise use it */ axes[i] = match - labels; } } } return 0; } /*! * \brief Count SubString. * \param str the object string * \param sub the pattern string * * \return number of substring */ inline int CountSubstring(const std::string& str, const std::string& sub) { int count = 0; std::string::size_type pos = 0; while ((pos = str.find(sub, pos)) != std::string::npos) { ++count; pos += sub.length(); } return count; } /*! * \brief Transfer string to. * \param str input string. * * \return bitset. */ inline std::bitset<LABELRANGE> Str2Set(const std::string& str) { std::bitset<LABELRANGE> ret; for (const char& c : str) { ret.set(static_cast<int>(c)); } return ret; } /*! * \brief Split str according to substring. * \param str input string. * \param sub the split pattern string. * * \return vector contains the splited substring. */ inline std::vector<std::string> Split(const std::string& str, const std::string& sub) { std::string::size_type pos = 0; std::string::size_type start = 0; std::vector<std::string> ret; while ((pos = str.find(sub, start)) != std::string::npos) { ret.push_back(str.substr(start, pos - start)); start = pos + sub.length(); } ret.push_back(str.substr(start)); return ret; } /*! * \brief Parse the input subscripts into a vector of strings. * \param subscripts input subscripts. * \param operands operand tensors. * * \return vector of strings, vector[0] represents the input part, vector[1] represents the output. * if no output, the vector[1] is NULL. * "ab, bc -> ac" => ["ab,bc", "ac"] */ inline std::tuple<std::string, std::string> ParseEinsumInput( std::string subscripts, const std::vector<Array<PrimExpr>>& operands) { const std::string einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; std::bitset<LABELRANGE> einsum_symbols_set; for (const char& c : einsum_symbols) { einsum_symbols_set.set(c); } CHECK_NE(operands.size(), 0U) << "No input operands"; auto end_pos = std::remove(subscripts.begin(), subscripts.end(), ' '); subscripts.erase(end_pos, subscripts.end()); // Ensure all characters are valid for (const char& c : subscripts) { if (c == '.' || c == ',' || c == '-' || c == '>') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; } // Check for proper "->" if (subscripts.find('-') != std::string::npos || subscripts.find('>') != std::string::npos) { bool invalid = (std::count(subscripts.begin(), subscripts.end(), '-') > 1 || std::count(subscripts.begin(), subscripts.end(), '>') > 1); CHECK(!invalid && CountSubstring(subscripts, "->") == 1) << "Subscripts can only contain one '->'."; } // Parse ellipses if (subscripts.find('.') != std::string::npos) { std::string used = subscripts; used.erase( std::remove_if(used.begin(), used.end(), [](const char& c) { return c == '.' || c == ',' || c == '-' || c == '>'; }), used.end()); std::bitset<LABELRANGE> used_set = Str2Set(used); std::string ellipse_inds = ""; for (const char& c : einsum_symbols) { if (!used_set.test(static_cast<int>(c))) { ellipse_inds.append(1, c); } } int longest = 0; std::string input_tmp, output_sub; std::vector<std::string> split_subscripts; bool out_sub; if (subscripts.find("->") != std::string::npos) { std::vector<std::string> tmp = Split(subscripts, "->"); input_tmp = tmp[0]; output_sub = tmp[1]; split_subscripts = Split(input_tmp, ","); out_sub = true; } else { split_subscripts = Split(subscripts, ","); out_sub = false; } size_t size_split_subscripts = split_subscripts.size(); subscripts = ""; for (size_t i = 0; i < size_split_subscripts; ++i) { const std::string& sub = split_subscripts[i]; if (sub.find('.') != std::string::npos) { CHECK_EQ(std::count(sub.begin(), sub.end(), '.'), 3) << "Invalid Ellipses"; CHECK_EQ(CountSubstring(sub, "..."), 1) << "Invalid Ellipses"; // Take into account numerical values int ellipse_count = 0; if (operands[i].size() == 0) { ellipse_count = 0; } else { ellipse_count = std::max(operands[i].size(), static_cast<size_t>(1)); ellipse_count -= sub.length() - 3; } if (ellipse_count > longest) { longest = ellipse_count; } CHECK_GE(ellipse_count, 0) << "Ellipses lengths do not match."; if (ellipse_count == 0) { split_subscripts[i].erase(sub.find("..."), 3); } else { std::string rep_inds = ellipse_inds.substr(ellipse_inds.length() - ellipse_count); split_subscripts[i].replace(sub.find("..."), 3, rep_inds); } } subscripts += split_subscripts[i]; if (i + 1 < size_split_subscripts) { subscripts += ","; } } std::string out_ellipse; if (longest == 0) { out_ellipse = ""; } else { out_ellipse = ellipse_inds.substr(ellipse_inds.length() - longest); } if (out_sub) { output_sub.replace(output_sub.find("..."), 3, out_ellipse); subscripts += "->" + output_sub; } else { // Special care for outputless ellipses std::bitset<LABELRANGE> out_ellipse_set = Str2Set(out_ellipse); std::string tmp_subscripts = subscripts, output_subscript = ""; size_t len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c) && !out_ellipse_set.test(c)) { output_subscript.append(1, c); } } subscripts += "->" + out_ellipse + output_subscript; } } // Build output string if does not exist std::tuple<std::string, std::string> ret; if (subscripts.find("->") != std::string::npos) { std::vector<std::string> tmp(2); tmp = Split(subscripts, "->"); ret = std::make_tuple(tmp[0], tmp[1]); } else { std::string first = subscripts; std::string second = ""; // Build output subscripts std::string tmp_subscripts = subscripts; size_t len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c)) { second.append(1, c); } } ret = std::make_tuple(first, second); } // Make sure output subscripts are in the input std::bitset<LABELRANGE> input_subscripts_set = Str2Set(std::get<0>(ret)); for (const char& c : std::get<1>(ret)) { CHECK(input_subscripts_set.test(c)) << "Output character " << c << " did not appear in the input"; } // Make sure number operands is equivalent to the number of terms CHECK_EQ(std::count(std::get<0>(ret).begin(), std::get<0>(ret).end(), ',') + 1, operands.size()) << "Number of einsum subscripts must be equal to the " << "number of operands."; return ret; } /*! * \brief Compute the shape of the output. * \param subscripts input subscripts. * \param operands operand tensors. * * \return the shape of the output. */ inline Array<PrimExpr> NumpyEinsumShape(const std::string subscripts, const std::vector<Array<PrimExpr>>& operands) { // Parsing std::tuple<std::string, std::string> parsed_subscripts = ParseEinsumInput(subscripts, operands); // Build a few useful list and sets std::vector<std::string> input_list = Split(std::get<0>(parsed_subscripts), ","); size_t isize = input_list.size(); // Get length of each unique dimension and ensure all dimensions are correct int dimension_dict[LABELRANGE]; memset(dimension_dict, -1, sizeof(dimension_dict)); for (size_t i = 0; i < isize; ++i) { const std::string& term = input_list[i]; const Array<PrimExpr>& sh = operands[i]; CHECK_EQ(sh.size(), term.length()) << "Einstein sum subscript " << input_list[i] << " does not contain the " << "correct number of indices for operand " << i << "."; size_t len_term = term.length(); for (size_t j = 0; j < len_term; ++j) { int64_t dim = GetConstInt(sh[j]); const char& c = term[j]; if (dimension_dict[static_cast<int>(c)] != -1) { // For broadcasting cases we always want the largest dim size if (dimension_dict[static_cast<int>(c)] == 1) { dimension_dict[static_cast<int>(c)] = dim; } CHECK(dim == 1 || dim == dimension_dict[static_cast<int>(c)]) << "Size of label '" << c << "' for operand " << i << " (" << dimension_dict[static_cast<int>(c)] << ") does not match previous terms (" << dim << ")."; } else { dimension_dict[static_cast<int>(c)] = dim; } } } // Get oshape const std::string& output_str = std::get<1>(parsed_subscripts); size_t odim = output_str.size(); Array<PrimExpr> oshape(odim, -1); for (size_t i = 0; i < odim; ++i) { oshape.Set(i, dimension_dict[static_cast<int>(output_str[i])]); } // Neglecting oshape assign check temporally return oshape; } /*! * \brief Evaluates the Einstein summation convention on the operands. * * \param subscripts_str Specifies the subscripts for summation as comma separated list of * subscript labels. * \param inputs Arrays for the operation. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return The calculation based on the Einstein summation convention. */ inline Tensor einsum(const std::string& subscripts_str, const Array<Tensor> inputs, std::string name = "T_einsum", std::string tag = kEinsum) { bool back = false; const char* subscripts = subscripts_str.data(); const char* head = subscripts; const int nop = inputs.size(); /* Step 1: Parse the subscripts string into label_counts and op_labels */ int iop, idim, min_label = LABELRANGE - 1, max_label = 0; char label_counts[LABELRANGE], op_labels[NPY_MAXARGS][NPY_MAXDIMS]; memset(label_counts, 0, sizeof(label_counts)); for (iop = 0; iop < nop; ++iop) { int length = static_cast<int>(strcspn(subscripts, ",-")); CHECK(!(iop == nop - 1 && subscripts[length] == ',')) << "more operands provided to einstein sum function " << "than specified in the subscripts string"; CHECK(!(iop < nop - 1 && subscripts[length] != ',')) << "fewer operands provided to einstein sum function " << "than specified in the subscripts string"; CHECK_EQ(ParseOperandSubscripts(subscripts, length, inputs[iop + back].ndim(), iop, op_labels[iop], label_counts, &min_label, &max_label), 0); /* Move subscripts to the start of the labels for the next op */ subscripts += length; if (iop < nop - 1) { CHECK_LT(subscripts - head, subscripts_str.length()) << "subscripts out of range"; subscripts++; } } /* * Find the number of broadcast dimensions, which is the maximum * number of labels == 0 in an op_labels array. */ int ndim_broadcast = 0; for (iop = 0; iop < nop; ++iop) { int count_zeros = 0; int ndim; char* labels = op_labels[iop]; ndim = inputs[iop + back].ndim(); for (idim = 0; idim < ndim; ++idim) { if (labels[idim] == 0) { ++count_zeros; } } if (count_zeros > ndim_broadcast) { ndim_broadcast = count_zeros; } } /* * If there is no output signature, fill output_labels and ndim_output * using each label that appeared once, in alphabetical order. */ int label, ndim_output; char output_labels[NPY_MAXDIMS]; if (subscripts[0] == '\0') { /* If no output was specified, always broadcast left, as usual. */ for (ndim_output = 0; ndim_output < ndim_broadcast; ++ndim_output) { output_labels[ndim_output] = 0; } for (label = min_label; label <= max_label; ++label) { if (label_counts[label] == 1) { CHECK(ndim_output < NPY_MAXDIMS) << "einstein sum subscript string has too many " << "distinct labels"; output_labels[ndim_output++] = label; } } } else { CHECK(subscripts[0] == '-' && subscripts[1] == '>') << "einstein sum subscript string does not " << "contain proper '->' output specified"; subscripts += 2; /* Parse the output subscript string. */ ndim_output = ParseOutputSubscripts(subscripts, strlen(subscripts), ndim_broadcast, label_counts, output_labels); CHECK_GE(ndim_output, 0); } /* * Step 2: * Process all the input ops, combining dimensions into their * diagonal where specified. */ std::vector<Array<PrimExpr>> opshape(nop), opstride_true(nop); for (iop = 0; iop < nop; ++iop) { char* labels = op_labels[iop]; int combine, ndim; ndim = inputs[iop + back].ndim(); /* * Check whether any dimensions need to be combined * * The char type may be either signed or unsigned, we * need it to be signed here. */ combine = 0; for (idim = 0; idim < ndim; ++idim) { if ((signed char)labels[idim] < 0) { combine++; } } /* If any dimensions are combined, create a view which combines them */ if (combine) { Array<PrimExpr> tshape(static_cast<size_t>(ndim - combine), -1); Array<PrimExpr> tstride(static_cast<size_t>(ndim - combine), -1); GetCombinedDimsView(inputs[iop + back], iop, labels, &tshape, &tstride); opshape[iop] = tshape; opstride_true[iop] = tstride; } else { /* No combining needed */ opshape[iop] = inputs[iop + back]->shape; opstride_true[iop] = GetStride(opshape[iop]); } } /* * Step 3: * Set up the labels for the iterator (output + combined labels). * Can just share the output_labels memory, because iter_labels * is output_labels with some more labels appended. */ char* iter_labels = output_labels; int ndim_iter = ndim_output; for (label = min_label; label <= max_label; ++label) { if (label_counts[label] > 0 && memchr(output_labels, label, ndim_output) == nullptr) { CHECK(ndim_iter < NPY_MAXDIMS) << "too many subscripts in einsum"; iter_labels[ndim_iter++] = label; } } /* Step 4: Set up the op_axes for the iterator */ Array<PrimExpr> itershape(static_cast<size_t>(ndim_iter), -1); std::vector<Array<PrimExpr>> iterstride(nop + 1, Array<PrimExpr>(static_cast<size_t>(ndim_iter), 0)); // output_shape std::vector<Array<PrimExpr>> operands; for (size_t i = 0; i < inputs.size(); i++) { operands.push_back(inputs[i]->shape); } Array<PrimExpr> oshape = NumpyEinsumShape(subscripts_str, operands); Array<PrimExpr> ostride_true = GetStride(oshape); Array<PrimExpr> reduceshape; std::vector<Array<PrimExpr>> remainshape(nop); int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; int* op_axes[NPY_MAXARGS]; for (iop = 0; iop < nop; ++iop) { op_axes[iop] = op_axes_arrays[iop]; CHECK_GE(PrepareOpAxes(opshape[iop].size(), iop, op_labels[iop], op_axes[iop], ndim_iter, iter_labels), 0); for (idim = 0; idim < ndim_iter; idim++) { if (op_axes[iop][idim] != -1) { iterstride[iop].Set(idim, opstride_true[iop][op_axes[iop][idim]]); if (GetConstInt(itershape[idim]) != -1) { if (GetConstInt(itershape[idim]) == 1) { itershape.Set(idim, opshape[iop][op_axes[iop][idim]]); } } else { itershape.Set(idim, opshape[iop][op_axes[iop][idim]]); } } } } for (idim = 0; idim < ndim_output; ++idim) { iterstride[nop].Set(idim, ostride_true[idim]); } reduceshape = Array<PrimExpr>(static_cast<size_t>(ndim_iter - ndim_output), 0); for (idim = ndim_output; idim < ndim_iter; ++idim) { reduceshape.Set(idim - ndim_output, itershape[idim]); } for (iop = 0; iop < nop; iop++) { Array<Integer> rsh; for (idim = 0; idim < ndim_iter; idim++) { if (op_axes_arrays[iop][idim] == -1) { rsh.push_back(GetConstInt(itershape[idim])); } else { if (GetConstInt(itershape[idim] != opshape[iop][op_axes_arrays[iop][idim]])) { rsh.push_back(GetConstInt(itershape[idim])); } } } remainshape[iop] = Array<PrimExpr>(rsh.begin(), rsh.end()); } // exclude the 0-dim case if (ndim_iter == 0) { ndim_iter = 1; } itershape = Pad(itershape, ndim_iter); for (iop = 0; iop <= nop; ++iop) { iterstride[iop] = Pad(iterstride[iop], ndim_iter); } // oshape = Pad(oshape, ndim_iter); reduceshape = Pad(reduceshape, ndim_iter); for (iop = 0; iop < nop; ++iop) { opshape[iop] = Pad(opshape[iop], ndim_iter); remainshape[iop] = Pad(remainshape[iop], ndim_iter); } // ostride and rstride Array<Array<PrimExpr>> ostride; Array<Array<PrimExpr>> rstride; for (iop = 0; iop < nop; ++iop) { Array<PrimExpr> otmp(static_cast<size_t>(ndim_iter), 0); Array<PrimExpr> rtmp(static_cast<size_t>(ndim_iter), 0); for (idim = 0; idim < ndim_iter; ++idim) { otmp.Set(idim, idim < ndim_output ? iterstride[iop][idim] : 1); rtmp.Set(idim, idim < ndim_iter - ndim_output ? iterstride[iop][idim + ndim_output] : 1); } ostride.push_back(otmp); rstride.push_back(rtmp); } // func: input indices => return cooresponding value auto func = [inputs, oshape, ostride, reduceshape, ndim_iter, rstride, nop](const Array<Var>& input_indices) -> PrimExpr { for (int rdim = 0; rdim < ndim_iter; ++rdim) { if (GetConstInt(reduceshape[rdim]) == 0) { return 0; // } } Array<PrimExpr> ridx = UnravelIndex(0, reduceshape); PrimExpr sum = 0; bool rec_flag = false; do { PrimExpr tmp = 1; for (int iop = 0; iop < nop; ++iop) { if (iop != -1) { PrimExpr k = 0; for (size_t i = 0; i < input_indices.size(); ++i) { k += input_indices[i] * ostride[iop][i]; } for (size_t i = 0; i < ridx.size(); ++i) { k += ridx[i] * rstride[iop][i]; } Array<PrimExpr> temp_indices = UnravelIndex(k, inputs[iop]->shape); tmp = tmp * inputs[iop](temp_indices); } } sum += tmp; ridx.Set(ridx.size() - 1, ridx[ridx.size() - 1] + 1); for (int i = static_cast<int>(ridx.size() - 1); (i > 0) && GetConstInt(ridx[i] >= reduceshape[i]); --i) { ridx.Set(i, ridx[i] - reduceshape[i]); ridx.Set(i - 1, ridx[i - 1] + 1); } rec_flag = GetConstInt(ridx[0] < reduceshape[0]); } while (rec_flag); return sum; }; return compute(oshape, func, name, tag); } } // namespace topi } // namespace tvm #endif // TVM_TOPI_EINSUM_H_
14,032
544
# Binary trees benchmark - Python version by <NAME> et al. from __future__ import print_function import time # Map "range" to an efficient range in both Python 2 and 3. try: range = xrange except NameError: pass def make_tree(item, depth): if not depth: return item, None, None item2 = item + item depth -= 1 return item, make_tree(item2 - 1, depth), make_tree(item2, depth) def check_tree(node): item, left, right = node if not left: return item return item + check_tree(left) - check_tree(right) min_depth = 4 max_depth = 12 stretch_depth = max_depth + 1 start = time.clock() print("stretch tree of depth %d check:" % stretch_depth, check_tree(make_tree(0, stretch_depth))) long_lived_tree = make_tree(0, max_depth) iterations = 2 ** max_depth for depth in range(min_depth, stretch_depth, 2): check = 0 for i in range(1, iterations + 1): check += check_tree(make_tree(i, depth)) + check_tree(make_tree(-i, depth)) print("%d trees of depth %d check:" % (iterations * 2, depth), check) iterations //= 4 print("long lived tree of depth %d check:" % max_depth, check_tree(long_lived_tree)) print("elapsed: " + str(time.clock() - start))
430
365
<gh_stars>100-1000 /* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** \file * \ingroup imbuf */ #include "MEM_guardedalloc.h" #include "BLI_fileops.h" #include "BLI_math.h" #include "IMB_filetype.h" #include "IMB_imbuf.h" #include "IMB_imbuf_types.h" #include "IMB_colormanagement.h" #include "IMB_colormanagement_intern.h" #include "openjpeg.h" #define JP2_FILEHEADER_SIZE 12 static const char JP2_HEAD[] = { 0x0, 0x0, 0x0, 0x0C, 0x6A, 0x50, 0x20, 0x20, 0x0D, 0x0A, 0x87, 0x0A}; static const char J2K_HEAD[] = {0xFF, 0x4F, 0xFF, 0x51, 0x00}; /* We only need this because of how the presets are set */ /* this typedef is copied from 'openjpeg-1.5.0/applications/codec/image_to_j2k.c' */ typedef struct img_folder { /** The directory path of the folder containing input images*/ char *imgdirpath; /** Output format*/ char *out_format; /** Enable option*/ char set_imgdir; /** Enable Cod Format for output*/ char set_out_format; /** User specified rate stored in case of cinema option*/ float *rates; } img_fol_t; static bool check_jp2(const unsigned char *mem, const size_t size) /* J2K_CFMT */ { if (size < sizeof(JP2_HEAD)) { return false; } return memcmp(JP2_HEAD, mem, sizeof(JP2_HEAD)) ? 0 : 1; } static bool check_j2k(const unsigned char *mem, const size_t size) /* J2K_CFMT */ { if (size < sizeof(J2K_HEAD)) { return false; } return memcmp(J2K_HEAD, mem, sizeof(J2K_HEAD)) ? 0 : 1; } static OPJ_CODEC_FORMAT format_from_header(const unsigned char mem[JP2_FILEHEADER_SIZE], const size_t size) { if (check_jp2(mem, size)) { return OPJ_CODEC_JP2; } if (check_j2k(mem, size)) { return OPJ_CODEC_J2K; } return OPJ_CODEC_UNKNOWN; } bool imb_is_a_jp2(const unsigned char *buf, size_t size) { return (check_jp2(buf, size) || check_j2k(buf, size)); } /** * sample error callback expecting a FILE* client object */ static void error_callback(const char *msg, void *client_data) { FILE *stream = (FILE *)client_data; fprintf(stream, "[ERROR] %s", msg); } /** * sample warning callback expecting a FILE* client object */ static void warning_callback(const char *msg, void *client_data) { FILE *stream = (FILE *)client_data; fprintf(stream, "[WARNING] %s", msg); } #ifdef DEBUG /** * sample debug callback expecting no client object */ static void info_callback(const char *msg, void *client_data) { FILE *stream = (FILE *)client_data; fprintf(stream, "[INFO] %s", msg); } #endif #define PIXEL_LOOPER_BEGIN(_rect) \ for (y = h - 1; y != (unsigned int)(-1); y--) { \ for (i = y * w, i_next = (y + 1) * w; i < i_next; i++, _rect += 4) { #define PIXEL_LOOPER_BEGIN_CHANNELS(_rect, _channels) \ for (y = h - 1; y != (unsigned int)(-1); y--) { \ for (i = y * w, i_next = (y + 1) * w; i < i_next; i++, _rect += _channels) { #define PIXEL_LOOPER_END \ } \ } \ (void)0 /* -------------------------------------------------------------------- */ /** \name Buffer Stream * \{ */ struct BufInfo { const unsigned char *buf; const unsigned char *cur; OPJ_OFF_T len; }; static void opj_read_from_buffer_free(void *UNUSED(p_user_data)) { /* nop */ } static OPJ_SIZE_T opj_read_from_buffer(void *p_buffer, OPJ_SIZE_T p_nb_bytes, void *p_user_data) { struct BufInfo *p_file = p_user_data; OPJ_UINT32 l_nb_read; if (p_file->cur + p_nb_bytes < p_file->buf + p_file->len) { l_nb_read = p_nb_bytes; } else { l_nb_read = (OPJ_UINT32)(p_file->buf + p_file->len - p_file->cur); } memcpy(p_buffer, p_file->cur, l_nb_read); p_file->cur += l_nb_read; return l_nb_read ? l_nb_read : ((OPJ_SIZE_T)-1); } #if 0 static OPJ_SIZE_T opj_write_from_buffer(void *p_buffer, OPJ_SIZE_T p_nb_bytes, void *p_user_data) { struct BufInfo *p_file = p_user_data; memcpy(p_file->cur, p_buffer, p_nb_bytes); p_file->cur += p_nb_bytes; p_file->len += p_nb_bytes; return p_nb_bytes; } #endif static OPJ_OFF_T opj_skip_from_buffer(OPJ_OFF_T p_nb_bytes, void *p_user_data) { struct BufInfo *p_file = p_user_data; if (p_file->cur + p_nb_bytes < p_file->buf + p_file->len) { p_file->cur += p_nb_bytes; return p_nb_bytes; } p_file->cur = p_file->buf + p_file->len; return (OPJ_OFF_T)-1; } static OPJ_BOOL opj_seek_from_buffer(OPJ_OFF_T p_nb_bytes, void *p_user_data) { struct BufInfo *p_file = p_user_data; if (p_nb_bytes < p_file->len) { p_file->cur = p_file->buf + p_nb_bytes; return OPJ_TRUE; } p_file->cur = p_file->buf + p_file->len; return OPJ_FALSE; } /** * Stream wrapper for memory buffer * (would be nice if this was supported by the API). */ static opj_stream_t *opj_stream_create_from_buffer(struct BufInfo *p_file, OPJ_UINT32 p_size, OPJ_BOOL p_is_read_stream) { opj_stream_t *l_stream = opj_stream_create(p_size, p_is_read_stream); if (l_stream == NULL) { return NULL; } opj_stream_set_user_data(l_stream, p_file, opj_read_from_buffer_free); opj_stream_set_user_data_length(l_stream, p_file->len); opj_stream_set_read_function(l_stream, opj_read_from_buffer); #if 0 /* UNUSED */ opj_stream_set_write_function(l_stream, opj_write_from_buffer); #endif opj_stream_set_skip_function(l_stream, opj_skip_from_buffer); opj_stream_set_seek_function(l_stream, opj_seek_from_buffer); return l_stream; } /** \} */ /* -------------------------------------------------------------------- */ /** \name File Stream * \{ */ static void opj_free_from_file(void *p_user_data) { FILE *f = p_user_data; fclose(f); } static OPJ_UINT64 opj_get_data_length_from_file(void *p_user_data) { FILE *p_file = p_user_data; OPJ_OFF_T file_length = 0; fseek(p_file, 0, SEEK_END); file_length = ftell(p_file); fseek(p_file, 0, SEEK_SET); return (OPJ_UINT64)file_length; } static OPJ_SIZE_T opj_read_from_file(void *p_buffer, OPJ_SIZE_T p_nb_bytes, void *p_user_data) { FILE *p_file = p_user_data; OPJ_SIZE_T l_nb_read = fread(p_buffer, 1, p_nb_bytes, p_file); return l_nb_read ? l_nb_read : (OPJ_SIZE_T)-1; } static OPJ_SIZE_T opj_write_from_file(void *p_buffer, OPJ_SIZE_T p_nb_bytes, void *p_user_data) { FILE *p_file = p_user_data; return fwrite(p_buffer, 1, p_nb_bytes, p_file); } static OPJ_OFF_T opj_skip_from_file(OPJ_OFF_T p_nb_bytes, void *p_user_data) { FILE *p_file = p_user_data; if (fseek(p_file, p_nb_bytes, SEEK_CUR)) { return -1; } return p_nb_bytes; } static OPJ_BOOL opj_seek_from_file(OPJ_OFF_T p_nb_bytes, void *p_user_data) { FILE *p_file = p_user_data; if (fseek(p_file, p_nb_bytes, SEEK_SET)) { return OPJ_FALSE; } return OPJ_TRUE; } /** * Stream wrapper for memory file * (would be nice if this was supported by the API). */ static opj_stream_t *opj_stream_create_from_file(const char *filepath, OPJ_UINT32 p_size, OPJ_BOOL p_is_read_stream, FILE **r_file) { FILE *p_file = BLI_fopen(filepath, p_is_read_stream ? "rb" : "wb"); if (p_file == NULL) { return NULL; } opj_stream_t *l_stream = opj_stream_create(p_size, p_is_read_stream); if (l_stream == NULL) { fclose(p_file); return NULL; } opj_stream_set_user_data(l_stream, p_file, opj_free_from_file); opj_stream_set_user_data_length(l_stream, opj_get_data_length_from_file(p_file)); opj_stream_set_write_function(l_stream, opj_write_from_file); opj_stream_set_read_function(l_stream, opj_read_from_file); opj_stream_set_skip_function(l_stream, opj_skip_from_file); opj_stream_set_seek_function(l_stream, opj_seek_from_file); if (r_file) { *r_file = p_file; } return l_stream; } /** \} */ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream, OPJ_CODEC_FORMAT p_format, int flags, char colorspace[IM_MAX_SPACE]); ImBuf *imb_load_jp2(const unsigned char *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { const OPJ_CODEC_FORMAT format = (size > JP2_FILEHEADER_SIZE) ? format_from_header(mem, size) : OPJ_CODEC_UNKNOWN; struct BufInfo buf_wrapper = { .buf = mem, .cur = mem, .len = size, }; opj_stream_t *stream = opj_stream_create_from_buffer( &buf_wrapper, OPJ_J2K_STREAM_CHUNK_SIZE, true); ImBuf *ibuf = imb_load_jp2_stream(stream, format, flags, colorspace); opj_stream_destroy(stream); return ibuf; } ImBuf *imb_load_jp2_filepath(const char *filepath, int flags, char colorspace[IM_MAX_SPACE]) { FILE *p_file = NULL; unsigned char mem[JP2_FILEHEADER_SIZE]; opj_stream_t *stream = opj_stream_create_from_file( filepath, OPJ_J2K_STREAM_CHUNK_SIZE, true, &p_file); if (stream) { return NULL; } if (fread(mem, sizeof(mem), 1, p_file) != sizeof(mem)) { opj_stream_destroy(stream); return NULL; } fseek(p_file, 0, SEEK_SET); const OPJ_CODEC_FORMAT format = format_from_header(mem, sizeof(mem)); ImBuf *ibuf = imb_load_jp2_stream(stream, format, flags, colorspace); opj_stream_destroy(stream); return ibuf; } static ImBuf *imb_load_jp2_stream(opj_stream_t *stream, const OPJ_CODEC_FORMAT format, int flags, char colorspace[IM_MAX_SPACE]) { if (format == OPJ_CODEC_UNKNOWN) { return NULL; } struct ImBuf *ibuf = NULL; bool use_float = false; /* for precision higher than 8 use float */ bool use_alpha = false; long signed_offsets[4] = {0, 0, 0, 0}; int float_divs[4] = {1, 1, 1, 1}; unsigned int i, i_next, w, h, planes; unsigned int y; int *r, *g, *b, *a; /* matching 'opj_image_comp.data' type */ opj_dparameters_t parameters; /* decompression parameters */ opj_image_t *image = NULL; opj_codec_t *codec = NULL; /* handle to a decompressor */ /* both 8, 12 and 16 bit JP2Ks are default to standard byte colorspace */ colorspace_set_default_role(colorspace, IM_MAX_SPACE, COLOR_ROLE_DEFAULT_BYTE); /* set decoding parameters to default values */ opj_set_default_decoder_parameters(&parameters); /* JPEG 2000 compressed image data */ /* get a decoder handle */ codec = opj_create_decompress(format); /* configure the event callbacks (not required) */ opj_set_error_handler(codec, error_callback, stderr); opj_set_warning_handler(codec, warning_callback, stderr); #ifdef DEBUG /* too noisy */ opj_set_info_handler(codec, info_callback, stderr); #endif /* setup the decoder decoding parameters using the current image and user parameters */ if (opj_setup_decoder(codec, &parameters) == false) { goto finally; } if (opj_read_header(stream, codec, &image) == false) { printf("OpenJPEG error: failed to read the header\n"); goto finally; } /* decode the stream and fill the image structure */ if (opj_decode(codec, stream, image) == false) { fprintf(stderr, "ERROR -> j2k_to_image: failed to decode image!\n"); goto finally; } if ((image->numcomps * image->x1 * image->y1) == 0) { fprintf(stderr, "\nError: invalid raw image parameters\n"); goto finally; } w = image->comps[0].w; h = image->comps[0].h; switch (image->numcomps) { case 1: /* Grayscale */ case 3: /* Color */ planes = 24; use_alpha = false; break; default: /* 2 or 4 - Grayscale or Color + alpha */ planes = 32; /* grayscale + alpha */ use_alpha = true; break; } i = image->numcomps; if (i > 4) { i = 4; } while (i) { i--; if (image->comps[i].prec > 8) { use_float = true; } if (image->comps[i].sgnd) { signed_offsets[i] = 1 << (image->comps[i].prec - 1); } /* only needed for float images but doesn't hurt to calc this */ float_divs[i] = (1 << image->comps[i].prec) - 1; } ibuf = IMB_allocImBuf(w, h, planes, use_float ? IB_rectfloat : IB_rect); if (ibuf == NULL) { goto finally; } ibuf->ftype = IMB_FTYPE_JP2; if (1 /* is_jp2 */) { ibuf->foptions.flag |= JP2_JP2; } else { ibuf->foptions.flag |= JP2_J2K; } if (use_float) { float *rect_float = ibuf->rect_float; if (image->numcomps < 3) { r = image->comps[0].data; a = (use_alpha) ? image->comps[1].data : NULL; /* Gray-scale 12bits+ */ if (use_alpha) { a = image->comps[1].data; PIXEL_LOOPER_BEGIN (rect_float) { rect_float[0] = rect_float[1] = rect_float[2] = (float)(r[i] + signed_offsets[0]) / float_divs[0]; rect_float[3] = (a[i] + signed_offsets[1]) / float_divs[1]; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_float) { rect_float[0] = rect_float[1] = rect_float[2] = (float)(r[i] + signed_offsets[0]) / float_divs[0]; rect_float[3] = 1.0f; } PIXEL_LOOPER_END; } } else { r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; /* RGB or RGBA 12bits+ */ if (use_alpha) { a = image->comps[3].data; PIXEL_LOOPER_BEGIN (rect_float) { rect_float[0] = (float)(r[i] + signed_offsets[0]) / float_divs[0]; rect_float[1] = (float)(g[i] + signed_offsets[1]) / float_divs[1]; rect_float[2] = (float)(b[i] + signed_offsets[2]) / float_divs[2]; rect_float[3] = (float)(a[i] + signed_offsets[3]) / float_divs[3]; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_float) { rect_float[0] = (float)(r[i] + signed_offsets[0]) / float_divs[0]; rect_float[1] = (float)(g[i] + signed_offsets[1]) / float_divs[1]; rect_float[2] = (float)(b[i] + signed_offsets[2]) / float_divs[2]; rect_float[3] = 1.0f; } PIXEL_LOOPER_END; } } } else { unsigned char *rect_uchar = (unsigned char *)ibuf->rect; if (image->numcomps < 3) { r = image->comps[0].data; a = (use_alpha) ? image->comps[1].data : NULL; /* grayscale */ if (use_alpha) { a = image->comps[3].data; PIXEL_LOOPER_BEGIN (rect_uchar) { rect_uchar[0] = rect_uchar[1] = rect_uchar[2] = (r[i] + signed_offsets[0]); rect_uchar[3] = a[i] + signed_offsets[1]; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_uchar) { rect_uchar[0] = rect_uchar[1] = rect_uchar[2] = (r[i] + signed_offsets[0]); rect_uchar[3] = 255; } PIXEL_LOOPER_END; } } else { r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; /* 8bit RGB or RGBA */ if (use_alpha) { a = image->comps[3].data; PIXEL_LOOPER_BEGIN (rect_uchar) { rect_uchar[0] = r[i] + signed_offsets[0]; rect_uchar[1] = g[i] + signed_offsets[1]; rect_uchar[2] = b[i] + signed_offsets[2]; rect_uchar[3] = a[i] + signed_offsets[3]; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_uchar) { rect_uchar[0] = r[i] + signed_offsets[0]; rect_uchar[1] = g[i] + signed_offsets[1]; rect_uchar[2] = b[i] + signed_offsets[2]; rect_uchar[3] = 255; } PIXEL_LOOPER_END; } } } if (flags & IB_rect) { IMB_rect_from_float(ibuf); } finally: /* free remaining structures */ if (codec) { opj_destroy_codec(codec); } if (image) { opj_image_destroy(image); } return ibuf; } #if 0 static opj_image_t *rawtoimage(const char *filename, opj_cparameters_t *parameters, raw_cparameters_t *raw_cp) #endif /* prec can be 8, 12, 16 */ /* Use inline because the float passed can be a function call * that would end up being called many times. */ #if 0 # define UPSAMPLE_8_TO_12(_val) ((_val << 4) | (_val & ((1 << 4) - 1))) # define UPSAMPLE_8_TO_16(_val) ((_val << 8) + _val) # define DOWNSAMPLE_FLOAT_TO_8BIT(_val) \ (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 255 : (int)(255.0f * (_val))) # define DOWNSAMPLE_FLOAT_TO_12BIT(_val) \ (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 4095 : (int)(4095.0f * (_val))) # define DOWNSAMPLE_FLOAT_TO_16BIT(_val) \ (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 65535 : (int)(65535.0f * (_val))) #else BLI_INLINE int UPSAMPLE_8_TO_12(const unsigned char _val) { return (_val << 4) | (_val & ((1 << 4) - 1)); } BLI_INLINE int UPSAMPLE_8_TO_16(const unsigned char _val) { return (_val << 8) + _val; } BLI_INLINE int DOWNSAMPLE_FLOAT_TO_8BIT(const float _val) { return (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 255 : (int)(255.0f * (_val))); } BLI_INLINE int DOWNSAMPLE_FLOAT_TO_12BIT(const float _val) { return (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 4095 : (int)(4095.0f * (_val))); } BLI_INLINE int DOWNSAMPLE_FLOAT_TO_16BIT(const float _val) { return (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 65535 : (int)(65535.0f * (_val))); } #endif /* * 2048x1080 (2K) at 24 fps or 48 fps, or 4096x2160 (4K) at 24 fps; * 3x12 bits per pixel, XYZ color space * * - In 2K, for Scope (2.39:1) presentation 2048x858 pixels of the image is used * - In 2K, for Flat (1.85:1) presentation 1998x1080 pixels of the image is used */ /* ****************************** COPIED FROM image_to_j2k.c */ /* ----------------------------------------------------------------------- */ #define CINEMA_24_CS 1302083 /*Codestream length for 24fps*/ #define CINEMA_48_CS 651041 /*Codestream length for 48fps*/ #define COMP_24_CS 1041666 /*Maximum size per color component for 2K & 4K @ 24fps*/ #define COMP_48_CS 520833 /*Maximum size per color component for 2K @ 48fps*/ static int init_4K_poc(opj_poc_t *POC, int numres) { POC[0].tile = 1; POC[0].resno0 = 0; POC[0].compno0 = 0; POC[0].layno1 = 1; POC[0].resno1 = numres - 1; POC[0].compno1 = 3; POC[0].prg1 = OPJ_CPRL; POC[1].tile = 1; POC[1].resno0 = numres - 1; POC[1].compno0 = 0; POC[1].layno1 = 1; POC[1].resno1 = numres; POC[1].compno1 = 3; POC[1].prg1 = OPJ_CPRL; return 2; } static void cinema_parameters(opj_cparameters_t *parameters) { parameters->tile_size_on = 0; /* false */ parameters->cp_tdx = 1; parameters->cp_tdy = 1; /*Tile part*/ parameters->tp_flag = 'C'; parameters->tp_on = 1; /*Tile and Image shall be at (0, 0)*/ parameters->cp_tx0 = 0; parameters->cp_ty0 = 0; parameters->image_offset_x0 = 0; parameters->image_offset_y0 = 0; /*Codeblock size = 32 * 32*/ parameters->cblockw_init = 32; parameters->cblockh_init = 32; parameters->csty |= 0x01; /*The progression order shall be CPRL*/ parameters->prog_order = OPJ_CPRL; /* No ROI */ parameters->roi_compno = -1; parameters->subsampling_dx = 1; parameters->subsampling_dy = 1; /* 9-7 transform */ parameters->irreversible = 1; } static void cinema_setup_encoder(opj_cparameters_t *parameters, opj_image_t *image, img_fol_t *img_fol) { int i; float temp_rate; switch (parameters->cp_cinema) { case OPJ_CINEMA2K_24: case OPJ_CINEMA2K_48: if (parameters->numresolution > 6) { parameters->numresolution = 6; } if (!((image->comps[0].w == 2048) || (image->comps[0].h == 1080))) { fprintf(stdout, "Image coordinates %u x %u is not 2K compliant.\nJPEG Digital Cinema Profile-3 " "(2K profile) compliance requires that at least one of coordinates match 2048 x " "1080\n", image->comps[0].w, image->comps[0].h); parameters->cp_rsiz = OPJ_STD_RSIZ; } else { parameters->cp_rsiz = OPJ_CINEMA2K; } break; case OPJ_CINEMA4K_24: if (parameters->numresolution < 1) { parameters->numresolution = 1; } else if (parameters->numresolution > 7) { parameters->numresolution = 7; } if (!((image->comps[0].w == 4096) || (image->comps[0].h == 2160))) { fprintf(stdout, "Image coordinates %u x %u is not 4K compliant.\nJPEG Digital Cinema Profile-4" "(4K profile) compliance requires that at least one of coordinates match 4096 x " "2160\n", image->comps[0].w, image->comps[0].h); parameters->cp_rsiz = OPJ_STD_RSIZ; } else { parameters->cp_rsiz = OPJ_CINEMA4K; } parameters->numpocs = init_4K_poc(parameters->POC, parameters->numresolution); break; case OPJ_OFF: /* do nothing */ break; } switch (parameters->cp_cinema) { case OPJ_CINEMA2K_24: case OPJ_CINEMA4K_24: for (i = 0; i < parameters->tcp_numlayers; i++) { temp_rate = 0; if (img_fol->rates[i] == 0) { parameters->tcp_rates[0] = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (CINEMA_24_CS * 8 * image->comps[0].dx * image->comps[0].dy); } else { temp_rate = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (img_fol->rates[i] * 8 * image->comps[0].dx * image->comps[0].dy); if (temp_rate > CINEMA_24_CS) { parameters->tcp_rates[i] = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (CINEMA_24_CS * 8 * image->comps[0].dx * image->comps[0].dy); } else { parameters->tcp_rates[i] = img_fol->rates[i]; } } } parameters->max_comp_size = COMP_24_CS; break; case OPJ_CINEMA2K_48: for (i = 0; i < parameters->tcp_numlayers; i++) { temp_rate = 0; if (img_fol->rates[i] == 0) { parameters->tcp_rates[0] = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (CINEMA_48_CS * 8 * image->comps[0].dx * image->comps[0].dy); } else { temp_rate = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (img_fol->rates[i] * 8 * image->comps[0].dx * image->comps[0].dy); if (temp_rate > CINEMA_48_CS) { parameters->tcp_rates[0] = ((float)(image->numcomps * image->comps[0].w * image->comps[0].h * image->comps[0].prec)) / (CINEMA_48_CS * 8 * image->comps[0].dx * image->comps[0].dy); } else { parameters->tcp_rates[i] = img_fol->rates[i]; } } } parameters->max_comp_size = COMP_48_CS; break; case OPJ_OFF: /* do nothing */ break; } parameters->cp_disto_alloc = 1; } static float channel_colormanage_noop(float value) { return value; } static opj_image_t *ibuftoimage(ImBuf *ibuf, opj_cparameters_t *parameters) { unsigned char *rect_uchar; float *rect_float, from_straight[4]; unsigned int subsampling_dx = parameters->subsampling_dx; unsigned int subsampling_dy = parameters->subsampling_dy; unsigned int i, i_next, numcomps, w, h, prec; unsigned int y; int *r, *g, *b, *a; /* matching 'opj_image_comp.data' type */ OPJ_COLOR_SPACE color_space; opj_image_cmptparm_t cmptparm[4]; /* maximum of 4 components */ opj_image_t *image = NULL; float (*chanel_colormanage_cb)(float); img_fol_t img_fol; /* only needed for cinema presets */ memset(&img_fol, 0, sizeof(img_fol_t)); if (ibuf->float_colorspace || (ibuf->colormanage_flag & IMB_COLORMANAGE_IS_DATA)) { /* float buffer was managed already, no need in color space conversion */ chanel_colormanage_cb = channel_colormanage_noop; } else { /* standard linear-to-srgb conversion if float buffer wasn't managed */ chanel_colormanage_cb = linearrgb_to_srgb; } if (ibuf->foptions.flag & JP2_CINE) { if (ibuf->x == 4096 || ibuf->y == 2160) { parameters->cp_cinema = OPJ_CINEMA4K_24; } else { if (ibuf->foptions.flag & JP2_CINE_48FPS) { parameters->cp_cinema = OPJ_CINEMA2K_48; } else { parameters->cp_cinema = OPJ_CINEMA2K_24; } } if (parameters->cp_cinema) { img_fol.rates = (float *)MEM_mallocN(parameters->tcp_numlayers * sizeof(float), "jp2_rates"); for (i = 0; i < parameters->tcp_numlayers; i++) { img_fol.rates[i] = parameters->tcp_rates[i]; } cinema_parameters(parameters); } color_space = (ibuf->foptions.flag & JP2_YCC) ? OPJ_CLRSPC_SYCC : OPJ_CLRSPC_SRGB; prec = 12; numcomps = 3; } else { /* Get settings from the imbuf */ color_space = (ibuf->foptions.flag & JP2_YCC) ? OPJ_CLRSPC_SYCC : OPJ_CLRSPC_SRGB; if (ibuf->foptions.flag & JP2_16BIT) { prec = 16; } else if (ibuf->foptions.flag & JP2_12BIT) { prec = 12; } else { prec = 8; } /* 32bit images == alpha channel */ /* grayscale not supported yet */ numcomps = (ibuf->planes == 32) ? 4 : 3; } w = ibuf->x; h = ibuf->y; /* initialize image components */ memset(&cmptparm, 0, sizeof(opj_image_cmptparm_t[4])); for (i = 0; i < numcomps; i++) { cmptparm[i].prec = prec; cmptparm[i].bpp = prec; cmptparm[i].sgnd = 0; cmptparm[i].dx = subsampling_dx; cmptparm[i].dy = subsampling_dy; cmptparm[i].w = w; cmptparm[i].h = h; } /* create the image */ image = opj_image_create(numcomps, &cmptparm[0], color_space); if (!image) { printf("Error: opj_image_create() failed\n"); return NULL; } /* set image offset and reference grid */ image->x0 = parameters->image_offset_x0; image->y0 = parameters->image_offset_y0; image->x1 = image->x0 + (w - 1) * subsampling_dx + 1 + image->x0; image->y1 = image->y0 + (h - 1) * subsampling_dy + 1 + image->y0; /* set image data */ rect_uchar = (unsigned char *)ibuf->rect; rect_float = ibuf->rect_float; /* set the destination channels */ r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; a = (numcomps == 4) ? image->comps[3].data : NULL; if (rect_float && rect_uchar && prec == 8) { /* No need to use the floating point buffer, just write the 8 bits from the char buffer */ rect_float = NULL; } if (rect_float) { int channels_in_float = ibuf->channels ? ibuf->channels : 4; switch (prec) { case 8: /* Convert blenders float color channels to 8, 12 or 16bit ints */ if (numcomps == 4) { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[2])); a[i] = DOWNSAMPLE_FLOAT_TO_8BIT(from_straight[3]); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[2])); a[i] = 255; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; a[i] = 255; } PIXEL_LOOPER_END; } } else { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(from_straight[2])); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[2])); } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_8BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; } PIXEL_LOOPER_END; } } break; case 12: if (numcomps == 4) { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[2])); a[i] = DOWNSAMPLE_FLOAT_TO_12BIT(from_straight[3]); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[2])); a[i] = 4095; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; a[i] = 4095; } PIXEL_LOOPER_END; } } else { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(from_straight[2])); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[2])); } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_12BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; } PIXEL_LOOPER_END; } } break; case 16: if (numcomps == 4) { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[2])); a[i] = DOWNSAMPLE_FLOAT_TO_16BIT(from_straight[3]); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[2])); a[i] = 65535; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; a[i] = 65535; } PIXEL_LOOPER_END; } } else { if (channels_in_float == 4) { PIXEL_LOOPER_BEGIN (rect_float) { premul_to_straight_v4_v4(from_straight, rect_float); r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[0])); g[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[1])); b[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(from_straight[2])); } PIXEL_LOOPER_END; } else if (channels_in_float == 3) { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 3) { r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[0])); g[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[1])); b[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[2])); } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN_CHANNELS (rect_float, 1) { r[i] = DOWNSAMPLE_FLOAT_TO_16BIT(chanel_colormanage_cb(rect_float[0])); g[i] = b[i] = r[i]; } PIXEL_LOOPER_END; } } break; } } else { /* just use rect*/ switch (prec) { case 8: if (numcomps == 4) { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = rect_uchar[0]; g[i] = rect_uchar[1]; b[i] = rect_uchar[2]; a[i] = rect_uchar[3]; } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = rect_uchar[0]; g[i] = rect_uchar[1]; b[i] = rect_uchar[2]; } PIXEL_LOOPER_END; } break; case 12: /* Up Sampling, a bit pointless but best write the bit depth requested */ if (numcomps == 4) { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = UPSAMPLE_8_TO_12(rect_uchar[0]); g[i] = UPSAMPLE_8_TO_12(rect_uchar[1]); b[i] = UPSAMPLE_8_TO_12(rect_uchar[2]); a[i] = UPSAMPLE_8_TO_12(rect_uchar[3]); } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = UPSAMPLE_8_TO_12(rect_uchar[0]); g[i] = UPSAMPLE_8_TO_12(rect_uchar[1]); b[i] = UPSAMPLE_8_TO_12(rect_uchar[2]); } PIXEL_LOOPER_END; } break; case 16: if (numcomps == 4) { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = UPSAMPLE_8_TO_16(rect_uchar[0]); g[i] = UPSAMPLE_8_TO_16(rect_uchar[1]); b[i] = UPSAMPLE_8_TO_16(rect_uchar[2]); a[i] = UPSAMPLE_8_TO_16(rect_uchar[3]); } PIXEL_LOOPER_END; } else { PIXEL_LOOPER_BEGIN (rect_uchar) { r[i] = UPSAMPLE_8_TO_16(rect_uchar[0]); g[i] = UPSAMPLE_8_TO_16(rect_uchar[1]); b[i] = UPSAMPLE_8_TO_16(rect_uchar[2]); } PIXEL_LOOPER_END; } break; } } /* Decide if MCT should be used */ parameters->tcp_mct = image->numcomps == 3 ? 1 : 0; if (parameters->cp_cinema) { cinema_setup_encoder(parameters, image, &img_fol); } if (img_fol.rates) { MEM_freeN(img_fol.rates); } return image; } bool imb_save_jp2_stream(struct ImBuf *ibuf, opj_stream_t *stream, int flags); bool imb_save_jp2(struct ImBuf *ibuf, const char *filepath, int flags) { opj_stream_t *stream = opj_stream_create_from_file( filepath, OPJ_J2K_STREAM_CHUNK_SIZE, false, NULL); if (stream == NULL) { return 0; } const bool ok = imb_save_jp2_stream(ibuf, stream, flags); opj_stream_destroy(stream); return ok; } /* Found write info at http://users.ece.gatech.edu/~slabaugh/personal/c/bitmapUnix.c */ bool imb_save_jp2_stream(struct ImBuf *ibuf, opj_stream_t *stream, int UNUSED(flags)) { int quality = ibuf->foptions.quality; opj_cparameters_t parameters; /* compression parameters */ opj_image_t *image = NULL; /* set encoding parameters to default values */ opj_set_default_encoder_parameters(&parameters); /* compression ratio */ /* invert range, from 10-100, 100-1 * where jpeg see's 1 and highest quality (lossless) and 100 is very low quality*/ parameters.tcp_rates[0] = ((100 - quality) / 90.0f * 99.0f) + 1; parameters.tcp_numlayers = 1; /* only one resolution */ parameters.cp_disto_alloc = 1; image = ibuftoimage(ibuf, &parameters); opj_codec_t *codec = NULL; bool ok = false; /* JP2 format output */ { /* get a JP2 compressor handle */ OPJ_CODEC_FORMAT format = OPJ_CODEC_JP2; if (ibuf->foptions.flag & JP2_J2K) { format = OPJ_CODEC_J2K; } else if (ibuf->foptions.flag & JP2_JP2) { format = OPJ_CODEC_JP2; } codec = opj_create_compress(format); /* configure the event callbacks (not required) */ opj_set_error_handler(codec, error_callback, stderr); opj_set_warning_handler(codec, warning_callback, stderr); #ifdef DEBUG /* too noisy */ opj_set_info_handler(codec, info_callback, stderr); #endif /* setup the encoder parameters using the current image and using user parameters */ if (opj_setup_encoder(codec, &parameters, image) == false) { goto finally; } if (opj_start_compress(codec, image, stream) == false) { goto finally; } if (opj_encode(codec, stream) == false) { goto finally; } if (opj_end_compress(codec, stream) == false) { goto finally; } } ok = true; finally: /* free remaining compression structures */ if (codec) { opj_destroy_codec(codec); } /* free image data */ if (image) { opj_image_destroy(image); } if (ok == false) { fprintf(stderr, "failed to encode image\n"); } return ok; }
20,251
456
/* ** upb_encode: parsing into a upb_msg using a upb_msglayout. */ #ifndef UPB_ENCODE_H_ #define UPB_ENCODE_H_ #include "upb/msg.h" #ifdef __cplusplus extern "C" { #endif char *upb_encode(const void *msg, const upb_msglayout *l, upb_arena *arena, size_t *size); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* UPB_ENCODE_H_ */
180
1,056
<reponame>arusinha/incubator-netbeans<filename>java/beans/src/org/netbeans/modules/beans/addproperty/AddPropertyGenerator.java /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.beans.addproperty; import java.io.IOException; import java.io.InputStreamReader; import java.io.Reader; import java.io.StringWriter; import javax.script.ScriptContext; import javax.script.ScriptEngine; import javax.script.ScriptException; import org.netbeans.api.java.source.CodeStyle; import org.netbeans.api.java.source.CodeStyleUtils; import org.netbeans.api.scripting.Scripting; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.util.Exceptions; /** * * @author <NAME> (<EMAIL>) */ public class AddPropertyGenerator { public String generate(AddPropertyConfig addPropertyConfig, CodeStyle cs) { ScriptEngine scriptEngine = getScriptEngine(); if (scriptEngine != null) { FileObject template = getTemplateFileObject(addPropertyConfig.getTEMPLATE_PATH()); if (template != null && template.isValid()) { final String type = addPropertyConfig.getType().trim(); final String name = addPropertyConfig.getName().trim(); final String fieldName = CodeStyleUtils.addPrefixSuffix(name, addPropertyConfig.isStatic() ? cs.getStaticFieldNamePrefix() : cs.getFieldNamePrefix(), addPropertyConfig.isStatic() ? cs.getStaticFieldNameSuffix() : cs.getFieldNameSuffix()); final String paramName = CodeStyleUtils.addPrefixSuffix(name, cs.getParameterNamePrefix(), cs.getParameterNameSuffix()); final String paramIndex = CodeStyleUtils.addPrefixSuffix("index", //NOI18N cs.getParameterNamePrefix(), cs.getParameterNameSuffix()); final String propName = CodeStyleUtils.addPrefixSuffix( addPropertyConfig.getPopName().trim(), cs.getStaticFieldNamePrefix(), cs.getStaticFieldNameSuffix()); final String initializer = addPropertyConfig.getInitializer().trim(); String access; switch (addPropertyConfig.getAccess()) { case PRIVATE: access = "private "; // NOI18N break; case PROTECTED: access = "protected "; // NOI18N break; case PUBLIC: access = "public "; // NOI18N break; default: access = ""; break; } ScriptContext scriptContext = scriptEngine.getContext(); StringWriter writer = new StringWriter(); scriptContext.setWriter(writer); scriptContext.setAttribute(FileObject.class.getName(), template, ScriptContext.ENGINE_SCOPE); scriptContext.setAttribute(ScriptEngine.FILENAME, template.getNameExt(), ScriptContext.ENGINE_SCOPE); scriptContext.setAttribute("access", access, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("type", type, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("className", addPropertyConfig.getClassName(), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("name", name, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("fieldName", fieldName, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("paramName", paramName, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("paramIndex", paramIndex, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("initializer", initializer, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("capitalizedName", CodeStyleUtils.getCapitalizedName(name), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("getterName", CodeStyleUtils.computeGetterName(fieldName, type.equalsIgnoreCase("boolean"), addPropertyConfig.isStatic(), cs), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("setterName", CodeStyleUtils.computeSetterName(fieldName, addPropertyConfig.isStatic(), cs), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("static", Boolean.valueOf(addPropertyConfig.isStatic()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("final", Boolean.valueOf(addPropertyConfig.isFinale()), ScriptContext.ENGINE_SCOPE); // NOI18N AddPropertyConfig.GENERATE generateGetterSetter = addPropertyConfig.getGenerateGetterSetter(); scriptContext.setAttribute("generateGetter", // NOI18N Boolean.valueOf(generateGetterSetter == AddPropertyConfig.GENERATE.GETTER_AND_SETTER || generateGetterSetter == AddPropertyConfig.GENERATE.GETTER), ScriptContext.ENGINE_SCOPE); scriptContext.setAttribute("generateSetter", // NOI18N Boolean.valueOf(generateGetterSetter == AddPropertyConfig.GENERATE.GETTER_AND_SETTER || generateGetterSetter == AddPropertyConfig.GENERATE.SETTER), ScriptContext.ENGINE_SCOPE); scriptContext.setAttribute("generateJavadoc", Boolean.valueOf(addPropertyConfig.isGenerateJavadoc()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("bound", Boolean.valueOf(addPropertyConfig.isBound()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("PROP_NAME", propName, ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("vetoable", Boolean.valueOf(addPropertyConfig.isVetoable()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("indexed", Boolean.valueOf(addPropertyConfig.isIndexed()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("propertyChangeSupport", addPropertyConfig.getPropertyChangeSupportName(), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("vetoableChangeSupport", addPropertyConfig.getVetoableChangeSupportName(), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("generatePropertyChangeSupport", Boolean.valueOf(addPropertyConfig.isGeneratePropertyChangeSupport()), ScriptContext.ENGINE_SCOPE); // NOI18N scriptContext.setAttribute("generateVetoablePropertyChangeSupport", Boolean.valueOf(addPropertyConfig.isGenerateVetoableChangeSupport()), ScriptContext.ENGINE_SCOPE); // NOI18N Reader templateReader = null; try { templateReader = new InputStreamReader(template.getInputStream()); scriptEngine.eval(templateReader); return writer.toString(); } catch (ScriptException ex) { Exceptions.printStackTrace(ex); } catch (IOException ioe) { Exceptions.printStackTrace(ioe); } finally { if (writer != null) { try { writer.close(); } catch (IOException ex) { Exceptions.printStackTrace(ex); } } if (templateReader != null) { try { templateReader.close(); } catch (IOException ex) { Exceptions.printStackTrace(ex); } } } } } return "/*Error*/"; // NOI18N } private static FileObject getTemplateFileObject(String templatePath) { return FileUtil.getConfigFile(templatePath); } private static ScriptEngine getScriptEngine() { return Scripting.createManager().getEngineByName("freemarker"); // NOI18N } }
3,939
764
<reponame>641589523/token-profile<gh_stars>100-1000 {"symbol": "CHADS","address": "0x69692D3345010a207b759a7D1af6fc7F38b35c5E","overview":{"en": ""},"email": "","website": "https://chads.vc","state": "NORMAL","links": {"blog": "","twitter": "https://twitter.com/chadsvc","telegram": "","github": ""}}
118
914
<reponame>yanlun0323/cicada package top.crossoverjie.cicada.example.res; /** * Function: * * @author crossoverJie * Date: 2018/8/31 19:18 * @since JDK 1.8 */ public class DemoResVO { private Long index ; private String msg ; public String getMsg() { return msg; } public void setMsg(String msg) { this.msg = msg; } public Long getIndex() { return index; } public void setIndex(Long index) { this.index = index; } }
220
575
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/events/event_target.h" #include <algorithm> #include "base/check.h" #include "base/notreached.h" #include "ui/events/event.h" #include "ui/gfx/geometry/point_conversions.h" namespace ui { EventTarget::EventTarget() = default; EventTarget::~EventTarget() = default; void EventTarget::ConvertEventToTarget(const EventTarget* target, LocatedEvent* event) const {} gfx::PointF EventTarget::GetScreenLocationF( const ui::LocatedEvent& event) const { NOTREACHED(); return event.root_location_f(); } gfx::Point EventTarget::GetScreenLocation(const ui::LocatedEvent& event) const { return gfx::ToFlooredPoint(GetScreenLocationF(event)); } void EventTarget::AddPreTargetHandler(EventHandler* handler, Priority priority) { CHECK(handler); PrioritizedHandler prioritized; prioritized.handler = handler; prioritized.priority = priority; if (priority == Priority::kDefault) pre_target_list_.push_back(prioritized); else pre_target_list_.insert(pre_target_list_.begin(), prioritized); handler->targets_installed_on_.push_back(this); } void EventTarget::RemovePreTargetHandler(EventHandler* handler) { CHECK(handler); // Only erase a single one, which matches the removal code right after this. auto installed_on_iter = std::find(handler->targets_installed_on_.begin(), handler->targets_installed_on_.end(), this); if (installed_on_iter != handler->targets_installed_on_.end()) handler->targets_installed_on_.erase(installed_on_iter); EventHandlerPriorityList::iterator it, end; for (it = pre_target_list_.begin(), end = pre_target_list_.end(); it != end; ++it) { if (it->handler == handler) { pre_target_list_.erase(it); return; } } } void EventTarget::AddPostTargetHandler(EventHandler* handler) { DCHECK(handler); post_target_list_.push_back(handler); } void EventTarget::RemovePostTargetHandler(EventHandler* handler) { auto find = std::find(post_target_list_.begin(), post_target_list_.end(), handler); if (find != post_target_list_.end()) post_target_list_.erase(find); } bool EventTarget::IsPreTargetListEmpty() const { return pre_target_list_.empty(); } EventHandler* EventTarget::SetTargetHandler(EventHandler* target_handler) { EventHandler* original_target_handler = target_handler_; target_handler_ = target_handler; return original_target_handler; } void EventTarget::GetPreTargetHandlers(EventHandlerList* list) { EventTarget* target = this; EventHandlerPriorityList temp; while (target) { // Build a composite list of EventHandlers from targets. temp.insert(temp.begin(), target->pre_target_list_.begin(), target->pre_target_list_.end()); target = target->GetParentTarget(); } // Sort the list, keeping relative order, but making sure the // accessibility handlers always go first before system, which will // go before default, at all levels of EventTarget. std::stable_sort(temp.begin(), temp.end()); // Add the sorted handlers to the result list, in order. for (size_t i = 0; i < temp.size(); ++i) list->insert(list->end(), temp[i].handler); } void EventTarget::GetPostTargetHandlers(EventHandlerList* list) { EventTarget* target = this; while (target) { list->insert(list->end(), target->post_target_list_.begin(), target->post_target_list_.end()); target = target->GetParentTarget(); } } } // namespace ui
1,274
822
<filename>natasha/tests/test_addr.py import pytest from natasha.obj import ( AddrPart as Part, Addr ) tests = [ [ 'Россия, Вологодская обл. г. Череповец, пр.Победы 93 б', Addr([ Part('Россия', 'страна'), Part('Вологодская', 'область'), Part('Череповец', 'город'), Part('Победы', 'проспект'), ]) ], [ '692909, РФ, Приморский край, г. Находка, ул. Добролюбова, 18', Addr([ Part('692909', 'индекс'), Part('РФ', 'страна'), Part('Приморский', 'край'), Part('Находка', 'город'), Part('Добролюбова', 'улица'), ]) ], [ '<NAME>, ул. Дружбы, 13', Addr([ Part('Федоровка', 'деревня'), Part('Дружбы', 'улица'), ]) ], [ 'Россия, 129110, г.Москва, Олимпийский проспект, 22', Addr([ Part('Россия', 'страна'), Part('129110', 'индекс'), Part('Москва', 'город'), Part('Олимпийский', 'проспект'), ]) ], [ 'г. Санкт-Петербург, Красногвардейский пер., д. 15', Addr([ Part('Санкт-Петербург', 'город'), Part('Красногвардейский', 'переулок'), Part('15', 'дом') ]) ], [ 'Республика Карелия,г.Петрозаводск,ул.Маршала Мерецкова, д.8 Б,офис 4', Addr([ Part('Карелия', 'республика'), Part('Петрозаводск', 'город'), Part('Маршала Мерецкова', 'улица'), Part('8 Б', 'дом'), Part('4', 'офис') ]) ], [ '628000, ХМАО-Югра, г.Ханты-Мансийск, ул. Ледовая , д.19', Addr([ Part('628000', 'индекс'), Part('ХМАО-Югра'), Part('Ханты-Мансийск', 'город'), Part('Ледовая', 'улица'), Part('19', 'дом') ]) ], [ 'ХМАО г.Нижневартовск пер.Ягельный 17', Addr([ Part('ХМАО'), Part('Нижневартовск', 'город'), Part('Ягельный', 'переулок'), ]) ], [ 'Белгородская обл, пгт Борисовка,ул. Рудого д.160', Addr([ Part('Белгородская', 'область'), Part('Борисовка', 'посёлок'), Part('Рудого', 'улица'), Part('160', 'дом') ]) ], [ 'Самарская область, п.г.т. Алексеевка, ул. Ульяновская д. 21', Addr([ Part('Самарская', 'область'), Part('Алексеевка', 'посёлок'), Part('Ульяновская', 'улица'), Part('21', 'дом') ]) ], [ 'Мурманская обл поселок городского типа Молочный, ул.Гальченко д.11', Addr([ Part('Мурманская', 'область'), Part('Молочный', 'посёлок'), Part('Гальченко', 'улица'), Part('11', 'дом') ]) ], [ 'ул. Народного Ополчения д. 9к.3', Addr([ Part('Народного Ополчения', 'улица'), Part('9к', 'дом'), ]) ], [ 'ул. <NAME>, д.37/430', Addr([ Part('Б. Пироговская', 'улица'), Part('37/430', 'дом') ]) ], [ 'Ставропольский край, Изобильненский район, город Изобильный, улица Чапаева, дом 68', Addr([ Part('Ставропольский', 'край'), Part('Изобильненский', 'район'), Part('Изобильный', 'город'), Part('Чапаева', 'улица'), Part('68', 'дом') ]) ], ] @pytest.mark.parametrize('test', tests) def test_extractor(addr_extractor, test): text, target = test pred = addr_extractor.find(text).fact assert pred == target
3,083
806
<filename>killers/nokia/com.evenwell.powersaving.g3/sources/android/support/v4/media/session/MediaButtonReceiver.java package android.support.v4.media.session; import android.content.BroadcastReceiver; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.content.pm.PackageManager; import android.content.pm.ResolveInfo; import android.support.v4.media.MediaBrowserServiceCompat; import android.view.KeyEvent; import java.util.List; public class MediaButtonReceiver extends BroadcastReceiver { public void onReceive(Context context, Intent intent) { Intent queryIntent = new Intent("android.intent.action.MEDIA_BUTTON"); queryIntent.setPackage(context.getPackageName()); PackageManager pm = context.getPackageManager(); List<ResolveInfo> resolveInfos = pm.queryIntentServices(queryIntent, 0); if (resolveInfos.isEmpty()) { queryIntent.setAction(MediaBrowserServiceCompat.SERVICE_INTERFACE); resolveInfos = pm.queryIntentServices(queryIntent, 0); } if (resolveInfos.isEmpty()) { throw new IllegalStateException("Could not find any Service that handles android.intent.action.MEDIA_BUTTON or a media browser service implementation"); } else if (resolveInfos.size() != 1) { throw new IllegalStateException("Expected 1 Service that handles " + queryIntent.getAction() + ", found " + resolveInfos.size()); } else { ResolveInfo resolveInfo = (ResolveInfo) resolveInfos.get(0); intent.setComponent(new ComponentName(resolveInfo.serviceInfo.packageName, resolveInfo.serviceInfo.name)); context.startService(intent); } } public static KeyEvent handleIntent(MediaSessionCompat mediaSessionCompat, Intent intent) { if (mediaSessionCompat == null || intent == null || !"android.intent.action.MEDIA_BUTTON".equals(intent.getAction()) || !intent.hasExtra("android.intent.extra.KEY_EVENT")) { return null; } KeyEvent ke = (KeyEvent) intent.getParcelableExtra("android.intent.extra.KEY_EVENT"); mediaSessionCompat.getController().dispatchMediaButtonEvent(ke); return ke; } }
789
384
<filename>thirdparty/tng_io/src/tests/compression/test44.h #define TESTNAME "Initial coding. Intra frame BWLZH algorithm. High accuracy. Cubic cell." #define FILENAME "test44.tng_compress" #define ALGOTEST #define NATOMS 100000 #define CHUNKY 1 #define SCALE 0.5 #define PRECISION 1e-8 #define WRITEVEL 0 #define VELPRECISION 0.1 #define INITIALCODING 9 #define INITIALCODINGPARAMETER 0 #define CODING 1 #define CODINGPARAMETER -1 #define VELCODING 4 #define VELCODINGPARAMETER 0 #define INTMIN1 0 #define INTMIN2 0 #define INTMIN3 0 #define INTMAX1 1610612736 #define INTMAX2 1610612736 #define INTMAX3 1610612736 #define NFRAMES 10 #define EXPECTED_FILESIZE 1436901.
261
575
#!/usr/bin/env python # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import os import re import subprocess import sys import common # A list of files that are allowed to have static initializers. # If something adds a static initializer, revert it. We don't accept regressions # in static initializers. _LINUX_SI_FILE_ALLOWLIST = { 'chrome': [ 'InstrProfilingRuntime.cpp', # Only in coverage builds, not production. 'atomicops_internals_x86.cc', # TODO(crbug.com/973551): Remove. 'debugallocation_shim.cc', # TODO(crbug.com/973552): Remove. 'iostream.cpp:', # TODO(crbug.com/973554): Remove. '000101', # libc++ uses init_priority 101 for iostreams. 'spinlock.cc', # TODO(crbug.com/973556): Remove. ], 'nacl_helper_bootstrap': [], } _LINUX_SI_FILE_ALLOWLIST['nacl_helper'] = _LINUX_SI_FILE_ALLOWLIST['chrome'] # The lists for Chrome OS are conceptually the same as the Linux ones above. # If something adds a static initializer, revert it. We don't accept regressions # in static initializers. _CROS_SI_FILE_ALLOWLIST = { 'chrome': [ 'InstrProfilingRuntime.cpp', # Only in coverage builds, not production. 'atomicops_internals_x86.cc', # TODO(crbug.com/973551): Remove. 'debugallocation_shim.cc', # TODO(crbug.com/973552): Remove. 'iostream.cpp:', # TODO(crbug.com/973554): Remove. '000101', # libc++ uses init_priority 101 for iostreams. 'spinlock.cc', # TODO(crbug.com/973556): Remove. 'rpc.pb.cc', # TODO(crbug.com/537099): Remove. ], 'nacl_helper_bootstrap': [], } _CROS_SI_FILE_ALLOWLIST['nacl_helper'] = _LINUX_SI_FILE_ALLOWLIST['chrome'] # Mac can use this list when a dsym is available, otherwise it will fall back # to checking the count. _MAC_SI_FILE_ALLOWLIST = [ 'InstrProfilingRuntime.cpp', # Only in coverage builds, not in production. 'sysinfo.cc', # Only in coverage builds, not in production. 'iostream.cpp', # Used to setup std::cin/cout/cerr. '000101', # Used to setup std::cin/cout/cerr ] # Two static initializers are needed on Mac for libc++ to set up # std::cin/cout/cerr before main() runs. Only iostream.cpp needs to be counted # here. FALLBACK_EXPECTED_MAC_SI_COUNT = 2 # For coverage builds, also allow 'IntrProfilingRuntime.cpp' COVERAGE_BUILD_FALLBACK_EXPECTED_MAC_SI_COUNT = 3 def run_process(command): p = subprocess.Popen(command, stdout=subprocess.PIPE) stdout = p.communicate()[0] if p.returncode != 0: raise Exception( 'ERROR from command "%s": %d' % (' '.join(command), p.returncode)) return stdout def main_mac(src_dir, allow_coverage_initializer = False): base_names = ('Chromium', 'Google Chrome') ret = 0 for base_name in base_names: app_bundle = base_name + '.app' framework_name = base_name + ' Framework' framework_bundle = framework_name + '.framework' framework_dsym_bundle = framework_bundle + '.dSYM' framework_unstripped_name = framework_name + '.unstripped' chromium_executable = os.path.join(app_bundle, 'Contents', 'MacOS', base_name) chromium_framework_executable = os.path.join(framework_bundle, framework_name) chromium_framework_dsym = os.path.join(framework_dsym_bundle, 'Contents', 'Resources', 'DWARF', framework_name) if os.path.exists(chromium_executable): # Count the number of files with at least one static initializer. si_count = 0 # Find the __DATA,__mod_init_func section. # If the checkout uses the hermetic xcode binaries, then otool must be # directly invoked. The indirection via /usr/bin/otool won't work unless # there's an actual system install of Xcode. hermetic_xcode_path = os.path.join(src_dir, 'build', 'mac_files', 'xcode_binaries') if os.path.exists(hermetic_xcode_path): otool_path = os.path.join(hermetic_xcode_path, 'Contents', 'Developer', 'Toolchains', 'XcodeDefault.xctoolchain', 'usr', 'bin', 'otool') else: otool_path = 'otool' stdout = run_process([otool_path, '-l', chromium_framework_executable]) section_index = stdout.find('sectname __mod_init_func') if section_index != -1: # If the section exists, the "size" line must follow it. initializers_s = re.search('size 0x([0-9a-f]+)', stdout[section_index:]).group(1) word_size = 8 # Assume 64 bit si_count = int(initializers_s, 16) / word_size # Print the list of static initializers. if si_count > 0: # First look for a dSYM to get information about the initializers. If # one is not present, check if there is an unstripped copy of the build # output. mac_tools_path = os.path.join(src_dir, 'tools', 'mac') if os.path.exists(chromium_framework_dsym): dump_static_initializers = os.path.join( mac_tools_path, 'dump-static-initializers.py') stdout = run_process( [dump_static_initializers, chromium_framework_dsym]) for line in stdout: if re.match('0x[0-9a-f]+', line) and not any( f in line for f in _MAC_SI_FILE_ALLOWLIST): ret = 1 print 'Found invalid static initializer: {}'.format(line) print stdout else: allowed_si_count = FALLBACK_EXPECTED_MAC_SI_COUNT if allow_coverage_initializer: allowed_si_count = COVERAGE_BUILD_FALLBACK_EXPECTED_MAC_SI_COUNT if si_count > allowed_si_count: print('Expected <= %d static initializers in %s, but found %d' % (allowed_si_count, chromium_framework_executable, si_count)) ret = 1 show_mod_init_func = os.path.join(mac_tools_path, 'show_mod_init_func.py') args = [show_mod_init_func] if os.path.exists(framework_unstripped_name): args.append(framework_unstripped_name) else: print '# Warning: Falling back to potentially stripped output.' args.append(chromium_framework_executable) if os.path.exists(hermetic_xcode_path): args.extend(['--xcode-path', hermetic_xcode_path]) stdout = run_process(args) print stdout return ret def main_linux(src_dir, is_chromeos): ret = 0 allowlist = _CROS_SI_FILE_ALLOWLIST if is_chromeos else \ _LINUX_SI_FILE_ALLOWLIST for binary_name in allowlist: if not os.path.exists(binary_name): continue dump_static_initializers = os.path.join(src_dir, 'tools', 'linux', 'dump-static-initializers.py') stdout = run_process([dump_static_initializers, '-d', binary_name]) # The output has the following format: # First lines: '# <file_name> <si_name>' # Last line: '# Found <num> static initializers in <num> files.' # # For example: # # spinlock.cc GetSystemCPUsCount() # # spinlock.cc adaptive_spin_count # # Found 2 static initializers in 1 files. files_with_si = set() for line in stdout.splitlines()[:-1]: parts = line.split(' ', 2) assert len(parts) == 3 and parts[0] == '#' files_with_si.add(parts[1]) for f in files_with_si: if f not in allowlist[binary_name]: ret = 1 print('Error: file "%s" is not expected to have static initializers in' ' binary "%s"') % (f, binary_name) print '\n# Static initializers in %s:' % binary_name print stdout return ret def main_run(args): if args.build_config_fs != 'Release': raise Exception('Only release builds are supported') src_dir = args.paths['checkout'] build_dir = os.path.join(src_dir, 'out', args.build_config_fs) os.chdir(build_dir) if sys.platform.startswith('darwin'): rc = main_mac(src_dir, allow_coverage_initializer = '--allow-coverage-initializer' in args.args) elif sys.platform == 'linux2': is_chromeos = 'buildername' in args.properties and \ 'chromeos' in args.properties['buildername'] rc = main_linux(src_dir, is_chromeos) else: sys.stderr.write('Unsupported platform %s.\n' % repr(sys.platform)) return 2 json.dump({ 'valid': rc == 0, 'failures': [], }, args.output) return rc def main_compile_targets(args): if sys.platform.startswith('darwin'): compile_targets = ['chrome'] elif sys.platform == 'linux2': compile_targets = ['chrome', 'nacl_helper', 'nacl_helper_bootstrap'] else: compile_targets = [] json.dump(compile_targets, args.output) return 0 if __name__ == '__main__': funcs = { 'run': main_run, 'compile_targets': main_compile_targets, } sys.exit(common.run_script(sys.argv[1:], funcs))
3,900
310
{ "name": "ws", "description": "A WebSocket library for Node.", "url": "https://github.com/websockets/ws" }
42
638
<gh_stars>100-1000 from __future__ import print_function import time import sys import Pyro4 if sys.version_info < (3, 0): input = raw_input uri = input("Uri of benchmark server? ").strip() print("Timing raw connect speed (no method call)...") p = Pyro4.core.Proxy(uri) p.oneway() ITERATIONS = 2000 begin = time.time() for loop in range(ITERATIONS): if loop % 500 == 0: print(loop) p._pyroRelease() p._pyroBind() duration = time.time() - begin print("%d connections in %.3f sec = %.0f conn/sec" % (ITERATIONS, duration, ITERATIONS / duration)) del p print("Timing proxy creation+connect+methodcall speed...") ITERATIONS = 2000 begin = time.time() for loop in range(ITERATIONS): if loop % 500 == 0: print(loop) with Pyro4.core.Proxy(uri) as p: p.oneway() duration = time.time() - begin print("%d new proxy calls in %.3f sec = %.0f calls/sec" % (ITERATIONS, duration, ITERATIONS / duration)) print("Timing proxy methodcall speed...") p = Pyro4.core.Proxy(uri) p.oneway() ITERATIONS = 10000 begin = time.time() for loop in range(ITERATIONS): if loop % 1000 == 0: print(loop) p.oneway() duration = time.time() - begin print("%d calls in %.3f sec = %.0f calls/sec" % (ITERATIONS, duration, ITERATIONS / duration)) print("Serializer used:", Pyro4.config.SERIALIZER)
500
14,668
<filename>chrome/android/javatests/src/org/chromium/chrome/browser/interstitials/LookalikeInterstitialTest.java // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chrome.browser.interstitials; import android.support.test.InstrumentationRegistry; import androidx.test.filters.MediumTest; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.chromium.base.test.params.ParameterizedCommandLineFlags; import org.chromium.base.test.params.ParameterizedCommandLineFlags.Switches; import org.chromium.base.test.util.CommandLineFlags; import org.chromium.chrome.browser.flags.ChromeFeatureList; import org.chromium.chrome.browser.flags.ChromeSwitches; import org.chromium.chrome.browser.tab.Tab; import org.chromium.chrome.test.ChromeJUnit4ClassRunner; import org.chromium.chrome.test.ChromeTabbedActivityTestRule; import org.chromium.chrome.test.util.ChromeTabUtils; import org.chromium.chrome.test.util.browser.TabTitleObserver; import org.chromium.content_public.common.ContentSwitches; import org.chromium.net.test.EmbeddedTestServer; /** Tests for the Lookalike URL interstitial (aka confusables). */ @RunWith(ChromeJUnit4ClassRunner.class) @MediumTest @CommandLineFlags.Add({ChromeSwitches.DISABLE_FIRST_RUN_EXPERIENCE, ContentSwitches.HOST_RESOLVER_RULES + "=MAP * 127.0.0.1"}) // clang-format off @ParameterizedCommandLineFlags({ @Switches(), @Switches("enable-features=" + ChromeFeatureList.LOOKALIKE_NAVIGATION_URL_SUGGESTIONS_UI), }) // clang-format on public class LookalikeInterstitialTest { private static final String INTERSTITIAL_TITLE_PREFIX = "Continue to "; private static final int INTERSTITIAL_TITLE_UPDATE_TIMEOUT_SECONDS = 5; private EmbeddedTestServer mServer; @Rule public ChromeTabbedActivityTestRule mActivityTestRule = new ChromeTabbedActivityTestRule(); @Before public void setUp() { mActivityTestRule.startMainActivityFromLauncher(); mServer = EmbeddedTestServer.createAndStartServer(InstrumentationRegistry.getContext()); } @After public void tearDown() { mServer.stopAndDestroyServer(); } @Test @Ignore("crbug/941488") public void testBasicInterstitialShown() throws Exception { Tab tab = mActivityTestRule.getActivity().getActivityTab(); ChromeTabUtils.loadUrlOnUiThread(tab, mServer.getURLWithHostName("xn--googl-fsa.com", // googlé.com "/chrome/test/data/android/navigate/simple.html")); // Wait for the interstitial page to commit and check the page title. new TabTitleObserver(tab, INTERSTITIAL_TITLE_PREFIX) { @Override protected boolean doesTitleMatch(String expectedTitle, String actualTitle) { return actualTitle.indexOf(expectedTitle) == 0; } }.waitForTitleUpdate(INTERSTITIAL_TITLE_UPDATE_TIMEOUT_SECONDS); Assert.assertEquals(0, tab.getTitle().indexOf(INTERSTITIAL_TITLE_PREFIX)); } }
1,160
679
<reponame>Grosskopf/openoffice /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef OOX_DRAWINGML_TABLEPROPERTIES_HXX #define OOX_DRAWINGML_TABLEPROPERTIES_HXX #include "oox/drawingml/table/tablerow.hxx" #include "oox/drawingml/table/tablestyle.hxx" #include "oox/helper/propertymap.hxx" #include "oox/drawingml/color.hxx" #include <boost/shared_ptr.hpp> #include <boost/optional.hpp> #include <vector> #include <map> namespace oox { namespace drawingml { namespace table { class TableProperties { public: TableProperties(); ~TableProperties(); std::vector< sal_Int32 >& getTableGrid() { return mvTableGrid; }; std::vector< TableRow >& getTableRows() { return mvTableRows; }; rtl::OUString& getStyleId(){ return maStyleId; }; boost::shared_ptr< TableStyle >& getTableStyle(){ return mpTableStyle; }; sal_Bool& isRtl(){ return mbRtl; }; sal_Bool& isFirstRow(){ return mbFirstRow; }; sal_Bool& isFirstCol(){ return mbFirstCol; }; sal_Bool& isLastRow(){ return mbLastRow; }; sal_Bool& isLastCol(){ return mbLastCol; }; sal_Bool& isBandRow(){ return mbBandRow; }; sal_Bool& isBandCol(){ return mbBandCol; }; void apply( const TablePropertiesPtr& ); void pushToPropSet( const ::oox::core::XmlFilterBase& rFilterBase, const ::com::sun::star::uno::Reference < ::com::sun::star::beans::XPropertySet > & xPropSet, ::oox::drawingml::TextListStylePtr pMasterTextListStyle ); private: const TableStyle& getUsedTableStyle( const ::oox::core::XmlFilterBase& rFilterBase, sal_Bool &isCreateTabStyle); rtl::OUString maStyleId; // either StyleId is available boost::shared_ptr< TableStyle > mpTableStyle; // or the complete TableStyle std::vector< sal_Int32 > mvTableGrid; std::vector< TableRow > mvTableRows; sal_Bool mbRtl; sal_Bool mbFirstRow; sal_Bool mbFirstCol; sal_Bool mbLastRow; sal_Bool mbLastCol; sal_Bool mbBandRow; sal_Bool mbBandCol; }; } } } #endif // OOX_DRAWINGML_TABLEPROPERTIES_HXX
1,061
628
<gh_stars>100-1000 /************************************************************************* * Copyright (c) 2014 <NAME> * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **************************************************************************/ /* <NAME> Princeton University TriMesh_pointareas.cc Compute the area "belonging" to each vertex or each corner of a triangle (defined as Voronoi area restricted to the 1-ring of a vertex, or to the triangle). for more voronoi area, see Meyer M., Discrete differential geometry operators for triangulated 2-manifolds */ #include "trianglemesh.h" // Compute per-vertex point areas void TriangleMesh::need_pointareas() { if (pointareas.size() == vertices.size()) return; need_faces(); dprintf("Computing point areas... "); int nf = faces.size(), nv = vertices.size(); pointareas.clear(); pointareas.resize(nv); cornerareas.clear(); cornerareas.resize(nf); #pragma omp parallel for for (int i = 0; i < nf; i++) { // Edges vec e[3] = { vertices[faces[i][2]] - vertices[faces[i][1]], vertices[faces[i][0]] - vertices[faces[i][2]], vertices[faces[i][1]] - vertices[faces[i][0]] }; // Compute corner weights float area = 0.5f * len(e[0] CROSS e[1]); float l2[3] = { len2(e[0]), len2(e[1]), len2(e[2]) }; float ew[3] = { l2[0] * (l2[1] + l2[2] - l2[0]), l2[1] * (l2[2] + l2[0] - l2[1]), l2[2] * (l2[0] + l2[1] - l2[2]) }; if (ew[0] <= 0.0f) { cornerareas[i][1] = -0.25f * l2[2] * area / (e[0] DOT e[2]); cornerareas[i][2] = -0.25f * l2[1] * area / (e[0] DOT e[1]); cornerareas[i][0] = area - cornerareas[i][1] - cornerareas[i][2]; } else if (ew[1] <= 0.0f) { cornerareas[i][2] = -0.25f * l2[0] * area / (e[1] DOT e[0]); cornerareas[i][0] = -0.25f * l2[2] * area / (e[1] DOT e[2]); cornerareas[i][1] = area - cornerareas[i][2] - cornerareas[i][0]; } else if (ew[2] <= 0.0f) { cornerareas[i][0] = -0.25f * l2[1] * area / (e[2] DOT e[1]); cornerareas[i][1] = -0.25f * l2[0] * area / (e[2] DOT e[0]); cornerareas[i][2] = area - cornerareas[i][0] - cornerareas[i][1]; } else { float ewscale = 0.5f * area / (ew[0] + ew[1] + ew[2]); for (int j = 0; j < 3; j++) cornerareas[i][j] = ewscale * (ew[(j+1)%3] + ew[(j+2)%3]); } #pragma omp atomic pointareas[faces[i][0]] += cornerareas[i][0]; #pragma omp atomic pointareas[faces[i][1]] += cornerareas[i][1]; #pragma omp atomic pointareas[faces[i][2]] += cornerareas[i][2]; } dprintf("Done.\n"); }
2,410
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.core.windows; import java.beans.PropertyVetoException; import java.io.File; import java.io.IOException; import java.net.URL; import java.net.URLStreamHandler; import java.net.URLStreamHandlerFactory; import java.util.Enumeration; import junit.framework.Assert; import org.netbeans.core.startup.MainLookup; import org.netbeans.junit.Manager; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileSystem; import org.openide.filesystems.FileUtil; import org.openide.filesystems.MultiFileSystem; import org.openide.filesystems.Repository; import org.openide.filesystems.XMLFileSystem; import org.openide.util.Lookup; import org.openide.util.lookup.Lookups; import org.openide.util.lookup.ProxyLookup; /** * Inspired by org.netbeans.api.project.TestUtil. * * @author <NAME> */ public class IDEInitializer { private static XMLFileSystem systemFS; /** * Add layers to system filesystem. * * @param layers xml-layer URLs to be present in the system filesystem. * */ public static void addLayers (String[] layers) { ClassLoader classLoader = IDEInitializer.class.getClassLoader (); URL[] urls = new URL [layers.length]; int i, k = urls.length; for (i = 0; i < k; i++) { urls [i] = classLoader.getResource (layers [i]); } systemFS = new XMLFileSystem (); try { systemFS.setXmlUrls (urls); } catch (Exception ex) { ex.printStackTrace (); } MainLookup.register(systemFS); } /** * Remove layers from system filesystem which were added using addLayers * */ public static void removeLayers () { MainLookup.unregister(systemFS); } }
894
8,092
<gh_stars>1000+ # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module contains a BigQuery Hook, as well as a very basic PEP 249 implementation for BigQuery. """ import hashlib import json import logging import time import warnings from copy import deepcopy from datetime import datetime, timedelta from typing import Any, Dict, Iterable, List, Mapping, NoReturn, Optional, Sequence, Tuple, Type, Union from google.api_core.retry import Retry from google.cloud.bigquery import ( DEFAULT_RETRY, Client, CopyJob, ExternalConfig, ExtractJob, LoadJob, QueryJob, SchemaField, ) from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem, DatasetReference from google.cloud.bigquery.table import EncryptionConfiguration, Row, Table, TableReference from google.cloud.exceptions import NotFound from googleapiclient.discovery import Resource, build from pandas import DataFrame from pandas_gbq import read_gbq from pandas_gbq.gbq import GbqConnector # noqa from sqlalchemy import create_engine from airflow.exceptions import AirflowException from airflow.hooks.dbapi import DbApiHook from airflow.providers.google.common.consts import CLIENT_INFO from airflow.providers.google.common.hooks.base_google import GoogleBaseHook from airflow.utils.helpers import convert_camel_to_snake from airflow.utils.log.logging_mixin import LoggingMixin log = logging.getLogger(__name__) BigQueryJob = Union[CopyJob, QueryJob, LoadJob, ExtractJob] class BigQueryHook(GoogleBaseHook, DbApiHook): """ Interact with BigQuery. This hook uses the Google Cloud connection. :param gcp_conn_id: The Airflow connection used for GCP credentials. :param delegate_to: This performs a task on one host with reference to other hosts. :param use_legacy_sql: This specifies whether to use legacy SQL dialect. :param location: The location of the BigQuery resource. :param api_resource_configs: This contains params configuration applied for Google BigQuery jobs. :param impersonation_chain: This is the optional service account to impersonate using short term credentials. :param labels: The BigQuery resource label. """ conn_name_attr = 'gcp_conn_id' default_conn_name = 'google_cloud_bigquery_default' conn_type = 'gcpbigquery' hook_name = 'Google Bigquery' def __init__( self, gcp_conn_id: str = GoogleBaseHook.default_conn_name, delegate_to: Optional[str] = None, use_legacy_sql: bool = True, location: Optional[str] = None, api_resource_configs: Optional[Dict] = None, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, labels: Optional[Dict] = None, ) -> None: super().__init__( gcp_conn_id=gcp_conn_id, delegate_to=delegate_to, impersonation_chain=impersonation_chain, ) self.use_legacy_sql = use_legacy_sql self.location = location self.running_job_id = None # type: Optional[str] self.api_resource_configs = api_resource_configs if api_resource_configs else {} # type Dict self.labels = labels self.credentials_path = "bigquery_hook_credentials.json" def get_conn(self) -> "BigQueryConnection": """Returns a BigQuery PEP 249 connection object.""" service = self.get_service() return BigQueryConnection( service=service, project_id=self.project_id, use_legacy_sql=self.use_legacy_sql, location=self.location, num_retries=self.num_retries, hook=self, ) def get_service(self) -> Resource: """Returns a BigQuery service object.""" warnings.warn( "This method will be deprecated. Please use `BigQueryHook.get_client` method", DeprecationWarning ) http_authorized = self._authorize() return build('bigquery', 'v2', http=http_authorized, cache_discovery=False) def get_client(self, project_id: Optional[str] = None, location: Optional[str] = None) -> Client: """ Returns authenticated BigQuery Client. :param project_id: Project ID for the project which the client acts on behalf of. :param location: Default location for jobs / datasets / tables. :return: """ return Client( client_info=CLIENT_INFO, project=project_id, location=location, credentials=self._get_credentials(), ) def get_uri(self) -> str: """Override DbApiHook get_uri method for get_sqlalchemy_engine()""" return f"bigquery://{self.project_id}" def get_sqlalchemy_engine(self, engine_kwargs=None): """ Get an sqlalchemy_engine object. :param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`. :return: the created engine. """ if engine_kwargs is None: engine_kwargs = {} connection = self.get_connection(self.gcp_conn_id) if connection.extra_dejson.get("extra__google_cloud_platform__key_path"): credentials_path = connection.extra_dejson['extra__google_cloud_platform__key_path'] return create_engine(self.get_uri(), credentials_path=credentials_path, **engine_kwargs) elif connection.extra_dejson.get("extra__google_cloud_platform__keyfile_dict"): credential_file_content = json.loads( connection.extra_dejson["extra__google_cloud_platform__keyfile_dict"] ) return create_engine(self.get_uri(), credentials_info=credential_file_content, **engine_kwargs) try: # 1. If the environment variable GOOGLE_APPLICATION_CREDENTIALS is set # ADC uses the service account key or configuration file that the variable points to. # 2. If the environment variable GOOGLE_APPLICATION_CREDENTIALS isn't set # ADC uses the service account that is attached to the resource that is running your code. return create_engine(self.get_uri(), **engine_kwargs) except Exception as e: self.log.error(e) raise AirflowException( "For now, we only support instantiating SQLAlchemy engine by" " using ADC" ", extra__google_cloud_platform__key_path" "and extra__google_cloud_platform__keyfile_dict" ) def get_records(self, sql, parameters=None): if self.location is None: raise AirflowException("Need to specify 'location' to use BigQueryHook.get_records()") return super().get_records(sql, parameters=parameters) @staticmethod def _resolve_table_reference( table_resource: Dict[str, Any], project_id: Optional[str] = None, dataset_id: Optional[str] = None, table_id: Optional[str] = None, ) -> Dict[str, Any]: try: # Check if tableReference is present and is valid TableReference.from_api_repr(table_resource["tableReference"]) except KeyError: # Something is wrong so we try to build the reference table_resource["tableReference"] = table_resource.get("tableReference", {}) values = [("projectId", project_id), ("tableId", table_id), ("datasetId", dataset_id)] for key, value in values: # Check if value is already present if no use the provided one resolved_value = table_resource["tableReference"].get(key, value) if not resolved_value: # If there's no value in tableReference and provided one is None raise error raise AirflowException( f"Table resource is missing proper `tableReference` and `{key}` is None" ) table_resource["tableReference"][key] = resolved_value return table_resource def insert_rows( self, table: Any, rows: Any, target_fields: Any = None, commit_every: Any = 1000, replace: Any = False, **kwargs, ) -> None: """ Insertion is currently unsupported. Theoretically, you could use BigQuery's streaming API to insert rows into a table, but this hasn't been implemented. """ raise NotImplementedError() def get_pandas_df( self, sql: str, parameters: Optional[Union[Iterable, Mapping]] = None, dialect: Optional[str] = None, **kwargs, ) -> DataFrame: """ Returns a Pandas DataFrame for the results produced by a BigQuery query. The DbApiHook method must be overridden because Pandas doesn't support PEP 249 connections, except for SQLite. See: https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447 https://github.com/pydata/pandas/issues/6900 :param sql: The BigQuery SQL to execute. :param parameters: The parameters to render the SQL query with (not used, leave to override superclass method) :param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL defaults to use `self.use_legacy_sql` if not specified :param kwargs: (optional) passed into pandas_gbq.read_gbq method """ if dialect is None: dialect = 'legacy' if self.use_legacy_sql else 'standard' credentials, project_id = self._get_credentials_and_project_id() return read_gbq( sql, project_id=project_id, dialect=dialect, verbose=False, credentials=credentials, **kwargs ) @GoogleBaseHook.fallback_to_default_project_id def table_exists(self, dataset_id: str, table_id: str, project_id: str) -> bool: """ Checks for the existence of a table in Google BigQuery. :param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook must provide access to the specified project. :param dataset_id: The name of the dataset in which to look for the table. :param table_id: The name of the table to check the existence of. """ table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id) try: self.get_client(project_id=project_id).get_table(table_reference) return True except NotFound: return False @GoogleBaseHook.fallback_to_default_project_id def table_partition_exists( self, dataset_id: str, table_id: str, partition_id: str, project_id: str ) -> bool: """ Checks for the existence of a partition in a table in Google BigQuery. :param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook must provide access to the specified project. :param dataset_id: The name of the dataset in which to look for the table. :param table_id: The name of the table to check the existence of. :param partition_id: The name of the partition to check the existence of. """ table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id) try: return partition_id in self.get_client(project_id=project_id).list_partitions(table_reference) except NotFound: return False @GoogleBaseHook.fallback_to_default_project_id def create_empty_table( self, project_id: Optional[str] = None, dataset_id: Optional[str] = None, table_id: Optional[str] = None, table_resource: Optional[Dict[str, Any]] = None, schema_fields: Optional[List] = None, time_partitioning: Optional[Dict] = None, cluster_fields: Optional[List[str]] = None, labels: Optional[Dict] = None, view: Optional[Dict] = None, materialized_view: Optional[Dict] = None, encryption_configuration: Optional[Dict] = None, retry: Optional[Retry] = DEFAULT_RETRY, location: Optional[str] = None, exists_ok: bool = True, ) -> Table: """ Creates a new, empty table in the dataset. To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg :param project_id: The project to create the table into. :param dataset_id: The dataset to create the table into. :param table_id: The Name of the table to be created. :param table_resource: Table resource as described in documentation: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table If provided all other parameters are ignored. :param schema_fields: If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema :param labels: a dictionary containing labels for the table, passed to BigQuery :param retry: Optional. How to retry the RPC. **Example**: :: schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning :param cluster_fields: [Optional] The fields used for clustering. BigQuery supports clustering for both partitioned and non-partitioned tables. https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields :param view: [Optional] A dictionary containing definition for the view. If set, it will create a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000", "useLegacySql": False } :param materialized_view: [Optional] The materialized view definition. :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } :param num_retries: Maximum number of retries in case of connection problems. :param location: (Optional) The geographic location where the table should reside. :param exists_ok: If ``True``, ignore "already exists" errors when creating the table. :return: Created table """ _table_resource: Dict[str, Any] = {} if self.location: _table_resource['location'] = self.location if schema_fields: _table_resource['schema'] = {'fields': schema_fields} if time_partitioning: _table_resource['timePartitioning'] = time_partitioning if cluster_fields: _table_resource['clustering'] = {'fields': cluster_fields} if labels: _table_resource['labels'] = labels if view: _table_resource['view'] = view if materialized_view: _table_resource['materializedView'] = materialized_view if encryption_configuration: _table_resource["encryptionConfiguration"] = encryption_configuration table_resource = table_resource or _table_resource table_resource = self._resolve_table_reference( table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id, ) table = Table.from_api_repr(table_resource) return self.get_client(project_id=project_id, location=location).create_table( table=table, exists_ok=exists_ok, retry=retry ) @GoogleBaseHook.fallback_to_default_project_id def create_empty_dataset( self, dataset_id: Optional[str] = None, project_id: Optional[str] = None, location: Optional[str] = None, dataset_reference: Optional[Dict[str, Any]] = None, exists_ok: bool = True, ) -> Dict[str, Any]: """ Create a new empty dataset: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert :param project_id: The name of the project where we want to create an empty a dataset. Don't need to provide, if projectId in dataset_reference. :param dataset_id: The id of dataset. Don't need to provide, if datasetId in dataset_reference. :param location: (Optional) The geographic location where the dataset should reside. There is no default value but the dataset will be created in US if nothing is provided. :param dataset_reference: Dataset reference that could be provided with request body. More info: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource :param exists_ok: If ``True``, ignore "already exists" errors when creating the dataset. """ dataset_reference = dataset_reference or {"datasetReference": {}} for param, value in zip(["datasetId", "projectId"], [dataset_id, project_id]): specified_param = dataset_reference["datasetReference"].get(param) if specified_param: if value: self.log.info( "`%s` was provided in both `dataset_reference` and as `%s`. " "Using value from `dataset_reference`", param, convert_camel_to_snake(param), ) continue # use specified value if not value: raise ValueError( f"Please specify `{param}` either in `dataset_reference` " f"or by providing `{convert_camel_to_snake(param)}`", ) # dataset_reference has no param but we can fallback to default value self.log.info( "%s was not specified in `dataset_reference`. Will use default value %s.", param, value ) dataset_reference["datasetReference"][param] = value location = location or self.location if location: dataset_reference["location"] = dataset_reference.get("location", location) dataset: Dataset = Dataset.from_api_repr(dataset_reference) self.log.info('Creating dataset: %s in project: %s ', dataset.dataset_id, dataset.project) dataset_object = self.get_client(location=location).create_dataset( dataset=dataset, exists_ok=exists_ok ) self.log.info('Dataset created successfully.') return dataset_object.to_api_repr() @GoogleBaseHook.fallback_to_default_project_id def get_dataset_tables( self, dataset_id: str, project_id: Optional[str] = None, max_results: Optional[int] = None, retry: Retry = DEFAULT_RETRY, ) -> List[Dict[str, Any]]: """ Get the list of tables for a given dataset. For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list :param dataset_id: the dataset ID of the requested dataset. :param project_id: (Optional) the project of the requested dataset. If None, self.project_id will be used. :param max_results: (Optional) the maximum number of tables to return. :param retry: How to retry the RPC. :return: List of tables associated with the dataset. """ self.log.info('Start getting tables list from dataset: %s.%s', project_id, dataset_id) tables = self.get_client().list_tables( dataset=DatasetReference(project=project_id, dataset_id=dataset_id), max_results=max_results, retry=retry, ) # Convert to a list (consumes all values) return [t.reference.to_api_repr() for t in tables] @GoogleBaseHook.fallback_to_default_project_id def delete_dataset( self, dataset_id: str, project_id: Optional[str] = None, delete_contents: bool = False, retry: Retry = DEFAULT_RETRY, ) -> None: """ Delete a dataset of Big query in your project. :param project_id: The name of the project where we have the dataset. :param dataset_id: The dataset to be delete. :param delete_contents: If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. :param retry: How to retry the RPC. """ self.log.info('Deleting from project: %s Dataset:%s', project_id, dataset_id) self.get_client(project_id=project_id).delete_dataset( dataset=DatasetReference(project=project_id, dataset_id=dataset_id), delete_contents=delete_contents, retry=retry, not_found_ok=True, ) @GoogleBaseHook.fallback_to_default_project_id def create_external_table( self, external_project_dataset_table: str, schema_fields: List, source_uris: List, source_format: str = 'CSV', autodetect: bool = False, compression: str = 'NONE', ignore_unknown_values: bool = False, max_bad_records: int = 0, skip_leading_rows: int = 0, field_delimiter: str = ',', quote_character: Optional[str] = None, allow_quoted_newlines: bool = False, allow_jagged_rows: bool = False, encoding: str = "UTF-8", src_fmt_configs: Optional[Dict] = None, labels: Optional[Dict] = None, description: Optional[str] = None, encryption_configuration: Optional[Dict] = None, location: Optional[str] = None, project_id: Optional[str] = None, ) -> Table: """ Creates a new external table in the dataset with the data from Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource for more details about these parameters. :param external_project_dataset_table: The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery table name to create external table. If ``<project>`` is not included, project will be the project defined in the connection json. :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :param source_format: File format to export. :param autodetect: Try to detect schema and format options automatically. Any option specified explicitly will be honored. :param compression: [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. :param max_bad_records: The maximum number of bad records that BigQuery can ignore when running the job. :param skip_leading_rows: Number of rows to skip when loading from a CSV. :param field_delimiter: The delimiter to use when loading from a CSV. :param quote_character: The value that is used to quote data sections in a CSV file. :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false). :param allow_jagged_rows: Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. Only applicable when source_format is CSV. :param encoding: The character encoding of the data. See: .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding :param src_fmt_configs: configure optional fields specific to the source format :param labels: A dictionary containing labels for the BiqQuery table. :param description: A string containing the description for the BigQuery table. :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } """ warnings.warn( "This method is deprecated. Please use `BigQueryHook.create_empty_table` method with " "passing the `table_resource` object. This gives more flexibility than this method.", DeprecationWarning, ) location = location or self.location src_fmt_configs = src_fmt_configs or {} source_format = source_format.upper() compression = compression.upper() external_config_api_repr = { 'autodetect': autodetect, 'sourceFormat': source_format, 'sourceUris': source_uris, 'compression': compression, 'ignoreUnknownValues': ignore_unknown_values, } # if following fields are not specified in src_fmt_configs, # honor the top-level params for backward-compatibility backward_compatibility_configs = { 'skipLeadingRows': skip_leading_rows, 'fieldDelimiter': field_delimiter, 'quote': quote_character, 'allowQuotedNewlines': allow_quoted_newlines, 'allowJaggedRows': allow_jagged_rows, 'encoding': encoding, } src_fmt_to_param_mapping = {'CSV': 'csvOptions', 'GOOGLE_SHEETS': 'googleSheetsOptions'} src_fmt_to_configs_mapping = { 'csvOptions': [ 'allowJaggedRows', 'allowQuotedNewlines', 'fieldDelimiter', 'skipLeadingRows', 'quote', 'encoding', ], 'googleSheetsOptions': ['skipLeadingRows'], } if source_format in src_fmt_to_param_mapping.keys(): valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[source_format]] src_fmt_configs = _validate_src_fmt_configs( source_format, src_fmt_configs, valid_configs, backward_compatibility_configs ) external_config_api_repr[src_fmt_to_param_mapping[source_format]] = src_fmt_configs # build external config external_config = ExternalConfig.from_api_repr(external_config_api_repr) if schema_fields: external_config.schema = [SchemaField.from_api_repr(f) for f in schema_fields] if max_bad_records: external_config.max_bad_records = max_bad_records # build table definition table = Table(table_ref=TableReference.from_string(external_project_dataset_table, project_id)) table.external_data_configuration = external_config if labels: table.labels = labels if description: table.description = description if encryption_configuration: table.encryption_configuration = EncryptionConfiguration.from_api_repr(encryption_configuration) self.log.info('Creating external table: %s', external_project_dataset_table) table_object = self.create_empty_table( table_resource=table.to_api_repr(), project_id=project_id, location=location, exists_ok=True ) self.log.info('External table created successfully: %s', external_project_dataset_table) return table_object @GoogleBaseHook.fallback_to_default_project_id def update_table( self, table_resource: Dict[str, Any], fields: Optional[List[str]] = None, dataset_id: Optional[str] = None, table_id: Optional[str] = None, project_id: Optional[str] = None, ) -> Dict[str, Any]: """ Change some fields of a table. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``table``, the field value will be deleted. If ``table.etag`` is not ``None``, the update will only succeed if the table on the server has the same ETag. Thus reading a table with ``get_table``, changing its fields, and then passing it to ``update_table`` will ensure that the changes will only be saved if no modifications to the table occurred since the read. :param project_id: The project to create the table into. :param dataset_id: The dataset to create the table into. :param table_id: The Name of the table to be created. :param table_resource: Table resource as described in documentation: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table The table has to contain ``tableReference`` or ``project_id``, ``dataset_id`` and ``table_id`` have to be provided. :param fields: The fields of ``table`` to change, spelled as the Table properties (e.g. "friendly_name"). """ fields = fields or list(table_resource.keys()) table_resource = self._resolve_table_reference( table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id ) table = Table.from_api_repr(table_resource) self.log.info('Updating table: %s', table_resource["tableReference"]) table_object = self.get_client(project_id=project_id).update_table(table=table, fields=fields) self.log.info('Table %s.%s.%s updated successfully', project_id, dataset_id, table_id) return table_object.to_api_repr() @GoogleBaseHook.fallback_to_default_project_id def patch_table( self, dataset_id: str, table_id: str, project_id: Optional[str] = None, description: Optional[str] = None, expiration_time: Optional[int] = None, external_data_configuration: Optional[Dict] = None, friendly_name: Optional[str] = None, labels: Optional[Dict] = None, schema: Optional[List] = None, time_partitioning: Optional[Dict] = None, view: Optional[Dict] = None, require_partition_filter: Optional[bool] = None, encryption_configuration: Optional[Dict] = None, ) -> None: """ Patch information in an existing table. It only updates fields that are provided in the request object. Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch :param dataset_id: The dataset containing the table to be patched. :param table_id: The Name of the table to be patched. :param project_id: The project containing the table to be patched. :param description: [Optional] A user-friendly description of this table. :param expiration_time: [Optional] The time when this table expires, in milliseconds since the epoch. :param external_data_configuration: [Optional] A dictionary containing properties of a table stored outside of BigQuery. :param friendly_name: [Optional] A descriptive name for this table. :param labels: [Optional] A dictionary containing labels associated with this table. :param schema: [Optional] If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema The supported schema modifications and unsupported schema modification are listed here: https://cloud.google.com/bigquery/docs/managing-table-schemas **Example**: :: schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :param time_partitioning: [Optional] A dictionary containing time-based partitioning definition for the table. :param view: [Optional] A dictionary containing definition for the view. If set, it will patch a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500", "useLegacySql": False } :param require_partition_filter: [Optional] If true, queries over the this table require a partition filter. If false, queries over the table :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } """ warnings.warn( "This method is deprecated, please use ``BigQueryHook.update_table`` method.", DeprecationWarning, ) table_resource: Dict[str, Any] = {} if description is not None: table_resource['description'] = description if expiration_time is not None: table_resource['expirationTime'] = expiration_time if external_data_configuration: table_resource['externalDataConfiguration'] = external_data_configuration if friendly_name is not None: table_resource['friendlyName'] = friendly_name if labels: table_resource['labels'] = labels if schema: table_resource['schema'] = {'fields': schema} if time_partitioning: table_resource['timePartitioning'] = time_partitioning if view: table_resource['view'] = view if require_partition_filter is not None: table_resource['requirePartitionFilter'] = require_partition_filter if encryption_configuration: table_resource["encryptionConfiguration"] = encryption_configuration self.update_table( table_resource=table_resource, fields=list(table_resource.keys()), project_id=project_id, dataset_id=dataset_id, table_id=table_id, ) @GoogleBaseHook.fallback_to_default_project_id def insert_all( self, project_id: str, dataset_id: str, table_id: str, rows: List, ignore_unknown_values: bool = False, skip_invalid_rows: bool = False, fail_on_error: bool = False, ) -> None: """ Method to stream data into BigQuery one record at a time without needing to run a load job .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll :param project_id: The name of the project where we have the table :param dataset_id: The name of the dataset where we have the table :param table_id: The name of the table :param rows: the rows to insert **Example or rows**: rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}] :param ignore_unknown_values: [Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. The default value is false, which treats unknown values as errors. :param skip_invalid_rows: [Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist. :param fail_on_error: [Optional] Force the task to fail if any errors occur. The default value is false, which indicates the task should not fail even if any insertion errors occur. """ self.log.info('Inserting %s row(s) into table %s:%s.%s', len(rows), project_id, dataset_id, table_id) table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id) bq_client = self.get_client(project_id=project_id) table = bq_client.get_table(table_ref) errors = bq_client.insert_rows( table=table, rows=rows, ignore_unknown_values=ignore_unknown_values, skip_invalid_rows=skip_invalid_rows, ) if errors: error_msg = f"{len(errors)} insert error(s) occurred. Details: {errors}" self.log.error(error_msg) if fail_on_error: raise AirflowException(f'BigQuery job failed. Error was: {error_msg}') else: self.log.info('All row(s) inserted successfully: %s:%s.%s', project_id, dataset_id, table_id) @GoogleBaseHook.fallback_to_default_project_id def update_dataset( self, fields: Sequence[str], dataset_resource: Dict[str, Any], dataset_id: Optional[str] = None, project_id: Optional[str] = None, retry: Retry = DEFAULT_RETRY, ) -> Dataset: """ Change some fields of a dataset. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``dataset``, it will be deleted. If ``dataset.etag`` is not ``None``, the update will only succeed if the dataset on the server has the same ETag. Thus reading a dataset with ``get_dataset``, changing its fields, and then passing it to ``update_dataset`` will ensure that the changes will only be saved if no modifications to the dataset occurred since the read. :param dataset_resource: Dataset resource that will be provided in request body. https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource :param dataset_id: The id of the dataset. :param fields: The properties of ``dataset`` to change (e.g. "friendly_name"). :param project_id: The Google Cloud Project ID :param retry: How to retry the RPC. """ dataset_resource["datasetReference"] = dataset_resource.get("datasetReference", {}) for key, value in zip(["datasetId", "projectId"], [dataset_id, project_id]): spec_value = dataset_resource["datasetReference"].get(key) if value and not spec_value: dataset_resource["datasetReference"][key] = value self.log.info('Start updating dataset') dataset = self.get_client(project_id=project_id).update_dataset( dataset=Dataset.from_api_repr(dataset_resource), fields=fields, retry=retry, ) self.log.info("Dataset successfully updated: %s", dataset) return dataset def patch_dataset( self, dataset_id: str, dataset_resource: Dict, project_id: Optional[str] = None ) -> Dict: """ Patches information in an existing dataset. It only replaces fields that are provided in the submitted dataset resource. More info: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/patch :param dataset_id: The BigQuery Dataset ID :param dataset_resource: Dataset resource that will be provided in request body. https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource :param project_id: The Google Cloud Project ID :rtype: dataset https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource """ warnings.warn("This method is deprecated. Please use ``update_dataset``.", DeprecationWarning) project_id = project_id or self.project_id if not dataset_id or not isinstance(dataset_id, str): raise ValueError( f"dataset_id argument must be provided and has a type 'str'. You provided: {dataset_id}" ) service = self.get_service() dataset_project_id = project_id or self.project_id self.log.info('Start patching dataset: %s:%s', dataset_project_id, dataset_id) dataset = ( service.datasets() .patch( datasetId=dataset_id, projectId=dataset_project_id, body=dataset_resource, ) .execute(num_retries=self.num_retries) ) self.log.info("Dataset successfully patched: %s", dataset) return dataset def get_dataset_tables_list( self, dataset_id: str, project_id: Optional[str] = None, table_prefix: Optional[str] = None, max_results: Optional[int] = None, ) -> List[Dict[str, Any]]: """ Method returns tables list of a BigQuery tables. If table prefix is specified, only tables beginning by it are returned. For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list :param dataset_id: The BigQuery Dataset ID :param project_id: The Google Cloud Project ID :param table_prefix: Tables must begin by this prefix to be returned (case sensitive) :param max_results: The maximum number of results to return in a single response page. Leverage the page tokens to iterate through the entire collection. :return: List of tables associated with the dataset """ warnings.warn("This method is deprecated. Please use ``get_dataset_tables``.", DeprecationWarning) project_id = project_id or self.project_id tables = self.get_client().list_tables( dataset=DatasetReference(project=project_id, dataset_id=dataset_id), max_results=max_results, ) if table_prefix: result = [t.reference.to_api_repr() for t in tables if t.table_id.startswith(table_prefix)] else: result = [t.reference.to_api_repr() for t in tables] self.log.info("%s tables found", len(result)) return result @GoogleBaseHook.fallback_to_default_project_id def get_datasets_list( self, project_id: Optional[str] = None, include_all: bool = False, filter_: Optional[str] = None, max_results: Optional[int] = None, page_token: Optional[str] = None, retry: Retry = DEFAULT_RETRY, ) -> List[DatasetListItem]: """ Method returns full list of BigQuery datasets in the current project For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :param include_all: True if results include hidden datasets. Defaults to False. :param filter_: An expression for filtering the results by label. For syntax, see https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter. :param filter_: str :param max_results: Maximum number of datasets to return. :param max_results: int :param page_token: Token representing a cursor into the datasets. If not passed, the API will return the first page of datasets. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. :param page_token: str :param retry: How to retry the RPC. """ datasets = self.get_client(project_id=project_id).list_datasets( project=project_id, include_all=include_all, filter=filter_, max_results=max_results, page_token=page_token, retry=retry, ) datasets_list = list(datasets) self.log.info("Datasets List: %s", len(datasets_list)) return datasets_list @GoogleBaseHook.fallback_to_default_project_id def get_dataset(self, dataset_id: str, project_id: Optional[str] = None) -> Dataset: """ Fetch the dataset referenced by dataset_id. :param dataset_id: The BigQuery Dataset ID :param project_id: The Google Cloud Project ID :return: dataset_resource .. seealso:: For more information, see Dataset Resource content: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource """ dataset = self.get_client(project_id=project_id).get_dataset( dataset_ref=DatasetReference(project_id, dataset_id) ) self.log.info("Dataset Resource: %s", dataset) return dataset @GoogleBaseHook.fallback_to_default_project_id def run_grant_dataset_view_access( self, source_dataset: str, view_dataset: str, view_table: str, view_project: Optional[str] = None, project_id: Optional[str] = None, ) -> Dict[str, Any]: """ Grant authorized view access of a dataset to a view table. If this view has already been granted access to the dataset, do nothing. This method is not atomic. Running it may clobber a simultaneous update. :param source_dataset: the source dataset :param view_dataset: the dataset that the view is in :param view_table: the table of the view :param project_id: the project of the source dataset. If None, self.project_id will be used. :param view_project: the project that the view is in. If None, self.project_id will be used. :return: the datasets resource of the source dataset. """ view_project = view_project or project_id view_access = AccessEntry( role=None, entity_type="view", entity_id={'projectId': view_project, 'datasetId': view_dataset, 'tableId': view_table}, ) dataset = self.get_dataset(project_id=project_id, dataset_id=source_dataset) # Check to see if the view we want to add already exists. if view_access not in dataset.access_entries: self.log.info( 'Granting table %s:%s.%s authorized view access to %s:%s dataset.', view_project, view_dataset, view_table, project_id, source_dataset, ) dataset.access_entries += [view_access] dataset = self.update_dataset( fields=["access"], dataset_resource=dataset.to_api_repr(), project_id=project_id ) else: self.log.info( 'Table %s:%s.%s already has authorized view access to %s:%s dataset.', view_project, view_dataset, view_table, project_id, source_dataset, ) return dataset.to_api_repr() @GoogleBaseHook.fallback_to_default_project_id def run_table_upsert( self, dataset_id: str, table_resource: Dict[str, Any], project_id: Optional[str] = None ) -> Dict[str, Any]: """ If the table already exists, update the existing table if not create new. Since BigQuery does not natively allow table upserts, this is not an atomic operation. :param dataset_id: the dataset to upsert the table into. :param table_resource: a table resource. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :param project_id: the project to upsert the table into. If None, project will be self.project_id. :return: """ table_id = table_resource['tableReference']['tableId'] table_resource = self._resolve_table_reference( table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id ) tables_list_resp = self.get_dataset_tables(dataset_id=dataset_id, project_id=project_id) if any(table['tableId'] == table_id for table in tables_list_resp): self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) table = self.update_table(table_resource=table_resource) else: self.log.info('Table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id) table = self.create_empty_table( table_resource=table_resource, project_id=project_id ).to_api_repr() return table def run_table_delete(self, deletion_dataset_table: str, ignore_if_missing: bool = False) -> None: """ Delete an existing table from the dataset; If the table does not exist, return an error unless ignore_if_missing is set to True. :param deletion_dataset_table: A dotted ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table will be deleted. :param ignore_if_missing: if True, then return success even if the requested table does not exist. :return: """ warnings.warn("This method is deprecated. Please use `delete_table`.", DeprecationWarning) return self.delete_table(table_id=deletion_dataset_table, not_found_ok=ignore_if_missing) @GoogleBaseHook.fallback_to_default_project_id def delete_table( self, table_id: str, not_found_ok: bool = True, project_id: Optional[str] = None, ) -> None: """ Delete an existing table from the dataset. If the table does not exist, return an error unless not_found_ok is set to True. :param table_id: A dotted ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table will be deleted. :param not_found_ok: if True, then return success even if the requested table does not exist. :param project_id: the project used to perform the request """ self.get_client(project_id=project_id).delete_table( table=Table.from_string(table_id), not_found_ok=not_found_ok, ) self.log.info('Deleted table %s', table_id) def get_tabledata( self, dataset_id: str, table_id: str, max_results: Optional[int] = None, selected_fields: Optional[str] = None, page_token: Optional[str] = None, start_index: Optional[int] = None, ) -> List[Dict]: """ Get the data of a given dataset.table and optionally with selected columns. see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list :param dataset_id: the dataset ID of the requested table. :param table_id: the table ID of the requested table. :param max_results: the maximum results to return. :param selected_fields: List of fields to return (comma-separated). If unspecified, all fields are returned. :param page_token: page token, returned from a previous call, identifying the result set. :param start_index: zero based index of the starting row to read. :return: list of rows """ warnings.warn("This method is deprecated. Please use `list_rows`.", DeprecationWarning) rows = self.list_rows( dataset_id=dataset_id, table_id=table_id, max_results=max_results, selected_fields=selected_fields, page_token=page_token, start_index=start_index, ) return [dict(r) for r in rows] @GoogleBaseHook.fallback_to_default_project_id def list_rows( self, dataset_id: str, table_id: str, max_results: Optional[int] = None, selected_fields: Optional[Union[List[str], str]] = None, page_token: Optional[str] = None, start_index: Optional[int] = None, project_id: Optional[str] = None, location: Optional[str] = None, ) -> List[Row]: """ List the rows of the table. See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list :param dataset_id: the dataset ID of the requested table. :param table_id: the table ID of the requested table. :param max_results: the maximum results to return. :param selected_fields: List of fields to return (comma-separated). If unspecified, all fields are returned. :param page_token: page token, returned from a previous call, identifying the result set. :param start_index: zero based index of the starting row to read. :param project_id: Project ID for the project which the client acts on behalf of. :param location: Default location for job. :return: list of rows """ location = location or self.location if isinstance(selected_fields, str): selected_fields = selected_fields.split(",") if selected_fields: selected_fields = [SchemaField(n, "") for n in selected_fields] else: selected_fields = None table = self._resolve_table_reference( table_resource={}, project_id=project_id, dataset_id=dataset_id, table_id=table_id, ) result = self.get_client(project_id=project_id, location=location).list_rows( table=Table.from_api_repr(table), selected_fields=selected_fields, max_results=max_results, page_token=page_token, start_index=start_index, ) return list(result) @GoogleBaseHook.fallback_to_default_project_id def get_schema(self, dataset_id: str, table_id: str, project_id: Optional[str] = None) -> dict: """ Get the schema for a given dataset and table. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :param dataset_id: the dataset ID of the requested table :param table_id: the table ID of the requested table :param project_id: the optional project ID of the requested table. If not provided, the connector's configured project will be used. :return: a table schema """ table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id) table = self.get_client(project_id=project_id).get_table(table_ref) return {"fields": [s.to_api_repr() for s in table.schema]} @GoogleBaseHook.fallback_to_default_project_id def update_table_schema( self, schema_fields_updates: List[Dict[str, Any]], include_policy_tags: bool, dataset_id: str, table_id: str, project_id: Optional[str] = None, ) -> Dict[str, Any]: """ Update fields within a schema for a given dataset and table. Note that some fields in schemas are immutable and trying to change them will cause an exception. If a new field is included it will be inserted which requires all required fields to be set. See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema :param include_policy_tags: If set to True policy tags will be included in the update request which requires special permissions even if unchanged see https://cloud.google.com/bigquery/docs/column-level-security#roles :param dataset_id: the dataset ID of the requested table to be updated :param table_id: the table ID of the table to be updated :param schema_fields_updates: a partial schema resource. see https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema **Example**: :: schema_fields_updates=[ {"name": "emp_name", "description": "Some New Description"}, {"name": "salary", "description": "Some New Description"}, {"name": "departments", "fields": [ {"name": "name", "description": "Some New Description"}, {"name": "type", "description": "Some New Description"} ]}, ] :param project_id: The name of the project where we want to update the table. """ def _build_new_schema( current_schema: List[Dict[str, Any]], schema_fields_updates: List[Dict[str, Any]] ) -> List[Dict[str, Any]]: # Turn schema_field_updates into a dict keyed on field names schema_fields_updates_dict = {field["name"]: field for field in deepcopy(schema_fields_updates)} # Create a new dict for storing the new schema, initiated based on the current_schema # as of Python 3.6, dicts retain order. new_schema = {field["name"]: field for field in deepcopy(current_schema)} # Each item in schema_fields_updates contains a potential patch # to a schema field, iterate over them for field_name, patched_value in schema_fields_updates_dict.items(): # If this field already exists, update it if field_name in new_schema: # If this field is of type RECORD and has a fields key we need to patch it recursively if "fields" in patched_value: patched_value["fields"] = _build_new_schema( new_schema[field_name]["fields"], patched_value["fields"] ) # Update the new_schema with the patched value new_schema[field_name].update(patched_value) # This is a new field, just include the whole configuration for it else: new_schema[field_name] = patched_value return list(new_schema.values()) def _remove_policy_tags(schema: List[Dict[str, Any]]): for field in schema: if "policyTags" in field: del field["policyTags"] if "fields" in field: _remove_policy_tags(field["fields"]) current_table_schema = self.get_schema( dataset_id=dataset_id, table_id=table_id, project_id=project_id )["fields"] new_schema = _build_new_schema(current_table_schema, schema_fields_updates) if not include_policy_tags: _remove_policy_tags(new_schema) table = self.update_table( table_resource={"schema": {"fields": new_schema}}, fields=["schema"], project_id=project_id, dataset_id=dataset_id, table_id=table_id, ) return table @GoogleBaseHook.fallback_to_default_project_id def poll_job_complete( self, job_id: str, project_id: Optional[str] = None, location: Optional[str] = None, retry: Retry = DEFAULT_RETRY, ) -> bool: """ Check if jobs completed. :param job_id: id of the job. :param project_id: Google Cloud Project where the job is running :param location: location the job is running :param retry: How to retry the RPC. :rtype: bool """ location = location or self.location job = self.get_client(project_id=project_id, location=location).get_job(job_id=job_id) return job.done(retry=retry) def cancel_query(self) -> None: """Cancel all started queries that have not yet completed""" warnings.warn( "This method is deprecated. Please use `BigQueryHook.cancel_job`.", DeprecationWarning, ) if self.running_job_id: self.cancel_job(job_id=self.running_job_id) else: self.log.info('No running BigQuery jobs to cancel.') @GoogleBaseHook.fallback_to_default_project_id def cancel_job( self, job_id: str, project_id: Optional[str] = None, location: Optional[str] = None, ) -> None: """ Cancels a job an wait for cancellation to complete :param job_id: id of the job. :param project_id: Google Cloud Project where the job is running :param location: location the job is running """ location = location or self.location if self.poll_job_complete(job_id=job_id): self.log.info('No running BigQuery jobs to cancel.') return self.log.info('Attempting to cancel job : %s, %s', project_id, job_id) self.get_client(location=location, project_id=project_id).cancel_job(job_id=job_id) # Wait for all the calls to cancel to finish max_polling_attempts = 12 polling_attempts = 0 job_complete = False while polling_attempts < max_polling_attempts and not job_complete: polling_attempts += 1 job_complete = self.poll_job_complete(job_id) if job_complete: self.log.info('Job successfully canceled: %s, %s', project_id, job_id) elif polling_attempts == max_polling_attempts: self.log.info( "Stopping polling due to timeout. Job with id %s " "has not completed cancel and may or may not finish.", job_id, ) else: self.log.info('Waiting for canceled job with id %s to finish.', job_id) time.sleep(5) @GoogleBaseHook.fallback_to_default_project_id def get_job( self, job_id: Optional[str] = None, project_id: Optional[str] = None, location: Optional[str] = None, ) -> Union[CopyJob, QueryJob, LoadJob, ExtractJob]: """ Retrieves a BigQuery job. For more information see: https://cloud.google.com/bigquery/docs/reference/v2/jobs :param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters. If not provided then uuid will be generated. :param project_id: Google Cloud Project where the job is running :param location: location the job is running """ client = self.get_client(project_id=project_id, location=location) job = client.get_job(job_id=job_id, project=project_id, location=location) return job @staticmethod def _custom_job_id(configuration: Dict[str, Any]) -> str: hash_base = json.dumps(configuration, sort_keys=True) uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest() microseconds_from_epoch = int( (datetime.now() - datetime.fromtimestamp(0)) / timedelta(microseconds=1) ) return f"airflow_{microseconds_from_epoch}_{uniqueness_suffix}" @GoogleBaseHook.fallback_to_default_project_id def insert_job( self, configuration: Dict, job_id: Optional[str] = None, project_id: Optional[str] = None, location: Optional[str] = None, nowait: bool = False, retry: Retry = DEFAULT_RETRY, timeout: Optional[float] = None, ) -> BigQueryJob: """ Executes a BigQuery job. Waits for the job to complete and returns job id. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs :param configuration: The configuration parameter maps directly to BigQuery's configuration field in the job object. See https://cloud.google.com/bigquery/docs/reference/v2/jobs for details. :param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters. If not provided then uuid will be generated. :param project_id: Google Cloud Project where the job is running :param location: location the job is running :param nowait: specify whether to insert job without waiting for the result :param retry: How to retry the RPC. :param timeout: The number of seconds to wait for the underlying HTTP transport before using ``retry``. """ location = location or self.location job_id = job_id or self._custom_job_id(configuration) client = self.get_client(project_id=project_id, location=location) job_data = { "configuration": configuration, "jobReference": {"jobId": job_id, "projectId": project_id, "location": location}, } supported_jobs = { LoadJob._JOB_TYPE: LoadJob, CopyJob._JOB_TYPE: CopyJob, ExtractJob._JOB_TYPE: ExtractJob, QueryJob._JOB_TYPE: QueryJob, } job = None for job_type, job_object in supported_jobs.items(): if job_type in configuration: job = job_object break if not job: raise AirflowException(f"Unknown job type. Supported types: {supported_jobs.keys()}") job = job.from_api_repr(job_data, client) self.log.info("Inserting job %s", job.job_id) if nowait: # Initiate the job and don't wait for it to complete. job._begin() else: # Start the job and wait for it to complete and get the result. job.result(timeout=timeout, retry=retry) return job def run_with_configuration(self, configuration: dict) -> str: """ Executes a BigQuery SQL query. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about the configuration parameter. :param configuration: The configuration parameter maps directly to BigQuery's configuration field in the job object. See https://cloud.google.com/bigquery/docs/reference/v2/jobs for details. """ warnings.warn("This method is deprecated. Please use `BigQueryHook.insert_job`", DeprecationWarning) job = self.insert_job(configuration=configuration, project_id=self.project_id) self.running_job_id = job.job_id return job.job_id def run_load( self, destination_project_dataset_table: str, source_uris: List, schema_fields: Optional[List] = None, source_format: str = 'CSV', create_disposition: str = 'CREATE_IF_NEEDED', skip_leading_rows: int = 0, write_disposition: str = 'WRITE_EMPTY', field_delimiter: str = ',', max_bad_records: int = 0, quote_character: Optional[str] = None, ignore_unknown_values: bool = False, allow_quoted_newlines: bool = False, allow_jagged_rows: bool = False, encoding: str = "UTF-8", schema_update_options: Optional[Iterable] = None, src_fmt_configs: Optional[Dict] = None, time_partitioning: Optional[Dict] = None, cluster_fields: Optional[List] = None, autodetect: bool = False, encryption_configuration: Optional[Dict] = None, labels: Optional[Dict] = None, description: Optional[str] = None, ) -> str: """ Executes a BigQuery load command to load data from Google Cloud Storage to BigQuery. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param destination_project_dataset_table: The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery table to load data into. If ``<project>`` is not included, project will be the project defined in the connection json. If a partition is specified the operator will automatically append the data, create a new partition or create a new DAY partitioned table. :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load Required if autodetect=False; optional if autodetect=True. :param autodetect: Attempt to autodetect the schema for CSV and JSON source files. :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :param source_format: File format to export. :param create_disposition: The create disposition if the table doesn't exist. :param skip_leading_rows: Number of rows to skip when loading from a CSV. :param write_disposition: The write disposition if the table already exists. :param field_delimiter: The delimiter to use when loading from a CSV. :param max_bad_records: The maximum number of bad records that BigQuery can ignore when running the job. :param quote_character: The value that is used to quote data sections in a CSV file. :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false). :param allow_jagged_rows: Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. Only applicable when source_format is CSV. :param encoding: The character encoding of the data. .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding :param schema_update_options: Allows the schema of the destination table to be updated as a side effect of the load job. :param src_fmt_configs: configure optional fields specific to the source format :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. :param cluster_fields: Request that the result of this load be stored sorted by one or more columns. BigQuery supports clustering for both partitioned and non-partitioned tables. The order of columns given determines the sort order. :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } :param labels: A dictionary containing labels for the BiqQuery table. :param description: A string containing the description for the BigQuery table. """ warnings.warn( "This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning ) if not self.project_id: raise ValueError("The project_id should be set") # To provide backward compatibility schema_update_options = list(schema_update_options or []) # bigquery only allows certain source formats # we check to make sure the passed source format is valid # if it's not, we raise a ValueError # Refer to this link for more details: # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat # noqa if schema_fields is None and not autodetect: raise ValueError('You must either pass a schema or autodetect=True.') if src_fmt_configs is None: src_fmt_configs = {} source_format = source_format.upper() allowed_formats = [ "CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS", "DATASTORE_BACKUP", "PARQUET", ] if source_format not in allowed_formats: raise ValueError( f"{source_format} is not a valid source format. " f"Please use one of the following types: {allowed_formats}." ) # bigquery also allows you to define how you want a table's schema to change # as a side effect of a load # for more details: # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"] if not set(allowed_schema_update_options).issuperset(set(schema_update_options)): raise ValueError( f"{schema_update_options} contains invalid schema update options. " f"Please only use one or more of the following options: {allowed_schema_update_options}" ) destination_project, destination_dataset, destination_table = _split_tablename( table_input=destination_project_dataset_table, default_project_id=self.project_id, var_name='destination_project_dataset_table', ) configuration: Dict[str, Any] = { 'load': { 'autodetect': autodetect, 'createDisposition': create_disposition, 'destinationTable': { 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table, }, 'sourceFormat': source_format, 'sourceUris': source_uris, 'writeDisposition': write_disposition, 'ignoreUnknownValues': ignore_unknown_values, } } time_partitioning = _cleanse_time_partitioning(destination_project_dataset_table, time_partitioning) if time_partitioning: configuration['load'].update({'timePartitioning': time_partitioning}) if cluster_fields: configuration['load'].update({'clustering': {'fields': cluster_fields}}) if schema_fields: configuration['load']['schema'] = {'fields': schema_fields} if schema_update_options: if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]: raise ValueError( "schema_update_options is only " "allowed if write_disposition is " "'WRITE_APPEND' or 'WRITE_TRUNCATE'." ) else: self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options) configuration['load']['schemaUpdateOptions'] = schema_update_options if max_bad_records: configuration['load']['maxBadRecords'] = max_bad_records if encryption_configuration: configuration["load"]["destinationEncryptionConfiguration"] = encryption_configuration if labels or description: configuration['load'].update({'destinationTableProperties': {}}) if labels: configuration['load']['destinationTableProperties']['labels'] = labels if description: configuration['load']['destinationTableProperties']['description'] = description src_fmt_to_configs_mapping = { 'CSV': [ 'allowJaggedRows', 'allowQuotedNewlines', 'autodetect', 'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues', 'nullMarker', 'quote', 'encoding', ], 'DATASTORE_BACKUP': ['projectionFields'], 'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'], 'PARQUET': ['autodetect', 'ignoreUnknownValues'], 'AVRO': ['useAvroLogicalTypes'], } valid_configs = src_fmt_to_configs_mapping[source_format] # if following fields are not specified in src_fmt_configs, # honor the top-level params for backward-compatibility backward_compatibility_configs = { 'skipLeadingRows': skip_leading_rows, 'fieldDelimiter': field_delimiter, 'ignoreUnknownValues': ignore_unknown_values, 'quote': quote_character, 'allowQuotedNewlines': allow_quoted_newlines, 'encoding': encoding, } src_fmt_configs = _validate_src_fmt_configs( source_format, src_fmt_configs, valid_configs, backward_compatibility_configs ) configuration['load'].update(src_fmt_configs) if allow_jagged_rows: configuration['load']['allowJaggedRows'] = allow_jagged_rows job = self.insert_job(configuration=configuration, project_id=self.project_id) self.running_job_id = job.job_id return job.job_id def run_copy( self, source_project_dataset_tables: Union[List, str], destination_project_dataset_table: str, write_disposition: str = 'WRITE_EMPTY', create_disposition: str = 'CREATE_IF_NEEDED', labels: Optional[Dict] = None, encryption_configuration: Optional[Dict] = None, ) -> str: """ Executes a BigQuery copy command to copy data from one BigQuery table to another. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy For more details about these parameters. :param source_project_dataset_tables: One or more dotted ``(project:|project.)<dataset>.<table>`` BigQuery tables to use as the source data. Use a list if there are multiple source tables. If ``<project>`` is not included, project will be the project defined in the connection json. :param destination_project_dataset_table: The destination BigQuery table. Format is: ``(project:|project.)<dataset>.<table>`` :param write_disposition: The write disposition if the table already exists. :param create_disposition: The create disposition if the table doesn't exist. :param labels: a dictionary containing labels for the job/query, passed to BigQuery :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } """ warnings.warn( "This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning ) if not self.project_id: raise ValueError("The project_id should be set") source_project_dataset_tables = ( [source_project_dataset_tables] if not isinstance(source_project_dataset_tables, list) else source_project_dataset_tables ) source_project_dataset_tables_fixup = [] for source_project_dataset_table in source_project_dataset_tables: source_project, source_dataset, source_table = _split_tablename( table_input=source_project_dataset_table, default_project_id=self.project_id, var_name='source_project_dataset_table', ) source_project_dataset_tables_fixup.append( {'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table} ) destination_project, destination_dataset, destination_table = _split_tablename( table_input=destination_project_dataset_table, default_project_id=self.project_id ) configuration = { 'copy': { 'createDisposition': create_disposition, 'writeDisposition': write_disposition, 'sourceTables': source_project_dataset_tables_fixup, 'destinationTable': { 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table, }, } } if labels: configuration['labels'] = labels if encryption_configuration: configuration["copy"]["destinationEncryptionConfiguration"] = encryption_configuration job = self.insert_job(configuration=configuration, project_id=self.project_id) self.running_job_id = job.job_id return job.job_id def run_extract( self, source_project_dataset_table: str, destination_cloud_storage_uris: List[str], compression: str = 'NONE', export_format: str = 'CSV', field_delimiter: str = ',', print_header: bool = True, labels: Optional[Dict] = None, ) -> str: """ Executes a BigQuery extract command to copy data from BigQuery to Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param source_project_dataset_table: The dotted ``<dataset>.<table>`` BigQuery table to use as the source data. :param destination_cloud_storage_uris: The destination Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). Follows convention defined here: https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple :param compression: Type of compression to use. :param export_format: File format to export. :param field_delimiter: The delimiter to use when extracting to a CSV. :param print_header: Whether to print a header for a CSV file extract. :param labels: a dictionary containing labels for the job/query, passed to BigQuery """ warnings.warn( "This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning ) if not self.project_id: raise ValueError("The project_id should be set") source_project, source_dataset, source_table = _split_tablename( table_input=source_project_dataset_table, default_project_id=self.project_id, var_name='source_project_dataset_table', ) configuration: Dict[str, Any] = { 'extract': { 'sourceTable': { 'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table, }, 'compression': compression, 'destinationUris': destination_cloud_storage_uris, 'destinationFormat': export_format, } } if labels: configuration['labels'] = labels if export_format == 'CSV': # Only set fieldDelimiter and printHeader fields if using CSV. # Google does not like it if you set these fields for other export # formats. configuration['extract']['fieldDelimiter'] = field_delimiter configuration['extract']['printHeader'] = print_header job = self.insert_job(configuration=configuration, project_id=self.project_id) self.running_job_id = job.job_id return job.job_id def run_query( self, sql: str, destination_dataset_table: Optional[str] = None, write_disposition: str = 'WRITE_EMPTY', allow_large_results: bool = False, flatten_results: Optional[bool] = None, udf_config: Optional[List] = None, use_legacy_sql: Optional[bool] = None, maximum_billing_tier: Optional[int] = None, maximum_bytes_billed: Optional[float] = None, create_disposition: str = 'CREATE_IF_NEEDED', query_params: Optional[List] = None, labels: Optional[Dict] = None, schema_update_options: Optional[Iterable] = None, priority: str = 'INTERACTIVE', time_partitioning: Optional[Dict] = None, api_resource_configs: Optional[Dict] = None, cluster_fields: Optional[List[str]] = None, location: Optional[str] = None, encryption_configuration: Optional[Dict] = None, ) -> str: """ Executes a BigQuery SQL query. Optionally persists results in a BigQuery table. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param sql: The BigQuery SQL to execute. :param destination_dataset_table: The dotted ``<dataset>.<table>`` BigQuery table to save the query results. :param write_disposition: What to do if the table already exists in BigQuery. :param allow_large_results: Whether to allow large results. :param flatten_results: If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. ``allowLargeResults`` must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened. :param udf_config: The User Defined Function configuration for the query. See https://cloud.google.com/bigquery/user-defined-functions for details. :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false). If `None`, defaults to `self.use_legacy_sql`. :param api_resource_configs: a dictionary that contain params 'configuration' applied for Google BigQuery Jobs API: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs for example, {'query': {'useQueryCache': False}}. You could use it if you need to provide some params that are not supported by the BigQueryHook like args. :param maximum_billing_tier: Positive integer that serves as a multiplier of the basic price. :param maximum_bytes_billed: Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default. :param create_disposition: Specifies whether the job is allowed to create new tables. :param query_params: a list of dictionary containing query parameter types and values, passed to BigQuery :param labels: a dictionary containing labels for the job/query, passed to BigQuery :param schema_update_options: Allows the schema of the destination table to be updated as a side effect of the query job. :param priority: Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE. :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. :param cluster_fields: Request that the result of this query be stored sorted by one or more columns. BigQuery supports clustering for both partitioned and non-partitioned tables. The order of columns given determines the sort order. :param location: The geographic location of the job. Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys). **Example**: :: encryption_configuration = { "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key" } """ warnings.warn( "This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning ) if not self.project_id: raise ValueError("The project_id should be set") labels = labels or self.labels schema_update_options = list(schema_update_options or []) if time_partitioning is None: time_partitioning = {} if location: self.location = location if not api_resource_configs: api_resource_configs = self.api_resource_configs else: _validate_value('api_resource_configs', api_resource_configs, dict) configuration = deepcopy(api_resource_configs) if 'query' not in configuration: configuration['query'] = {} else: _validate_value("api_resource_configs['query']", configuration['query'], dict) if sql is None and not configuration['query'].get('query', None): raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`') # BigQuery also allows you to define how you want a table's schema to change # as a side effect of a query job # for more details: # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions # noqa allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"] if not set(allowed_schema_update_options).issuperset(set(schema_update_options)): raise ValueError( f"{schema_update_options} contains invalid schema update options." f" Please only use one or more of the following options: {allowed_schema_update_options}" ) if schema_update_options: if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]: raise ValueError( "schema_update_options is only " "allowed if write_disposition is " "'WRITE_APPEND' or 'WRITE_TRUNCATE'." ) if destination_dataset_table: destination_project, destination_dataset, destination_table = _split_tablename( table_input=destination_dataset_table, default_project_id=self.project_id ) destination_dataset_table = { # type: ignore 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table, } if cluster_fields: cluster_fields = {'fields': cluster_fields} # type: ignore query_param_list = [ (sql, 'query', None, (str,)), (priority, 'priority', 'INTERACTIVE', (str,)), (use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool), (query_params, 'queryParameters', None, list), (udf_config, 'userDefinedFunctionResources', None, list), (maximum_billing_tier, 'maximumBillingTier', None, int), (maximum_bytes_billed, 'maximumBytesBilled', None, float), (time_partitioning, 'timePartitioning', {}, dict), (schema_update_options, 'schemaUpdateOptions', None, list), (destination_dataset_table, 'destinationTable', None, dict), (cluster_fields, 'clustering', None, dict), ] # type: List[Tuple] for param, param_name, param_default, param_type in query_param_list: if param_name not in configuration['query'] and param in [None, {}, ()]: if param_name == 'timePartitioning': param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning) param = param_default if param in [None, {}, ()]: continue _api_resource_configs_duplication_check(param_name, param, configuration['query']) configuration['query'][param_name] = param # check valid type of provided param, # it last step because we can get param from 2 sources, # and first of all need to find it _validate_value(param_name, configuration['query'][param_name], param_type) if param_name == 'schemaUpdateOptions' and param: self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options) if param_name != 'destinationTable': continue for key in ['projectId', 'datasetId', 'tableId']: if key not in configuration['query']['destinationTable']: raise ValueError( "Not correct 'destinationTable' in " "api_resource_configs. 'destinationTable' " "must be a dict with {'projectId':'', " "'datasetId':'', 'tableId':''}" ) configuration['query'].update( { 'allowLargeResults': allow_large_results, 'flattenResults': flatten_results, 'writeDisposition': write_disposition, 'createDisposition': create_disposition, } ) if ( 'useLegacySql' in configuration['query'] and configuration['query']['useLegacySql'] and 'queryParameters' in configuration['query'] ): raise ValueError("Query parameters are not allowed when using legacy SQL") if labels: _api_resource_configs_duplication_check('labels', labels, configuration) configuration['labels'] = labels if encryption_configuration: configuration["query"]["destinationEncryptionConfiguration"] = encryption_configuration job = self.insert_job(configuration=configuration, project_id=self.project_id) self.running_job_id = job.job_id return job.job_id class BigQueryConnection: """ BigQuery does not have a notion of a persistent connection. Thus, these objects are small stateless factories for cursors, which do all the real work. """ def __init__(self, *args, **kwargs) -> None: self._args = args self._kwargs = kwargs def close(self) -> None: """The BigQueryConnection does not have anything to close""" def commit(self) -> None: """The BigQueryConnection does not support transactions""" def cursor(self) -> "BigQueryCursor": """Return a new :py:class:`Cursor` object using the connection""" return BigQueryCursor(*self._args, **self._kwargs) def rollback(self) -> NoReturn: """The BigQueryConnection does not have transactions""" raise NotImplementedError("BigQueryConnection does not have transactions") class BigQueryBaseCursor(LoggingMixin): """ The BigQuery base cursor contains helper methods to execute queries against BigQuery. The methods can be used directly by operators, in cases where a PEP 249 cursor isn't needed. """ def __init__( self, service: Any, project_id: str, hook: BigQueryHook, use_legacy_sql: bool = True, api_resource_configs: Optional[Dict] = None, location: Optional[str] = None, num_retries: int = 5, labels: Optional[Dict] = None, ) -> None: super().__init__() self.service = service self.project_id = project_id self.use_legacy_sql = use_legacy_sql if api_resource_configs: _validate_value("api_resource_configs", api_resource_configs, dict) self.api_resource_configs = api_resource_configs if api_resource_configs else {} # type Dict self.running_job_id = None # type: Optional[str] self.location = location self.num_retries = num_retries self.labels = labels self.hook = hook def create_empty_table(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`", DeprecationWarning, stacklevel=3, ) return self.hook.create_empty_table(*args, **kwargs) def create_empty_dataset(self, *args, **kwargs) -> Dict[str, Any]: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`", DeprecationWarning, stacklevel=3, ) return self.hook.create_empty_dataset(*args, **kwargs) def get_dataset_tables(self, *args, **kwargs) -> List[Dict[str, Any]]: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`", DeprecationWarning, stacklevel=3, ) return self.hook.get_dataset_tables(*args, **kwargs) def delete_dataset(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`", DeprecationWarning, stacklevel=3, ) return self.hook.delete_dataset(*args, **kwargs) def create_external_table(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`", DeprecationWarning, stacklevel=3, ) return self.hook.create_external_table(*args, **kwargs) def patch_table(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`", DeprecationWarning, stacklevel=3, ) return self.hook.patch_table(*args, **kwargs) def insert_all(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`", DeprecationWarning, stacklevel=3, ) return self.hook.insert_all(*args, **kwargs) def update_dataset(self, *args, **kwargs) -> Dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`", DeprecationWarning, stacklevel=3, ) return Dataset.to_api_repr(self.hook.update_dataset(*args, **kwargs)) def patch_dataset(self, *args, **kwargs) -> Dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`", DeprecationWarning, stacklevel=3, ) return self.hook.patch_dataset(*args, **kwargs) def get_dataset_tables_list(self, *args, **kwargs) -> List[Dict[str, Any]]: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`", DeprecationWarning, stacklevel=3, ) return self.hook.get_dataset_tables_list(*args, **kwargs) def get_datasets_list(self, *args, **kwargs) -> list: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`", DeprecationWarning, stacklevel=3, ) return self.hook.get_datasets_list(*args, **kwargs) def get_dataset(self, *args, **kwargs) -> dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`", DeprecationWarning, stacklevel=3, ) return self.hook.get_dataset(*args, **kwargs) def run_grant_dataset_view_access(self, *args, **kwargs) -> dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_grant_dataset_view_access` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks" ".bigquery.BigQueryHook.run_grant_dataset_view_access`", DeprecationWarning, stacklevel=3, ) return self.hook.run_grant_dataset_view_access(*args, **kwargs) def run_table_upsert(self, *args, **kwargs) -> dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`", DeprecationWarning, stacklevel=3, ) return self.hook.run_table_upsert(*args, **kwargs) def run_table_delete(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`", DeprecationWarning, stacklevel=3, ) return self.hook.run_table_delete(*args, **kwargs) def get_tabledata(self, *args, **kwargs) -> List[dict]: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`", DeprecationWarning, stacklevel=3, ) return self.hook.get_tabledata(*args, **kwargs) def get_schema(self, *args, **kwargs) -> dict: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`", DeprecationWarning, stacklevel=3, ) return self.hook.get_schema(*args, **kwargs) def poll_job_complete(self, *args, **kwargs) -> bool: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`", DeprecationWarning, stacklevel=3, ) return self.hook.poll_job_complete(*args, **kwargs) def cancel_query(self, *args, **kwargs) -> None: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`", DeprecationWarning, stacklevel=3, ) return self.hook.cancel_query(*args, **kwargs) # type: ignore def run_with_configuration(self, *args, **kwargs) -> str: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`", DeprecationWarning, stacklevel=3, ) return self.hook.run_with_configuration(*args, **kwargs) def run_load(self, *args, **kwargs) -> str: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`", DeprecationWarning, stacklevel=3, ) return self.hook.run_load(*args, **kwargs) def run_copy(self, *args, **kwargs) -> str: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`", DeprecationWarning, stacklevel=3, ) return self.hook.run_copy(*args, **kwargs) def run_extract(self, *args, **kwargs) -> str: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`", DeprecationWarning, stacklevel=3, ) return self.hook.run_extract(*args, **kwargs) def run_query(self, *args, **kwargs) -> str: """ This method is deprecated. Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query` """ warnings.warn( "This method is deprecated. " "Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`", DeprecationWarning, stacklevel=3, ) return self.hook.run_query(*args, **kwargs) class BigQueryCursor(BigQueryBaseCursor): """ A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249 implementation was used as a reference: https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py https://github.com/dropbox/PyHive/blob/master/pyhive/common.py """ def __init__( self, service: Any, project_id: str, hook: BigQueryHook, use_legacy_sql: bool = True, location: Optional[str] = None, num_retries: int = 5, ) -> None: super().__init__( service=service, project_id=project_id, hook=hook, use_legacy_sql=use_legacy_sql, location=location, num_retries=num_retries, ) self.buffersize = None # type: Optional[int] self.page_token = None # type: Optional[str] self.job_id = None # type: Optional[str] self.buffer = [] # type: list self.all_pages_loaded = False # type: bool @property def description(self) -> None: """The schema description method is not currently implemented""" raise NotImplementedError def close(self) -> None: """By default, do nothing""" @property def rowcount(self) -> int: """By default, return -1 to indicate that this is not supported""" return -1 def execute(self, operation: str, parameters: Optional[dict] = None) -> None: """ Executes a BigQuery query, and returns the job ID. :param operation: The query to execute. :param parameters: Parameters to substitute into the query. """ sql = _bind_parameters(operation, parameters) if parameters else operation self.flush_results() self.job_id = self.hook.run_query(sql) def executemany(self, operation: str, seq_of_parameters: list) -> None: """ Execute a BigQuery query multiple times with different parameters. :param operation: The query to execute. :param seq_of_parameters: List of dictionary parameters to substitute into the query. """ for parameters in seq_of_parameters: self.execute(operation, parameters) def flush_results(self) -> None: """Flush results related cursor attributes""" self.page_token = None self.job_id = None self.all_pages_loaded = False self.buffer = [] def fetchone(self) -> Union[List, None]: """Fetch the next row of a query result set""" return self.next() def next(self) -> Union[List, None]: """ Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. """ if not self.job_id: return None if not self.buffer: if self.all_pages_loaded: return None query_results = ( self.service.jobs() .getQueryResults( projectId=self.project_id, jobId=self.job_id, location=self.location, pageToken=self.page_token, ) .execute(num_retries=self.num_retries) ) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = [_bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f'])] self.buffer.append(typed_row) if not self.page_token: self.all_pages_loaded = True else: # Reset all state since we've exhausted the results. self.flush_results() return None return self.buffer.pop(0) def fetchmany(self, size: Optional[int] = None) -> list: """ Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to :py:meth:`execute` did not produce any result set or no call was issued yet. """ if size is None: size = self.arraysize result = [] for _ in range(size): one = self.fetchone() if one is None: break result.append(one) return result def fetchall(self) -> List[list]: """ Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). """ result = [] while True: one = self.fetchone() if one is None: break result.append(one) return result def get_arraysize(self) -> int: """Specifies the number of rows to fetch at a time with .fetchmany()""" return self.buffersize or 1 def set_arraysize(self, arraysize: int) -> None: """Specifies the number of rows to fetch at a time with .fetchmany()""" self.buffersize = arraysize arraysize = property(get_arraysize, set_arraysize) def setinputsizes(self, sizes: Any) -> None: """Does nothing by default""" def setoutputsize(self, size: Any, column: Any = None) -> None: """Does nothing by default""" def _bind_parameters(operation: str, parameters: dict) -> str: """Helper method that binds parameters to a SQL query""" # inspired by MySQL Python Connector (conversion.py) string_parameters = {} # type Dict[str, str] for (name, value) in parameters.items(): if value is None: string_parameters[name] = 'NULL' elif isinstance(value, str): string_parameters[name] = "'" + _escape(value) + "'" else: string_parameters[name] = str(value) return operation % string_parameters def _escape(s: str) -> str: """Helper method that escapes parameters to a SQL query""" e = s e = e.replace('\\', '\\\\') e = e.replace('\n', '\\n') e = e.replace('\r', '\\r') e = e.replace("'", "\\'") e = e.replace('"', '\\"') return e def _bq_cast(string_field: str, bq_type: str) -> Union[None, int, float, bool, str]: """ Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings. """ if string_field is None: return None elif bq_type == 'INTEGER': return int(string_field) elif bq_type in ('FLOAT', 'TIMESTAMP'): return float(string_field) elif bq_type == 'BOOLEAN': if string_field not in ['true', 'false']: raise ValueError(f"{string_field} must have value 'true' or 'false'") return string_field == 'true' else: return string_field def _split_tablename( table_input: str, default_project_id: str, var_name: Optional[str] = None ) -> Tuple[str, str, str]: if '.' not in table_input: raise ValueError(f'Expected table name in the format of <dataset>.<table>. Got: {table_input}') if not default_project_id: raise ValueError("INTERNAL: No default project is specified") def var_print(var_name): if var_name is None: return "" else: return f"Format exception for {var_name}: " if table_input.count('.') + table_input.count(':') > 3: raise Exception(f'{var_print(var_name)}Use either : or . to specify project got {table_input}') cmpt = table_input.rsplit(':', 1) project_id = None rest = table_input if len(cmpt) == 1: project_id = None rest = cmpt[0] elif len(cmpt) == 2 and cmpt[0].count(':') <= 1: if cmpt[-1].count('.') != 2: project_id = cmpt[0] rest = cmpt[1] else: raise Exception( f'{var_print(var_name)}Expect format of (<project:)<dataset>.<table>, got {table_input}' ) cmpt = rest.split('.') if len(cmpt) == 3: if project_id: raise ValueError(f"{var_print(var_name)}Use either : or . to specify project") project_id = cmpt[0] dataset_id = cmpt[1] table_id = cmpt[2] elif len(cmpt) == 2: dataset_id = cmpt[0] table_id = cmpt[1] else: raise Exception( f'{var_print(var_name)}Expect format of (<project.|<project:)<dataset>.<table>, got {table_input}' ) if project_id is None: if var_name is not None: log.info( 'Project not included in %s: %s; using project "%s"', var_name, table_input, default_project_id, ) project_id = default_project_id return project_id, dataset_id, table_id def _cleanse_time_partitioning( destination_dataset_table: Optional[str], time_partitioning_in: Optional[Dict] ) -> Dict: # if it is a partitioned table ($ is in the table name) add partition load option if time_partitioning_in is None: time_partitioning_in = {} time_partitioning_out = {} if destination_dataset_table and '$' in destination_dataset_table: time_partitioning_out['type'] = 'DAY' time_partitioning_out.update(time_partitioning_in) return time_partitioning_out def _validate_value(key: Any, value: Any, expected_type: Type) -> None: """Function to check expected type and raise error if type is not correct""" if not isinstance(value, expected_type): raise TypeError(f"{key} argument must have a type {expected_type} not {type(value)}") def _api_resource_configs_duplication_check( key: Any, value: Any, config_dict: dict, config_dict_name='api_resource_configs' ) -> None: if key in config_dict and value != config_dict[key]: raise ValueError( "Values of {param_name} param are duplicated. " "{dict_name} contained {param_name} param " "in `query` config and {param_name} was also provided " "with arg to run_query() method. Please remove duplicates.".format( param_name=key, dict_name=config_dict_name ) ) def _validate_src_fmt_configs( source_format: str, src_fmt_configs: dict, valid_configs: List[str], backward_compatibility_configs: Optional[Dict] = None, ) -> Dict: """ Validates the given src_fmt_configs against a valid configuration for the source format. Adds the backward compatibility config to the src_fmt_configs. :param source_format: File format to export. :param src_fmt_configs: Configure optional fields specific to the source format. :param valid_configs: Valid configuration specific to the source format :param backward_compatibility_configs: The top-level params for backward-compatibility """ if backward_compatibility_configs is None: backward_compatibility_configs = {} for k, v in backward_compatibility_configs.items(): if k not in src_fmt_configs and k in valid_configs: src_fmt_configs[k] = v for k, v in src_fmt_configs.items(): if k not in valid_configs: raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.") return src_fmt_configs
52,450
1,062
<reponame>larkov/MailTrackerBlocker // // Generated by class-dump 3.5b1 (64 bit) (Debug version compiled Dec 3 2019 19:59:57). // // Copyright (C) 1997-2019 <NAME>. // #import <MailFW/MFLibraryUpgradeStep.h> @interface MFCreateServerLabelsTableUpgradeStep : MFLibraryUpgradeStep { } + (unsigned long long)targetVersion; // IMP=0x000000000003f2c2 - (void)_populateFromLocalMessagesTable; // IMP=0x000000000003f430 - (void)_populateFromIMAPLabelsTable; // IMP=0x000000000003f3c2 - (void)runWithRowIDsNeedingConversationRecalculation:(id)arg1; // IMP=0x000000000003f2cd @end
224
803
<filename>dev/ese/src/inc/sortapi.hxx<gh_stars>100-1000 // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. ERR ErrSORTInsert ( FUCB *pfucb, const KEY& key, const DATA& data ); ERR ErrSORTEndInsert( FUCB *pfucb ); ERR ErrSORTFirst ( FUCB *pfucb ); ERR ErrSORTLast ( FUCB *pfucb ); ERR ErrSORTNext ( FUCB *pfucb ); ERR ErrSORTPrev ( FUCB *pfucb ); ERR ErrSORTSeek ( FUCB * const pfucb, const KEY& key ); ERR ErrSORTOpen ( PIB *ppib, FUCB **ppfucb, const BOOL fRemoveDuplicateKey, const BOOL fRemoveDuplicateKeyData ); VOID SORTClose ( FUCB *pfucb ); VOID SORTICloseRun ( PIB * const ppib, SCB * const pscb ); VOID SORTClosePscb ( SCB *pscb ); ERR ErrSORTCheckIndexRange ( FUCB *pfucb ); ERR ErrSORTCopyRecords( PIB *ppib, FUCB *pfucbSrc, FUCB *pfucbDest, CPCOL *rgcpcol, ULONG ccpcolMax, LONG crecMax, ULONG *pcsridCopied, _Out_ QWORD *pqwAutoIncMax, BYTE *pbLVBuf, size_t cbLVBuf, JET_COLUMNID *mpcolumnidcolumnidTagged, STATUSINFO *pstatus ); ERR ErrSORTIncrementLVRefcountDest( FUCB * const pfucbSrc, const LvId lidSrc, LvId * const plidDest ); // ======================== API ============================ ERR VTAPI ErrIsamSortOpen( PIB *ppib, JET_COLUMNDEF *rgcolumndef, ULONG ccolumndef, JET_UNICODEINDEX2 *pidxunicode, JET_GRBIT grbit, FUCB **ppfucb, JET_COLUMNID *rgcolumnid, ULONG cbKeyMost, ULONG cbVarSegMac ); ERR VTAPI ErrIsamSortMove( JET_SESID sesid, JET_VTID vtid, LONG crow, JET_GRBIT grbit ); ERR VTAPI ErrIsamSortSetIndexRange( JET_SESID sesid, JET_VTID vtid, JET_GRBIT grbit ); // ERR VTAPI ErrIsamSortInsert( // JET_SESID sesid, // JET_VTID vtid, // BYTE *pb, // ULONG cbMax, // ULONG *pcbActual ); ERR VTAPI ErrIsamSortSeek( JET_SESID sesid, JET_VTID vtid, JET_GRBIT grbit ); ERR VTAPI ErrIsamSortDupCursor( JET_SESID sesid, JET_VTID vtid, JET_TABLEID *tableid, JET_GRBIT ulFlags); ERR VTAPI ErrIsamSortClose( JET_SESID sesid, JET_VTID vtid ); ERR VTAPI ErrIsamSortGotoBookmark( JET_SESID sesid, JET_VTID vtid, const VOID * const pvBookmark, const ULONG cbBookmark ); ERR VTAPI ErrIsamSortGetTableInfo( JET_SESID sesid, JET_VTID vtid, _Out_bytecap_(cbOutMax) VOID *pv, ULONG cbOutMax, ULONG lInfoLevel ); ERR VTAPI ErrIsamCopyBookmarks( JET_SESID sesid, JET_VTID vtid, FUCB *pfucbDest, JET_COLUMNID columnidDest, ULONG crecMax, ULONG *pcrowCopied, ULONG *precidLast ); ERR VTAPI ErrIsamSortRetrieveKey( JET_SESID sesid, JET_VTID vtid, VOID* pb, const ULONG cbMax, ULONG* pcbActual, JET_GRBIT grbit ); ERR VTAPI ErrIsamSortGetBookmark( JET_SESID sesid, JET_VTID vtid, VOID * const pvBookmark, const ULONG cbMax, ULONG * const pcbActual ); // INLINE HACKS INLINE ERR VTAPI ErrIsamSortMove( PIB *ppib, FUCB *pfucb, LONG crow, JET_GRBIT grbit ) { return ErrIsamSortMove( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb ), crow, grbit ); } INLINE ERR VTAPI ErrIsamSortSetIndexRange( PIB *ppib, FUCB *pfucb, JET_GRBIT grbit ) { return ErrIsamSortSetIndexRange( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb ), grbit ); } // INLINE ERR VTAPI ErrIsamSortInsert( // PIB *ppib, // FUCB *pfucb, // BYTE *pb, // ULONG cbMax, // ULONG *pcbActual ) // { // return ErrIsamSortInsert( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb), // pb, cbMax, pcbActual ); // } INLINE ERR VTAPI ErrIsamSortSeek( PIB *ppib, FUCB *pfucb, JET_GRBIT grbit ) { return ErrIsamSortSeek( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb), grbit ); } INLINE ERR VTAPI ErrIsamSortDupCursor( PIB *ppib, FUCB *pfucb, JET_TABLEID *tableid, JET_GRBIT ulFlags) { return ErrIsamSortDupCursor( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb), tableid, ulFlags ); } INLINE ERR VTAPI ErrIsamSortClose( PIB *ppib, FUCB *pfucb ) { return ErrIsamSortClose( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb ) ); } INLINE ERR VTAPI ErrIsamSortGotoBookmark( PIB * ppib, FUCB * pfucb, const VOID * const pvBookmark, const ULONG cbBookmark ) { return ErrIsamSortGotoBookmark( (JET_SESID)ppib, (JET_VTID)pfucb, pvBookmark, cbBookmark ); } INLINE ERR VTAPI ErrIsamSortGetTableInfo( PIB *ppib, FUCB *pfucb, _Out_bytecap_(cbOutMax) VOID *pv, ULONG cbOutMax, ULONG lInfoLevel ) { return ErrIsamSortGetTableInfo( ( JET_SESID )( ppib ), ( JET_VTID )( pfucb), pv, cbOutMax, lInfoLevel ); } INLINE ERR VTAPI ErrIsamSortRetrieveKey( PIB* ppib, FUCB* pfucb, VOID* pb, const ULONG cbMax, ULONG* pcbActual, JET_GRBIT grbit ) { return ErrIsamSortRetrieveKey( (JET_SESID)ppib, (JET_VTID)pfucb, pb, cbMax, pcbActual, grbit ); } INLINE ERR VTAPI ErrIsamSortGetBookmark( PIB *ppib, FUCB *pfucb, VOID * const pvBookmark, const ULONG cbMax, ULONG * const pcbActual ) { return ErrIsamSortGetBookmark( (JET_SESID)ppib, (JET_VTID)pfucb, pvBookmark, cbMax, pcbActual ); } INLINE VOID SORTBeforeFirst( FUCB *pfucb ) { pfucb->ispairCurr = -1L; DIRBeforeFirst( pfucb ); }
3,972
370
<filename>3rdparty/suitesparse-metis-for-windows-1.2.1/SuiteSparse/UMFPACK/SourceWrappers/umf_zl_row_search.o.c<gh_stars>100-1000 #define ZLONG #include <../Source/umf_row_search.c>
80
355
<gh_stars>100-1000 /* * This file is part of helper, licensed under the MIT License. * * Copyright (c) lucko (Luck) <<EMAIL>> * Copyright (c) contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package me.lucko.helper.metadata; import java.lang.ref.SoftReference; import java.util.Objects; import java.util.function.Supplier; import javax.annotation.Nullable; /** * Represents a value wrapped in a {@link SoftReference} * * @param <T> the wrapped value type */ public final class SoftValue<T> implements TransientValue<T> { public static <T> SoftValue<T> of(T value) { Objects.requireNonNull(value, "value"); return new SoftValue<>(value); } public static <T> Supplier<SoftValue<T>> supplied(Supplier<? extends T> supplier) { Objects.requireNonNull(supplier, "supplier"); return () -> { T value = supplier.get(); Objects.requireNonNull(value, "value"); return new SoftValue<>(value); }; } private final SoftReference<T> value; private SoftValue(T value) { this.value = new SoftReference<>(value); } @Nullable @Override public T getOrNull() { return this.value.get(); } @Override public boolean shouldExpire() { return this.value.get() == null; } }
782
317
package com.googlecode.totallylazy; public interface BinaryPredicate<T> { boolean matches(T a, T b); }
38
654
<filename>Arduino/Arduino_LoRa_Generic_Sensor/LeafWetness.h /* * Copyright (C) 2016 <NAME>, University of Pau, France * * <EMAIL> */ #ifndef LEAFWETNESS_H #define LEAFWETNESS_H #include "Sensor.h" #define LW_SCALE _BOARD_VOLT_SCALE class LeafWetness : public Sensor { public: LeafWetness(char* nomenclature, bool is_analog, bool is_connected, bool is_low_power, uint8_t pin_read, uint8_t pin_power); void update_data(); double get_value(); }; #endif
188
1,444
<gh_stars>1000+ package mage.game.permanent.token; import mage.MageInt; import mage.constants.CardType; import mage.constants.SubType; /** * * @author <EMAIL> */ public final class OxToken extends TokenImpl { public OxToken() { super("Ox", "2/4 white Ox creature token"); cardType.add(CardType.CREATURE); color.setWhite(true); subtype.add(SubType.OX); power = new MageInt(2); toughness = new MageInt(4); } public OxToken(final OxToken token) { super(token); } @Override public OxToken copy() { return new OxToken(this); } }
261
5,169
{ "name": "FontAwesomeKit", "version": "2.1.8", "summary": "Icon font library for iOS. Currently supports Font-Awesome, Foundation icons, Zocial, and ionicons", "homepage": "https://github.com/PrideChung/FontAwesomeKit", "screenshots": "http://i.minus.com/i3vNn0fTwcJeI.png", "license": "MIT", "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/PrideChung/FontAwesomeKit.git", "tag": "2.1.8" }, "platforms": { "ios": "6.0" }, "ios": { "frameworks": [ "UIKit", "CoreText" ] }, "source_files": "FontAwesomeKit/FontAwesomeKit.h", "requires_arc": true, "subspecs": [ { "name": "Core", "source_files": "FontAwesomeKit/FAKIcon.{h,m}" }, { "name": "FontAwesome", "dependencies": { "FontAwesomeKit/Core": [ ] }, "source_files": "FontAwesomeKit/FAKFontAwesome.{h,m}", "resources": "FontAwesomeKit/FontAwesome.otf" }, { "name": "FoundationIcons", "dependencies": { "FontAwesomeKit/Core": [ ] }, "source_files": "FontAwesomeKit/FAKFoundationIcons.{h,m}", "resources": "FontAwesomeKit/foundation-icons.ttf" }, { "name": "Zocial", "dependencies": { "FontAwesomeKit/Core": [ ] }, "source_files": "FontAwesomeKit/FAKZocial.{h,m}", "resources": "FontAwesomeKit/zocial-regular-webfont.ttf" }, { "name": "IonIcons", "dependencies": { "FontAwesomeKit/Core": [ ] }, "source_files": "FontAwesomeKit/FAKIonIcons.{h,m}", "resources": "FontAwesomeKit/ionicons.ttf" } ] }
797
432
/* * Copyright (c) 1980, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)edit.c 8.1 (Berkeley) 6/6/93 * $FreeBSD: src/usr.bin/mail/edit.c,v 1.2.6.4 2003/01/06 05:46:03 mikeh Exp $ */ #include "rcv.h" #include <fcntl.h> #include "extern.h" /* * Mail -- a mail program * * Perform message editing functions. */ /* * Edit a message list. */ int editor(int *msgvec) { return (edit1(msgvec, 'e')); } /* * Invoke the visual editor on a message list. */ int visual(int *msgvec) { return (edit1(msgvec, 'v')); } /* * Edit a message by writing the message into a funnily-named file * (which should not exist) and forking an editor on it. * We get the editor from the stuff above. */ int edit1(int *msgvec, int type) { int c, i; FILE *fp; struct message *mp; off_t size; /* * Deal with each message to be edited . . . */ for (i = 0; i < msgCount && msgvec[i]; i++) { sig_t sigint; if (i > 0) { char buf[100]; char *p; printf("Edit message %d [ynq]? ", msgvec[i]); if (fgets(buf, sizeof(buf), stdin) == 0) break; for (p = buf; *p == ' ' || *p == '\t'; p++) ; if (*p == 'q') break; if (*p == 'n') continue; } dot = mp = &message[msgvec[i] - 1]; touch(mp); sigint = signal(SIGINT, SIG_IGN); fp = run_editor(setinput(mp), mp->m_size, type, readonly); if (fp != NULL) { fseeko(otf, (off_t)0, SEEK_END); size = ftello(otf); mp->m_block = blockof(size); mp->m_offset = boffsetof(size); mp->m_size = (long)fsize(fp); mp->m_lines = 0; mp->m_flag |= MODIFY; rewind(fp); while ((c = getc(fp)) != EOF) { if (c == '\n') mp->m_lines++; if (putc(c, otf) == EOF) break; } if (ferror(otf)) warnx("/tmp"); Fclose(fp); } signal(SIGINT, sigint); } return (0); } /* * Run an editor on the file at "fpp" of "size" bytes, * and return a new file pointer. * Signals must be handled by the caller. * "Type" is 'e' for _PATH_EX, 'v' for _PATH_VI. */ FILE * run_editor(FILE *fp, off_t size, int type, int readonly) { FILE *nf = NULL; int t; time_t modtime; char *edit, tempname[PATHSIZE]; struct stat statb; snprintf(tempname, sizeof(tempname), "%s/mail.ReXXXXXXXXXX", tmpdir); if ((t = mkstemp(tempname)) == -1 || (nf = Fdopen(t, "w")) == NULL) { warn("%s", tempname); goto out; } if (readonly && fchmod(t, 0400) == -1) { warn("%s", tempname); rm(tempname); goto out; } if (size >= 0) while (--size >= 0 && (t = getc(fp)) != EOF) putc(t, nf); else while ((t = getc(fp)) != EOF) putc(t, nf); fflush(nf); if (fstat(fileno(nf), &statb) < 0) modtime = 0; else modtime = statb.st_mtime; if (ferror(nf)) { Fclose(nf); warnx("%s", tempname); rm(tempname); nf = NULL; goto out; } if (Fclose(nf) < 0) { warn("%s", tempname); rm(tempname); nf = NULL; goto out; } nf = NULL; if ((edit = value(type == 'e' ? "EDITOR" : "VISUAL")) == NULL) edit = type == 'e' ? _PATH_EX : _PATH_VI; if (run_command(edit, 0, -1, -1, tempname, NULL, NULL) < 0) { rm(tempname); goto out; } /* * If in read only mode or file unchanged, just remove the editor * temporary and return. */ if (readonly) { rm(tempname); goto out; } if (stat(tempname, &statb) < 0) { warn("%s", tempname); goto out; } if (modtime == statb.st_mtime) { rm(tempname); goto out; } /* * Now switch to new file. */ if ((nf = Fopen(tempname, "a+")) == NULL) { warn("%s", tempname); rm(tempname); goto out; } rm(tempname); out: return (nf); }
2,079
348
{"nom":"Bruguières","circ":"5ème circonscription","dpt":"Haute-Garonne","inscrits":4076,"abs":2245,"votants":1831,"blancs":160,"nuls":62,"exp":1609,"res":[{"nuance":"REM","nom":"<NAME>","voix":1156},{"nuance":"FN","nom":"<NAME>","voix":453}]}
99
965
// Display the current state of the progress control. CString str = _T("The progress control state is "); int progState = m_progressCtrl.GetState(); if (progState == PBST_NORMAL) str += _T("NORMAL"); else if (progState == PBST_PAUSED) str += _T("PAUSED"); else if (progState == PBST_ERROR) str += _T("ERROR"); else str += _T("unknown"); AfxMessageBox(str, MB_ICONEXCLAMATION);
141
908
from __future__ import print_function from __future__ import division from __future__ import absolute_import import unittest from jnius import autoclass class PassByReferenceOrValueTest(unittest.TestCase): def _verify(self, numbers, changed): for i in range(len(numbers)): self.assertEqual(numbers[i], i * i if changed else i) def _verify_all(self, numbers, changed): for n, c in zip(numbers, changed): self._verify(n, c) def test_single_param_static(self): VariablePassing = autoclass('org.jnius.VariablePassing') # passed by reference (default), numbers should change numbers = list(range(10)) VariablePassing.singleParamStatic(numbers) self._verify(numbers, True) # passed by reference, numbers should change numbers = list(range(10)) VariablePassing.singleParamStatic(numbers, pass_by_reference=True) self._verify(numbers, True) # passed by value, numbers should not change numbers = list(range(10)) VariablePassing.singleParamStatic(numbers, pass_by_reference=False) self._verify(numbers, False) def test_single_param(self): VariablePassing = autoclass('org.jnius.VariablePassing') variablePassing = VariablePassing() # passed by reference (default), numbers should change numbers = list(range(10)) variablePassing.singleParam(numbers) self._verify(numbers, True) # passed by reference, numbers should change numbers = list(range(10)) variablePassing.singleParam(numbers, pass_by_reference=True) self._verify(numbers, True) # passed by value, numbers should not change numbers = list(range(10)) variablePassing.singleParam(numbers, pass_by_reference=False) self._verify(numbers, False) def test_multiple_params_static(self): VariablePassing = autoclass('org.jnius.VariablePassing') # passed by reference (default), all numbers should change numbers = [list(range(10)) for _ in range(4)] VariablePassing.multipleParamsStatic(*numbers) self._verify_all(numbers, [True] * 4) # passed by reference, all numbers should change numbers = [list(range(10)) for _ in range(4)] VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=True) self._verify_all(numbers, [True] * 4) # passed by value, no numbers should change numbers = [list(range(10)) for _ in range(4)] VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=False) self._verify_all(numbers, [False] * 4) # only the first set of numbers should change numbers = [list(range(10)) for _ in range(4)] VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=[True, False]) self._verify_all(numbers, [True, False, False, False]) # only the first set of numbers should not change numbers = [list(range(10)) for _ in range(4)] VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=[False, True]) self._verify_all(numbers, [False, True, True, True]) # only the odd sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (True, False, True, False) VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed) # only the even sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (False, True, False, True) VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed) def test_multiple_params(self): VariablePassing = autoclass('org.jnius.VariablePassing') variablePassing = VariablePassing() # passed by reference (default), all numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing.multipleParams(*numbers) self._verify_all(numbers, [True] * 4) # passed by reference, all numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing.multipleParams(*numbers, pass_by_reference=True) self._verify_all(numbers, [True] * 4) # passed by value, no numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing.multipleParams(*numbers, pass_by_reference=False) self._verify_all(numbers, [False] * 4) # only the first set of numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing.multipleParams(*numbers, pass_by_reference=[True, False]) self._verify_all(numbers, [True, False, False, False]) # only the first set of numbers should not change numbers = [list(range(10)) for _ in range(4)] variablePassing.multipleParams(*numbers, pass_by_reference=[False, True]) self._verify_all(numbers, [False, True, True, True]) # only the odd sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (True, False, True, False) variablePassing.multipleParams(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed) # only the even sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (False, True, False, True) variablePassing.multipleParams(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed) def test_contructor_single_param(self): VariablePassing = autoclass('org.jnius.VariablePassing') # passed by reference (default), numbers should change numbers = list(range(10)) variablePassing = VariablePassing(numbers) self._verify(numbers, True) # passed by reference, numbers should change numbers = list(range(10)) variablePassing = VariablePassing(numbers, pass_by_reference=True) self._verify(numbers, True) # passed by value, numbers should not change numbers = list(range(10)) variablePassing = VariablePassing(numbers, pass_by_reference=False) self._verify(numbers, False) def test_contructor_multiple_params(self): VariablePassing = autoclass('org.jnius.VariablePassing') # passed by reference (default), all numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing = VariablePassing(*numbers) self._verify_all(numbers, [True] * 4) # passed by reference, all numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing = VariablePassing(*numbers, pass_by_reference=True) self._verify_all(numbers, [True] * 4) # passed by value, no numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing = VariablePassing(*numbers, pass_by_reference=False) self._verify_all(numbers, [False] * 4) # only the first set of numbers should change numbers = [list(range(10)) for _ in range(4)] variablePassing = VariablePassing(*numbers, pass_by_reference=[True, False]) self._verify_all(numbers, [True, False, False, False]) # only the first set of numbers should not change numbers = [list(range(10)) for _ in range(4)] variablePassing = VariablePassing(*numbers, pass_by_reference=[False, True]) self._verify_all(numbers, [False, True, True, True]) # only the odd sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (True, False, True, False) variablePassing = VariablePassing(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed) # only the even sets of numbers should change numbers = [list(range(10)) for _ in range(4)] changed = (False, True, False, True) variablePassing = VariablePassing(*numbers, pass_by_reference=changed) self._verify_all(numbers, changed)
3,184
14,668
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/wm/splitview/split_view_drag_indicators.h" #include "ash/display/screen_orientation_controller.h" #include "ash/display/screen_orientation_controller_test_api.h" #include "ash/public/cpp/presentation_time_recorder.h" #include "ash/shell.h" #include "ash/test/ash_test_base.h" #include "ash/wm/overview/overview_controller.h" #include "ash/wm/overview/overview_grid.h" #include "ash/wm/overview/overview_item.h" #include "ash/wm/overview/overview_session.h" #include "ash/wm/overview/overview_test_util.h" #include "ash/wm/splitview/split_view_constants.h" #include "ash/wm/splitview/split_view_controller.h" #include "ash/wm/tablet_mode/tablet_mode_controller.h" #include "base/command_line.h" #include "base/test/metrics/histogram_tester.h" #include "base/test/scoped_feature_list.h" #include "chromeos/ui/wm/features.h" #include "ui/aura/client/aura_constants.h" #include "ui/aura/env.h" #include "ui/aura/window.h" #include "ui/display/display_switches.h" #include "ui/events/test/event_generator.h" #include "ui/views/widget/widget.h" namespace ash { class SplitViewDragIndicatorsTest : public AshTestBase { public: SplitViewDragIndicatorsTest() = default; SplitViewDragIndicatorsTest(const SplitViewDragIndicatorsTest&) = delete; SplitViewDragIndicatorsTest& operator=(const SplitViewDragIndicatorsTest&) = delete; ~SplitViewDragIndicatorsTest() override = default; void SetUp() override { base::CommandLine::ForCurrentProcess()->AppendSwitch( switches::kUseFirstDisplayAsInternal); AshTestBase::SetUp(); // Ensure calls to SetEnabledForTest complete. base::RunLoop().RunUntilIdle(); Shell::Get()->tablet_mode_controller()->SetEnabledForTest(true); base::RunLoop().RunUntilIdle(); PresentationTimeRecorder::SetReportPresentationTimeImmediatelyForTest(true); } void TearDown() override { PresentationTimeRecorder::SetReportPresentationTimeImmediatelyForTest( false); AshTestBase::TearDown(); } void ToggleOverview() { auto* overview_controller = Shell::Get()->overview_controller(); if (overview_controller->InOverviewSession()) ExitOverview(); else EnterOverview(); if (!overview_controller->InOverviewSession()) { overview_session_ = nullptr; split_view_drag_indicators_ = nullptr; return; } overview_session_ = Shell::Get()->overview_controller()->overview_session(); ASSERT_TRUE(overview_session_); split_view_drag_indicators_ = overview_session_->grid_list()[0]->split_view_drag_indicators(); } SplitViewController* split_view_controller() { return SplitViewController::Get(Shell::GetPrimaryRootWindow()); } SplitViewDragIndicators::WindowDraggingState window_dragging_state() { DCHECK(split_view_drag_indicators_); return split_view_drag_indicators_->current_window_dragging_state(); } bool IsPreviewAreaShowing() { return SplitViewDragIndicators::GetSnapPosition(window_dragging_state()) != SplitViewController::NONE; } float GetEdgeInset(int screen_width) const { return screen_width * kHighlightScreenPrimaryAxisRatio + kHighlightScreenEdgePaddingDp; } // Creates a window which cannot be snapped by splitview. std::unique_ptr<aura::Window> CreateUnsnappableWindow() { std::unique_ptr<aura::Window> window(CreateTestWindow()); window->SetProperty(aura::client::kResizeBehaviorKey, aura::client::kResizeBehaviorNone); return window; } protected: SplitViewDragIndicators* split_view_drag_indicators_ = nullptr; OverviewSession* overview_session_ = nullptr; }; TEST_F(SplitViewDragIndicatorsTest, Dragging) { base::HistogramTester histogram_tester; aura::Env::GetInstance()->set_throttle_input_on_resize_for_testing(false); UpdateDisplay("800x600"); const int screen_width = 800; const float edge_inset = GetEdgeInset(screen_width); std::unique_ptr<aura::Window> right_window(CreateTestWindow()); std::unique_ptr<aura::Window> left_window(CreateTestWindow()); ui::test::EventGenerator* generator = GetEventGenerator(); ToggleOverview(); OverviewItem* left_item = GetOverviewItemForWindow(left_window.get()); OverviewItem* right_item = GetOverviewItemForWindow(right_window.get()); // The inset on each side of the screen which is a snap region. Items dragged // to and released under this region will get snapped. const int drag_offset = 5; const int drag_offset_snap_region = 48; const int minimum_drag_offset = 96; // The selector item has a margin which does not accept events. Inset any // event aimed at the selector items edge so events will reach it. const int item_inset = 20; // Check the two windows set up have a region which is under no snap region, a // region that is under the left snap region and a region that is under the // right snap region. ASSERT_GT(left_item->target_bounds().CenterPoint().x(), edge_inset); ASSERT_LT(left_item->target_bounds().origin().x() + item_inset, edge_inset); ASSERT_GT(right_item->target_bounds().right() - item_inset, screen_width - edge_inset); // Verify if the drag is not started in either snap region, the drag still // must move by |drag_offset| before split view acknowledges the drag (ie. // starts moving the selector item). generator->set_current_screen_location( gfx::ToRoundedPoint(left_item->target_bounds().CenterPoint())); generator->PressLeftButton(); const gfx::RectF left_original_bounds = left_item->target_bounds(); generator->MoveMouseBy(drag_offset - 1, 0); EXPECT_EQ(left_original_bounds, left_item->target_bounds()); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 0); generator->MoveMouseBy(1, 0); EXPECT_NE(left_original_bounds, left_item->target_bounds()); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 1); histogram_tester.ExpectTotalCount( "ash.overview.windowdrag.presentationtime.maxlatency.tabletmode", 0); generator->ReleaseLeftButton(); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 1); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.MaxLatency.TabletMode", 1); // Verify if the drag is started in the left snap region, the drag needs to // move by |drag_offset_snap_region| towards the right side of the screen // before split view acknowledges the drag (shows the preview area). ASSERT_TRUE(Shell::Get()->overview_controller()->InOverviewSession()); generator->set_current_screen_location( gfx::Point(left_item->target_bounds().origin().x() + item_inset, left_item->target_bounds().CenterPoint().y())); generator->PressLeftButton(); generator->MoveMouseBy(-drag_offset, 0); EXPECT_FALSE(IsPreviewAreaShowing()); generator->MoveMouseBy(drag_offset_snap_region, 0); EXPECT_FALSE(IsPreviewAreaShowing()); generator->MoveMouseBy(-minimum_drag_offset, 0); EXPECT_TRUE(IsPreviewAreaShowing()); // Drag back to the middle before releasing so that we stay in overview mode // on release. generator->MoveMouseTo( gfx::ToRoundedPoint(left_original_bounds.CenterPoint())); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 5); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.MaxLatency.TabletMode", 1); generator->ReleaseLeftButton(); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.MaxLatency.TabletMode", 2); // Verify if the drag is started in the right snap region, the drag needs to // move by |drag_offset_snap_region| towards the left side of the screen // before split view acknowledges the drag. ASSERT_TRUE(Shell::Get()->overview_controller()->InOverviewSession()); generator->set_current_screen_location( gfx::Point(right_item->target_bounds().right() - item_inset, right_item->target_bounds().CenterPoint().y())); generator->PressLeftButton(); generator->MoveMouseBy(drag_offset, 0); EXPECT_FALSE(IsPreviewAreaShowing()); generator->MoveMouseBy(-drag_offset_snap_region, 0); generator->MoveMouseBy(minimum_drag_offset, 0); EXPECT_TRUE(IsPreviewAreaShowing()); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 8); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.MaxLatency.TabletMode", 2); generator->ReleaseLeftButton(); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.TabletMode", 8); histogram_tester.ExpectTotalCount( "Ash.Overview.WindowDrag.PresentationTime.MaxLatency.TabletMode", 3); } // Verify the split view preview area becomes visible when expected. TEST_F(SplitViewDragIndicatorsTest, PreviewAreaVisibility) { UpdateDisplay("800x600"); const int screen_width = 800; const float edge_inset = GetEdgeInset(screen_width); std::unique_ptr<aura::Window> window(CreateTestWindow()); ToggleOverview(); // Verify the preview area is visible when |item|'s x is in the // range [0, edge_inset] or [screen_width - edge_inset - 1, screen_width]. OverviewItem* item = GetOverviewItemForWindow(window.get()); ASSERT_TRUE(item); const gfx::PointF start_location(item->target_bounds().CenterPoint()); // Drag horizontally to avoid activating drag to close. const float y = start_location.y(); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(edge_inset + 1, y)); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(edge_inset, y)); EXPECT_TRUE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(screen_width - edge_inset - 2, y)); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(screen_width - edge_inset - 1, y)); EXPECT_TRUE(IsPreviewAreaShowing()); // Drag back to |start_location| before compeleting the drag, otherwise // |selector_time| will snap to the right and the system will enter splitview, // making |window_drag_controller()| nullptr. overview_session_->Drag(item, start_location); overview_session_->CompleteDrag(item, start_location); EXPECT_FALSE(IsPreviewAreaShowing()); } // Verify that the preview area never shows up when dragging a unsnappable // window. TEST_F(SplitViewDragIndicatorsTest, PreviewAreaVisibilityUnsnappableWindow) { UpdateDisplay("800x600"); const int screen_width = 800; std::unique_ptr<aura::Window> window(CreateUnsnappableWindow()); ToggleOverview(); OverviewItem* item = GetOverviewItemForWindow(window.get()); const gfx::PointF start_location(item->target_bounds().CenterPoint()); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(0.f, 1.f)); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->Drag(item, gfx::PointF(screen_width, 1.f)); EXPECT_FALSE(IsPreviewAreaShowing()); overview_session_->CompleteDrag(item, start_location); EXPECT_FALSE(IsPreviewAreaShowing()); } // Check |SplitViewDragIndicators::current_window_dragging_state_| in common // workflows (see the comments in the definition of // |SplitViewDragIndicators::WindowDraggingState|). TEST_F(SplitViewDragIndicatorsTest, SplitViewDragIndicatorsWindowDraggingState) { UpdateDisplay("800x600"); const int screen_width = 800; const float edge_inset = GetEdgeInset(screen_width); std::unique_ptr<aura::Window> window1(CreateTestWindow()); std::unique_ptr<aura::Window> window2(CreateTestWindow()); ToggleOverview(); // Start dragging from overview. OverviewItem* item = GetOverviewItemForWindow(window1.get()); gfx::PointF start_location(item->target_bounds().CenterPoint()); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kNoDrag, window_dragging_state()); overview_session_->StartNormalDragMode(start_location); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kFromOverview, window_dragging_state()); // Reset the gesture so we stay in overview mode. overview_session_->ResetDraggedWindowGesture(); // Verify the width of a snap area. const float y_position = start_location.y(); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kNoDrag, window_dragging_state()); overview_session_->Drag(item, gfx::PointF(edge_inset + 1, y_position)); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kFromOverview, window_dragging_state()); overview_session_->Drag(item, gfx::PointF(edge_inset, y_position)); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kToSnapLeft, window_dragging_state()); // Snap window to the left. overview_session_->CompleteDrag(item, gfx::PointF(edge_inset, y_position)); ASSERT_TRUE(split_view_controller()->InSplitViewMode()); ASSERT_EQ(SplitViewController::State::kLeftSnapped, split_view_controller()->state()); // Drag from overview and snap to the right. item = GetOverviewItemForWindow(window2.get()); start_location = item->target_bounds().CenterPoint(); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kNoDrag, window_dragging_state()); overview_session_->Drag(item, gfx::PointF(screen_width - 1, y_position)); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kToSnapRight, window_dragging_state()); overview_session_->CompleteDrag(item, start_location); } // Test dragging an unsnappable window. TEST_F(SplitViewDragIndicatorsTest, SplitViewDragIndicatorVisibilityUnsnappableWindow) { std::unique_ptr<aura::Window> unsnappable_window(CreateUnsnappableWindow()); ToggleOverview(); OverviewItem* item = GetOverviewItemForWindow(unsnappable_window.get()); gfx::PointF start_location(item->target_bounds().CenterPoint()); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); overview_session_->StartNormalDragMode(start_location); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kFromOverview, window_dragging_state()); const gfx::PointF end_location1(0.f, 0.f); overview_session_->Drag(item, end_location1); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kFromOverview, window_dragging_state()); overview_session_->CompleteDrag(item, end_location1); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kNoDrag, window_dragging_state()); } // Verify when the window dragging state changes, the expected indicators will // become visible or invisible. TEST_F(SplitViewDragIndicatorsTest, SplitViewDragIndicatorsVisibility) { std::unique_ptr<aura::Window> dragged_window(CreateTestWindow()); auto indicator = std::make_unique<SplitViewDragIndicators>( dragged_window->GetRootWindow()); indicator->SetDraggedWindow(dragged_window.get()); auto to_int = [](IndicatorType type) { return static_cast<int>(type); }; // Helper function to which checks that all indicator types passed in |mask| // are visible, and those that are not are not visible. auto check_helper = [](SplitViewDragIndicators* svdi, int mask) { const std::vector<IndicatorType> types = { IndicatorType::kLeftHighlight, IndicatorType::kLeftText, IndicatorType::kRightHighlight, IndicatorType::kRightText}; for (auto type : types) { if ((static_cast<int>(type) & mask) > 0) EXPECT_TRUE(svdi->GetIndicatorTypeVisibilityForTesting(type)); else EXPECT_FALSE(svdi->GetIndicatorTypeVisibilityForTesting(type)); } }; // Check each state has the correct views displayed. Verify that nothing is // shown in state |SplitViewDragIndicators::WindowDraggingState::kNoDrag|. indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kNoDrag); check_helper(indicator.get(), 0); const int all = to_int(IndicatorType::kLeftHighlight) | to_int(IndicatorType::kLeftText) | to_int(IndicatorType::kRightHighlight) | to_int(IndicatorType::kRightText); // Verify that everything is visible in state // |SplitViewDragIndicators::WindowDraggingState::kFromOverview|. indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromOverview); check_helper(indicator.get(), all); // Verify that only one highlight shows up for the snap states. indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapLeft); check_helper(indicator.get(), to_int(IndicatorType::kLeftHighlight)); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapRight); check_helper(indicator.get(), to_int(IndicatorType::kRightHighlight)); // Verify that only snap previews are shown for window dragging from shelf. indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kNoDrag); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromShelf); check_helper(indicator.get(), 0); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapLeft); check_helper(indicator.get(), to_int(IndicatorType::kLeftHighlight)); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromShelf); check_helper(indicator.get(), 0); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapRight); check_helper(indicator.get(), to_int(IndicatorType::kRightHighlight)); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromShelf); check_helper(indicator.get(), 0); ScreenOrientationControllerTestApi orientation_api( Shell::Get()->screen_orientation_controller()); // Verify that only snap preview in state // |SplitViewDragIndicators::WindowDraggingState::kFromTop| in landscape // orientation. ASSERT_EQ(chromeos::OrientationType::kLandscapePrimary, orientation_api.GetCurrentOrientation()); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kNoDrag); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromTop); check_helper(indicator.get(), 0); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapRight); check_helper(indicator.get(), to_int(IndicatorType::kRightHighlight)); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromTop); check_helper(indicator.get(), 0); // Verify that no drag-to-snap indicators are shown in state // |SplitViewDragIndicators::WindowDraggingState::kFromTop| in portrait // orientation. orientation_api.SetDisplayRotation(display::Display::ROTATE_270, display::Display::RotationSource::ACTIVE); ASSERT_EQ(chromeos::OrientationType::kPortraitPrimary, orientation_api.GetCurrentOrientation()); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kNoDrag); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromTop); check_helper(indicator.get(), 0); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kToSnapRight); indicator->SetWindowDraggingState( SplitViewDragIndicators::WindowDraggingState::kFromTop); check_helper(indicator.get(), 0); } // Defines a test fixture to test behavior of SplitViewDragIndicators on // multi-display in clamshell mode, parameterized to run with the feature // |chromeos::wm::features::kVerticalSnap| enabled and disabled. class ClamshellMultiDisplaySplitViewDragIndicatorsTest : public SplitViewDragIndicatorsTest, public ::testing::WithParamInterface<bool> { public: ClamshellMultiDisplaySplitViewDragIndicatorsTest() = default; ~ClamshellMultiDisplaySplitViewDragIndicatorsTest() override = default; // SplitViewDragIndicatorsTest: void SetUp() override { if (GetParam()) scoped_feature_list_.InitAndEnableFeature( chromeos::wm::features::kVerticalSnap); else scoped_feature_list_.InitAndDisableFeature( chromeos::wm::features::kVerticalSnap); SplitViewDragIndicatorsTest::SetUp(); // Disable tablet mode that is enabled in // `SplitViewDragIndicatorsTest::SetUp()` to test clamshell mode. Shell::Get()->tablet_mode_controller()->SetEnabledForTest(false); base::RunLoop().RunUntilIdle(); } bool IsVerticalSnapEnabled() const { return GetParam(); } protected: base::test::ScopedFeatureList scoped_feature_list_; }; // Tests that dragging a window to external portrait display will layout // split view drag indicators vertically instead of horizontally if // |chromeos::wm::features::kVerticalSnap| is enabled. TEST_P(ClamshellMultiDisplaySplitViewDragIndicatorsTest, IndicatorsLayoutWhileDraggingWindowToPortraitDisplay) { UpdateDisplay("800x600,600x800"); aura::Window::Windows root_windows = Shell::GetAllRootWindows(); ASSERT_EQ(2u, root_windows.size()); std::unique_ptr<aura::Window> window1(CreateTestWindow()); std::unique_ptr<aura::Window> window2(CreateTestWindow()); const display::Display landscape_display = display::Screen::GetScreen()->GetDisplayNearestWindow(root_windows[0]); const display::Display portrait_display = display::Screen::GetScreen()->GetDisplayNearestWindow(root_windows[1]); ToggleOverview(); // Overview starts with no split view drag indicator. auto* indicators = overview_session_->GetGridWithRootWindow(root_windows[0]) ->split_view_drag_indicators(); EXPECT_FALSE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kLeftText)); EXPECT_FALSE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kRightText)); // Start dragging from overview in the landscape display. OverviewItem* item = GetOverviewItemForWindow(window1.get()); gfx::PointF start_location(item->target_bounds().CenterPoint()); overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kNoDrag, window_dragging_state()); overview_session_->Drag(item, gfx::PointF(400, 300)); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kFromOverview, window_dragging_state()); // The split view indicator should show up with left indicator on the left // and its height span over height of the display work area. EXPECT_TRUE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kLeftText)); EXPECT_TRUE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kRightText)); gfx::Rect left_indicator_bounds = indicators->GetLeftHighlightViewBounds(); EXPECT_EQ(left_indicator_bounds.height(), landscape_display.work_area().height() - 2 * kHighlightScreenEdgePaddingDp); // Reset the gesture so we stay in overview mode. overview_session_->ResetDraggedWindowGesture(); // Drag a window to the portrait display. overview_session_->InitiateDrag(item, start_location, /*is_touch_dragging=*/false); Shell::Get()->cursor_manager()->SetDisplay(portrait_display); overview_session_->Drag(item, gfx::PointF(1100, 400)); EXPECT_EQ(SplitViewDragIndicators::WindowDraggingState::kOtherDisplay, window_dragging_state()); indicators = overview_session_->GetGridWithRootWindow(root_windows[1]) ->split_view_drag_indicators(); EXPECT_TRUE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kLeftText)); EXPECT_TRUE(indicators->GetIndicatorTypeVisibilityForTesting( IndicatorType::kRightText)); // If |chromeos::wm::features::kVerticalSnap| is enabled, the left indicator // should be on the top of the display and its width span the work area width. // Otherwise, the left indicator should be on the left and its height span // the work area height. left_indicator_bounds = indicators->GetLeftHighlightViewBounds(); if (IsVerticalSnapEnabled()) { EXPECT_EQ(left_indicator_bounds.width(), portrait_display.work_area().width() - 2 * kHighlightScreenEdgePaddingDp); } else { EXPECT_EQ(left_indicator_bounds.height(), portrait_display.work_area().height() - 2 * kHighlightScreenEdgePaddingDp); } } // Instantiate the Boolean which is used to toggle the feature // |chromeos::wm::features::kVerticalSnap| in the parameterized tests. INSTANTIATE_TEST_SUITE_P(All, ClamshellMultiDisplaySplitViewDragIndicatorsTest, ::testing::Bool()); } // namespace ash
8,825
328
<filename>flow-core/src/main/java/com/dragon/flow/service/flowable/impl/ExtendProcinstServiceImpl.java package com.dragon.flow.service.flowable.impl; import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper; import com.baomidou.mybatisplus.core.toolkit.IdWorker; import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl; import com.dragon.flow.enm.flowable.runtime.ProcessStatusEnum; import com.dragon.flow.mapper.flowable.IExtendProcinstMapper; import com.dragon.flow.model.flowable.ExtendHisprocinst; import com.dragon.flow.model.flowable.ExtendProcinst; import com.dragon.flow.service.flowable.IExtendHisprocinstService; import com.dragon.flow.service.flowable.IExtendProcinstService; import org.springframework.beans.BeanUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.Date; import java.util.List; /** * <p> * 服务实现类 * </p> * * @author bruce.liu * @since 2021-04-21 */ @Service public class ExtendProcinstServiceImpl extends ServiceImpl<IExtendProcinstMapper, ExtendProcinst> implements IExtendProcinstService { @Autowired private IExtendHisprocinstService extendHisprocinstService; @Override public void deleteExtendProcinstByProcessInstanceId(String processInstanceId) { LambdaQueryWrapper<ExtendProcinst> extendProcinstLambdaQueryWrapper = new LambdaQueryWrapper<>(); extendProcinstLambdaQueryWrapper.eq(ExtendProcinst::getProcessInstanceId,processInstanceId); this.remove(extendProcinstLambdaQueryWrapper); } @Override public void saveExtendProcinstAndHis(ExtendProcinst extendProcinst) { extendProcinst.setId(IdWorker.get32UUID()); extendProcinst.setCreateTime(new Date()); extendProcinst.setUpdateTime(new Date()); this.save(extendProcinst); ExtendHisprocinst extendHisprocinst = new ExtendHisprocinst(); BeanUtils.copyProperties(extendProcinst, extendHisprocinst); this.extendHisprocinstService.save(extendHisprocinst); } @Override public void updateStatus(ProcessStatusEnum processStatus, String processInstanceId) { LambdaUpdateWrapper<ExtendProcinst> extendProcinstLambdaUpdateWrapper = new LambdaUpdateWrapper<>(); extendProcinstLambdaUpdateWrapper.set(ExtendProcinst::getProcessStatus, processStatus.getType()) .eq(ExtendProcinst::getProcessInstanceId,processInstanceId); this.update(extendProcinstLambdaUpdateWrapper); LambdaUpdateWrapper<ExtendHisprocinst> extendHisprocinstLambdaUpdateWrapper = new LambdaUpdateWrapper<>(); extendHisprocinstLambdaUpdateWrapper.set(ExtendHisprocinst::getProcessStatus, processStatus.getType()) .eq(ExtendHisprocinst::getProcessInstanceId,processInstanceId); extendHisprocinstService.update(extendHisprocinstLambdaUpdateWrapper); } @Override public ExtendProcinst findExtendProcinstByProcessInstanceId(String processInstanceId) { LambdaQueryWrapper<ExtendProcinst> extendProcinstLambdaQueryWrapper = new LambdaQueryWrapper<>(); extendProcinstLambdaQueryWrapper.eq(ExtendProcinst::getProcessInstanceId, processInstanceId); return this.getOne(extendProcinstLambdaQueryWrapper); } }
1,233
594
<gh_stars>100-1000 import numpy as np import re import json import matplotlib.pyplot as plt def parse_log_file(fname): with open(fname) as f: content = f.readlines() data = [] num_pre = -1 for line in content: if 'Epoch' in line and 'loss' in line and (not 'nan' in line): start = line.find('Epoch') start2 = line.find('time') # find all float number in string result1 = re.findall(r"[-+]?\d*\.\d+|\d+", line[start:start2]) result2 = re.findall(r"[-+]?\d*\.\d+|\d+", line[start2:]) result = result1[0:4] + result2 assert num_pre < 0 or len(result)==num_pre, 'number of parse loss should be the same' data.append(np.array([float(item) for item in result])) num_pre = len(result) data = np.array(data) print(data.shape) iteration = (data[:,0]-1)*(data[:,2]) + data[:,1] lr_rate = data[:,3] # loss starts from index 10 data = data[:,7:] plt.subplot(221) plt.plot(iteration, data[:, -1]) # total loss plt.subplot(222) plt.plot(iteration, data[:, -2]) # box loss plt.subplot(223) plt.plot(iteration, data[:, -4]) # class acc plt.subplot(224) plt.plot(iteration, lr_rate) plt.show() if __name__ == '__main__': fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_x152_caffe_32x8d_fpn.txt' fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_x101_64x4d_fpn_1x_trnbn.txt' fname = '/home/hust/tools/log_fold/mmdetect/log_cascade_rcnn_densenet161.txt' parse_log_file(fname)
768
60,067
/* * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @addtogroup NeuralNetworks * @{ */ /** * @file NeuralNetworks.h */ #ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H #define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H /****************************************************************** * * IMPORTANT NOTICE: * * This file is part of Android's set of stable system headers * exposed by the Android NDK (Native Development Kit). * * Third-party source AND binary code relies on the definitions * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. * * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES */ #include <stddef.h> #include <stdint.h> #include <sys/cdefs.h> __BEGIN_DECLS /** * Operand types. * * The type of operands that can be added to a model. * * Although we define many types, most operators accept just a few * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32}, * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * and {@link ANEURALNETWORKS_INT32}. */ typedef enum { /** The following entries are used to declare scalars. */ /** A 32 bit floating point scalar value. */ ANEURALNETWORKS_FLOAT32 = 0, /** A signed 32 bit integer scalar value. */ ANEURALNETWORKS_INT32 = 1, /** An unsigned 32 bit integer scalar value. */ ANEURALNETWORKS_UINT32 = 2, /** The following entries are used to declare tensors. */ /** A tensor of 32 bit floating point values. */ ANEURALNETWORKS_TENSOR_FLOAT32 = 3, /** A tensor of 32 bit integer values. */ ANEURALNETWORKS_TENSOR_INT32 = 4, /** A tensor of 8 bit integers that represent real numbers. * * Attached to this tensor are two numbers that can be used to convert * the 8 bit integer to the real value and vice versa. These two numbers are: * - scale: a 32 bit non-negative floating point value. * - zeroPoint: an 32 bit integer, in range [0, 255]. * * The formula is: * real_value = (integer_value - zeroPoint) * scale. */ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5, } OperandCode; /** * Operation types. * * The type of operations that can be added to a model. */ typedef enum { /** Adds two tensors, element-wise. * * Takes two input tensors of identical type and compatible dimensions. The output * is the sum of both input tensors, optionally modified by an activation function. * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the output is the maximum size along each dimension of the input operands. * It starts with the trailing dimensions, and works its way forward. * * Example: * * input1.dimension = {4, 1, 2} * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * 1: A tensor of the same type, and compatible dimensions as input0. * * 2: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The sum, a tensor of the same type as input0. */ ANEURALNETWORKS_ADD = 0, /** Performs a 2-D average pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and padding. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels) * data layout. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. * * 5: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 6: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 7: An INT32 value, specifying the filter width. * * 8: An INT32 value, specifying the filter height. * * 9: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 3: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 4: An INT32 value, specifying the filter width. * * 5: An INT32 value, specifying the filter height. * * 6: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. */ ANEURALNETWORKS_AVERAGE_POOL_2D = 1, /** Concatenates the input tensors along the given dimension. * * The input tensors must have identical type and the same dimensions except the * dimension along the concatenation axis. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4 * * Inputs: * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]. * For inputs of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, all * input tensors must have the same scale and zeroPoint. * * n: An INT32 value, specifying the concatenation axis. * * Outputs: * * 0: The output, a tensor of the same type as the input tensors. * The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. */ ANEURALNETWORKS_CONCATENATION = 2, /** Performs an 2-D convolution operation. * * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of * images, applying the filter to each window of each image of the appropriate size. * * The output dimensions are functions of the filter dimensions, stride, and padding. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * sum_{i, j} ( * input[batch, row + i, col + j, k] * * filter[channel, row + i, col + j, k] + * bias[channel] * ) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" data layout. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in], * specifying the filter. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and * bias_scale == input_scale * filter_scale. * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. * * 7: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 8: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 9: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in], * specifying the filter. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and * bias_scale == input_scale * filter_scale. * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the * {@link PaddingCode} values. * * 4: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 5: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 6: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following * condition must be satisfied: output_scale > input_scale * filter_scale. */ ANEURALNETWORKS_CONV_2D = 3, /** Performs a depthwise 2-D convolution operation. * * Given an input tensor of shape [batches, height, width, depth_in] and a filter * tensor of shape [1, filter_height, filter_width, depth_out] containing * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different * filter to each input channel (expanding from 1 channel to channel_multiplier channels * for each), then concatenates the results together. * * The output has depth_out = depth_in * depth_multiplier channels. * The output dimensions are functions of the filter dimensions, stride, and padding. * * The values in the output tensor are computed as: * * output[b, i, j, k * channel_multiplier + q] = * sum_{di, dj} ( * input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[1, di, dj, k * channel_multiplier + q] * ) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" data layout. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], * specifying the filter. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and * bias_scale == input_scale * filter_scale. * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. * * 7: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 8: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 9: An INT32 value, specifying the depthwise multiplier. * * 10: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], * specifying the filter. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and * bias_scale == input_scale * filter_scale. * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the * {@link PaddingCode} values. * * 4: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 5: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 6: An INT32 value, specifying the depthwise multiplier. * * 7: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following * condition must be satisfied: output_scale > input_scale * filter_scale. */ ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4, /** Rearranges data from depth into blocks of spatial data. * * More specifically, this op outputs a copy of the input tensor where values from * the depth dimension are moved in spatial blocks to the height and width dimensions. * The value block_size indicates the input block size and how the data is moved. * * Chunks of data of size block_size * block_size from depth are rearranged into * non-overlapping blocks of size block_size x block_size. * * The width of the output tensor is input_depth * block_size, whereas the height is * input_height * block_size. * The depth of the input tensor must be divisible by block_size * block_size * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and * block_size * block_size must be a divisor of the input depth. * * Outputs: * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size, * depth/(block_size*block_size)]. */ ANEURALNETWORKS_DEPTH_TO_SPACE = 5, /** Dequantizes the input tensor. * * The formula is: * * output = (input - zeroPoint) * scale. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}. * * Outputs: * * 0: The output tensor of same shape as input0, but with type * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. */ ANEURALNETWORKS_DEQUANTIZE = 6, /** Looks up sub-tensors in the input tensor. * * This operator takes for input a tensor of values (Values) and * a one-dimensional tensor of selection indices (Lookups). * The output tensor is the concatenation of sub-tensors of Values as * selected by Lookups. * * Think of Values as being sliced along its first dimension: * The entries in Lookups select which slices are concatenated together * to create the output tensor. * * For example, if Values has shape of [40, 200, 300] and * Lookups has shape of [3], we would expect all three values * found in Lookups to be between 0 and 39. The resulting tensor will * have shape of [3, 200, 300]. * * If a value in Lookups is out of bounds, the operation will fail * and an error will be reported. * * Inputs: * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32} type. * The values are indices into the first dimension of Values. * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are * extracted. * * Output: * * 0: A n-D tensor with the same rank and shape as the Values * tensor, except for the first dimension which has the same size * as Lookups' only dimension. */ ANEURALNETWORKS_EMBEDDING_LOOKUP = 7, /** Computes element-wise floor() on the input tensor. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * Outputs: * * 0: The output tensor, of the same type and dimensions as the input tensor. */ ANEURALNETWORKS_FLOOR = 8, /** Denotes a fully (densely) connected layer, which connects all elements in the input * tensor with each element in the output tensor. * * This layer implements the operation: * * outputs = activation(inputs * weights’ + bias) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape * [batch_size, input_size], where “batch_size” corresponds to the batching dimension, * and “input_size” is the size of the input. * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where * "num_units" corresponds to the number of output nodes. * * 2: A 1-D tensor, of shape [num_units], specifying the bias. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and * bias_scale == input_scale * filter_scale. * * 3: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output tensor, of shape [batch_size, num_units]. * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following * condition must be satisfied: output_scale > input_scale * filter_scale. */ ANEURALNETWORKS_FULLY_CONNECTED = 9, /** Looks up sub-tensors in the input tensor using a key-value map. * * This operator takes for input a tensor of values (Values), * a one-dimensional tensor of selection values (Lookups) and * a one-dimensional tensor that maps these values to Values * indexes. The output tensor is the concatenation of sub-tensors of * Values as selected by Lookups via Keys. * * Think of Values as being sliced along its outer-most dimension. * The output is a concatenation of selected slices, with one slice * for each entry of Lookups. The slice selected is the one at the * same index as the Maps entry that matches the value in Lookups. * * For a hit, the corresponding sub-tensor of Values is included * in the Output tensor. For a miss, the corresponding sub-tensor in * Output will have zero values. * * For example, if Values has shape of [40, 200, 300], * Keys should have a shape of [40]. If Lookups tensor has shape * of [3], we're concatenating three slices, so the resulting tensor * will have the shape of [3, 200, 300]. If the first entry in * Lookups has the value 123456, we'll look for that value in Keys tensor. * If the sixth entry of Keys contains 123456, we'll select the sixth * slice of Values. If no entry in Keys has 123456, a slice of zeroes * will be concatenated. * * Inputs: * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [ k ]. * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [ n ]; * Keys and Values pair represent a map, i.e., the ith element * in Keys (Keys[i]) is the key to select the ith sub-tensor * in Values (Values[i]), where 0 <= i <= n-1. * Keys tensor *MUST* be sorted in ascending order. * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n. * * Outputs: * * 0: Output. A tensor with shape [ k …]. * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup * hits (True) or not (False). * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f. * A non-zero byte represents True, a hit. A zero indicates otherwise. */ ANEURALNETWORKS_HASHTABLE_LOOKUP = 10, /** Applies L2 normalization along the depth dimension. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * input[batch, row, col, channel] / * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) * * For input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels). * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth]. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. */ ANEURALNETWORKS_L2_NORMALIZATION = 11, /** Performs an 2-D L2 pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and padding. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1)) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 4, with "NHWC" data layout. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. * * 5: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 6: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 7: An INT32 value, specifying the filter width. * * 8: An INT32 value, specifying the filter height. * * 9: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 3: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 4: An INT32 value, specifying the filter width. * * 5: An INT32 value, specifying the filter height. * * 6: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. */ ANEURALNETWORKS_L2_POOL_2D = 12, /** Applies Local Response Normalization along the depth dimension. * * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within depth_radius. * * The output is calculated using this formula: * * sqr_sum[a, b, c, d] = * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2) * output = input / pow((bias + alpha * sqr_sum), beta) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the radius of the normalization window. * * 2: A FLOAT32 value, specifying the bias, must not be zero. * * 3: A FLOAT32 value, specifying the scale factor, alpha. * * 4: A FLOAT32 value, specifying the exponent, beta. * * Outputs: * * 0: The output tensor of same shape as input0. */ ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13, /** Computes sigmoid activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = 1 / (1 + exp(-input)) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, * the scale must be 1.f / 256 and the zeroPoint must be 0. */ ANEURALNETWORKS_LOGISTIC = 14, /** * Projects an input to a bit vector via locality senstive hashing. * * Inputs: * * 0: Hash functions. Dim.size == 2, DataType: Float. * Tensor[0].Dim[0]: Number of hash functions. * Tensor[0].Dim[1]: Number of seeds per hash functions. * Tensor[0].Dim[1] <= 32 in sparse case. * * * 1: Input. Dim.size >= 1, no restriction on DataType. * * 2: Weight. Optional. Dim.size == 1, DataType: Float. * If not set, each input element is considered to have the same weight of * 1.0. * Tensor[1].Dim[0] == Tensor[2].Dim[0] * * 3: Type: * Sparse: Value LSHProjectionType_SPARSE(=1). * Computed bit vector is considered to be sparse. * Each output element is an int32 made up of multiple bits computed from * hash functions. * * Dense: Value LSHProjectionType_DENSE(=2). * Computed bit vector is considered to be dense. Each output element * represents a bit and can take the value of either 0 or 1. * * Outputs: * * 0: If the projection type is sparse: * Output.Dim == { Tensor[0].Dim[0] } * A tensor of int32 that represents hash signatures. * If the projection type is Dense: * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } * A flattened tensor that represents projected bit vectors. */ ANEURALNETWORKS_LSH_PROJECTION = 15, /** * Long short-term memory unit (LSTM) recurrent network layer. * * The default non-peephole implementation is based on: * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf * <NAME> and <NAME>. "Long Short-Term Memory". Neural * Computation, 9(8):1735-1780, 1997. * * The peephole implementation is based on: * https://research.google.com/pubs/archive/43905.pdf * <NAME>, <NAME>, and <NAME>. "Long short-term memory * recurrent neural network architectures for large scale acoustic modeling." * INTERSPEECH, 2014. * * The coupling of input and forget gate (CIFG) is based on: * http://arxiv.org/pdf/1503.04069.pdf * Greff et al. "LSTM: A Search Space Odyssey" * * The class has the following independently optional inputs: * * If input gate (if CIFG): “input_to_forget_weights”, * “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”. * * If no peephole connections: “cell_to_input_weights”, * “cell_to_forget_weights”, “cell_to_output_weights”. * * If no projection layer: “projection_weights” and “projection_bias”. * * If no projection bias: “projection_bias”. * * Supported tensor types (type T): * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Inputs: * * 0: Input. * A 2-D tensor of type T, of shape [batch_size, input_size], where * “batch_size” corresponds to the batching dimension, and “input_size” * is the size of the input. * * 1: input_to_input_weights. * A 2-D tensor of type T, of shape [num_units, input_size], where * “num_units” corresponds to the number of cell units. * * 2: input_to_forget_weights. * A 2-D tensor of type T, of shape [num_units, input_size]. * * 3: input_to_cell_weights. * A 2-D tensor of type T, of shape [num_units, input_size]. * * 4: input_to_output_weights. * A 2-D tensor of type T, of shape [num_units, input_size]. * * 5: recurrent_to_input_weights. * A 2-D tensor of type T, of shape [num_units, output_size], where * “output_size” corresponds to either the number of cell units (i.e., * “num_units”), or the second dimension of the “projection_weights”, if * defined. * * 6: recurrent_to_forget_weights. * A 2-D tensor of type T, of shape [num_units, output_size]. * * 7: recurrent_to_cell_weights. * A 2-D tensor of type T, of shape [num_units, output_size]. * * 8: recurrent_to_output_weights. * A 2-D tensor of type T, of shape [num_units, output_size]. * * 9: cell_to_input_weights. * A 1-D tensor of type T, of shape [num_units]. * * 10:cell_to_forget_weights. * A 1-D tensor of type T, of shape [num_units]. * * 11:cell_to_output_weights. * A 1-D tensor of type T, of shape [num_units]. * * 12:input_gate_bias. * A 1-D tensor of type T, of shape [num_units]. * * 13:forget_gate_bias. * A 1-D tensor of type T, of shape [num_units]. * * 14:cell_bias. * A 1-D tensor of type T, of shape [num_units]. * * 15:output_gate_bias. * A 1-D tensor of type T, of shape [num_units]. * * 16:projection_weights. * A 2-D tensor of type T, of shape [output_size, num_units]. * * 17:projection_bias. * A 1-D tensor of type T, of shape [output_size]. * * 18: output_state (in). * A 2-D tensor of type T, of shape [batch_size, output_size]. * * 19: cell_state (in). * A 2-D tensor of type T, of shape [batch_size, num_units]. * * 20:fused_activation_function. * An optional {@link FuseCode} value indicating the activation * function. * If “NONE” is specified then it results in a linear activation. * * 21:cell_clip. * A clipping threshold for the cell state, such that values are bound * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is * disabled. * * 22:proj_clip. * A clipping threshold for the output from the projection layer, such * that values are bound within [-proj_clip, proj_clip]. If set to 0.0 * then clipping is disabled. * * Outputs: * * 0: scratch_buffer. * A 3-D tensor of type T, of shape [batch_size, num_cell, 4]. * * 1: output_state (out). * A 2-D tensor of type T, of shape [batch_size, output_size]. * * 2: cell_state (out). * A 2-D tensor of type T, of shape [batch_size, num_units]. * * 3: output. * A 2-D tensor of type T, of shape [batch_size, output_size]. This is * effectively the same as the current “output_state” value. */ ANEURALNETWORKS_LSTM = 16, /** Performs an 2-D max pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and padding. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * max_{i, j} (input[batch, row + i, col + j, channel]) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" data layout. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. * * 5: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 6: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 7: An INT32 value, specifying the filter width. * * 8: An INT32 value, specifying the filter height. * * 9: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An INT32 value, specifying the stride when walking through input * in the ‘width’ dimension. * * 3: An INT32 value, specifying the stride when walking through input * in the ‘height’ dimension. * * 4: An INT32 value, specifying the filter width. * * 5: An INT32 value, specifying the filter height. * * 6: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. */ ANEURALNETWORKS_MAX_POOL_2D = 17, /** Multiplies two tensors, element-wise. * * Takes two input tensors of identical type and compatible dimensions. The output * is the product of both input tensors, optionally modified by an activation function. * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the resulting output is the maximum size along each dimension of the * input operands. It starts with the trailing dimensions, and works its way forward. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * 1: A tensor of the same type, and compatible dimensions as input0. * * 2: An INT32 value, and has to be one of the {@link FuseCode} values. * Specifies the activation to invoke on the result of each addition. * * Outputs: * * 0: The product, a tensor of the same type as input0. * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the following * condition must be satisfied: output_scale > input1_scale * input2_scale. */ ANEURALNETWORKS_MUL = 18, /** Computes rectified linear activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = max(0, input) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * * Outputs: * * 0: The output tensor of same shape as input0. */ ANEURALNETWORKS_RELU = 19, /** Computes rectified linear 1 activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = min(1.f, max(-1.f, input)) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * * Outputs: * * 0: The output tensor of same shape as input0. */ ANEURALNETWORKS_RELU1 = 20, /** Computes rectified linear 6 activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = min(6, max(0, input)) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * * Outputs: * * 0: The output tensor of same shape as input0. */ ANEURALNETWORKS_RELU6 = 21, /** Reshapes a tensor. * * Given tensor, this operation returns a tensor that has the same values as tensor, * but with a newly specified shape. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the tensor to be reshaped. * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}, defining the shape * of the output tensor. The number of elements implied by shape must be the same * as the number of elements in the input tensor. * * Outputs: * * 0: The output tensor, of shape specified by the input shape. */ ANEURALNETWORKS_RESHAPE = 22, /** Resizes images to given size using the bilinear interpretation. * * Resized images will be distorted if their output aspect ratio is not the same as * input aspect ratio. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. * * 1: An INT32 value, specifying the output height of the output tensor. * * 2: An INT32 value, specifying the output width of the output tensor. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth]. */ ANEURALNETWORKS_RESIZE_BILINEAR = 23, /** * A basic recurrent neural network layer. * * This layer implements the operation: * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias) * * Where: * * “input_weights” is a weight matrix that multiplies the inputs; * * “recurrent_weights” is a weight matrix that multiplies the current * “state” which itself is the output from the previous time step * computation; * * “bias” is a bias vector (added to each output vector in the batch); * * “activation” is the function passed as the “fused_activation_function” * argument (if not “NONE”). * * Supported tensor types (Type T): * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Inputs: * * 0: input. * A 2-D tensor of type T, of shape [batch_size, input_size], where * “batch_size” corresponds to the batching dimension, and “input_size” is * the size of the input. * * 1: weights. * A 2-D tensor of type T, of shape [num_units, input_size], where * “num_units” corresponds to the number of units. * * 2: recurrent_weights. * A 2-D tensor of type T, of shape [num_units, num_units], with columns * corresponding to the weights from each unit. * * 3: bias. * A 1-D tensor of type T, of shape [num_units]. * * 4: hidden state (in). * A 2-D tensor of type T, of shape [batch_size, num_units]. * * 5: fused_activation_function. * An optional {@link FuseCode} value indicating the activation * function. If “NONE” is specified then it results in a linear * activation. * * Outputs: * * 0: hidden state (out). * A 2-D tensor of type T, of shape [batch_size, num_units]. * * * 1: output. * A 2-D tensor of type T, of shape [batch_size, num_units]. This is * effectively the same as the current state value. */ ANEURALNETWORKS_RNN = 24, /** Computes the softmax activation on the input tensor element-wise, per batch, by * normalizing the input vector so the maximum coefficient is zero. * * The output is calculated using this formula: * * output[batch, i] = * exp((input[batch, i] - max(input[batch, :])) * beta) / * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 2 or 4. * * Inputs: * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta. * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, * the scale must be 1.f / 256 and the zeroPoint must be 0. */ ANEURALNETWORKS_SOFTMAX = 25, /** Rearranges blocks of spatial data, into depth. * * More specifically, this op outputs a copy of the input tensor where values from * the height and width dimensions are moved to the depth dimension. * The value block_size indicates the input block size and how the data is moved. * * Chunks of data of size block_size * block_size from depth are rearranged into * non-overlapping blocks of size block_size x block_size. * * The depth of the output tensor is input_depth * block_size * block_size. * The input tensor's height and width must be divisible by block_size. * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and * block_size must be a divisor of both the input height and width. * * Outputs: * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size, * depth*block_size*block_size]. */ ANEURALNETWORKS_SPACE_TO_DEPTH = 26, /** * SVDF op is a kind of stateful layer derived from the notion that a * densely connected layer that's processing a sequence of input frames can * be approximated by using a singular value decomposition of each of its * nodes. The implementation is based on: * * https://research.google.com/pubs/archive/43813.pdf * * <NAME>, <NAME>, <NAME>, <NAME>. * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. * INTERSPEECH, 2015. * * It processes the incoming input using a 2-stage filtering mechanism: * * stage 1 performs filtering on the "features" dimension, whose outputs get * pushed into a memory of fixed-size memory_size. * * stage 2 performs filtering on the "time" dimension of the memory_size * memoized outputs of stage 1. * * Specifically, for rank 1, this layer implements the operation: * * memory = push(conv1d(inputs, weights_feature, feature_dim, * "ANEURALNETWORKS_PADDING_VALID")); * outputs = activation(memory * weights_time + bias); * * Where: * * “weights_feature” is a weights matrix that processes the inputs (by * convolving the input with every “feature filter”), and whose outputs get * pushed, stacked in order, into the fixed-size “memory” (the oldest entry * gets dropped); * * “weights_time” is a weights matrix that processes the “memory” (by a * batched matrix multiplication on the num_units); * * “bias” is an optional bias vector (added to each output vector in the * batch); and * * “activation” is the function passed as the “fused_activation_function” * argument (if not “NONE”). * * Each rank adds a dimension to the weights matrices by means of stacking * the filters. * * Supported tensor types (type T): * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Inputs: * * 0: input. * A 2-D tensor of type T, of shape [batch_size, input_size], where * “batch_size” corresponds to the batching dimension, and “input_size” is * the size of the input. * * 1: weights_feature. * A 2-D tensor of type T, of shape [num_units, input_size], where * “num_units” corresponds to the number of units. * * 2: weights_time. * A 2-D tensor of type T, of shape [num_units, memory_size], where * “memory_size” corresponds to the fixed-size of the memory. * * 3: bias. * An optional 1-D tensor of type T, of shape [num_units]. * * 4: state (in). * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. * * 5: rank. * The rank of the SVD approximation. * * 6: fused_activation_function. * An optional {@link FuseCode} value indicating the activation function. * If “NONE” is specified then it results in a linear activation. * * Outputs: * * 0: state (out). * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. * * 1: output. * A 2-D tensor of type T, of shape [batch_size, num_units]. */ ANEURALNETWORKS_SVDF = 27, /** Computes hyperbolic tangent of input tensor element-wise. * * The output is calculated using this formula: * * output = tanh(input) * * Supported tensor types: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * * Outputs: * * 0: The output tensor of same shape as input0. */ ANEURALNETWORKS_TANH = 28, } OperationCode; /** * Fused activation function types. * */ typedef enum { /** NO fused activation function. */ ANEURALNETWORKS_FUSED_NONE = 0, /** Fused ReLU activation function. */ ANEURALNETWORKS_FUSED_RELU = 1, /** Fused ReLU1 activation function. */ ANEURALNETWORKS_FUSED_RELU1 = 2, /** Fused ReLU6 activation function. */ ANEURALNETWORKS_FUSED_RELU6 = 3, } FuseCode; /** * Implicit padding algorithms. * */ typedef enum { /** * SAME padding. * Padding on both ends are the "same": * padding_to_beginning = total_padding / 2 * padding_to_end = (total_padding + 1)/2. * i.e., for even number of padding, padding to both ends are exactly * the same; for odd number of padding, padding to the ending is bigger * than the padding to the beginning by 1. * * total_padding is a function of input, stride and filter size. * It could be computed as follows: * out_size = (input + stride - 1) / stride; * needed_input = (out_size - 1) * stride + filter_size * total_padding = max(0, needed_input - output_size) * The computation is the same for the horizontal and vertical directions. */ ANEURALNETWORKS_PADDING_SAME = 1, /** * VALID padding. * No padding. When the input size is not evenly divisible by * the filter size, the input at the end that could not fill * the whole filter tile will simply be ignored. */ ANEURALNETWORKS_PADDING_VALID = 2, } PaddingCode; /** * Execution preferences. */ typedef enum { /** * Prefer executing in a way that minimizes battery drain. * This is desirable for compilations that will be executed often. */ ANEURALNETWORKS_PREFER_LOW_POWER = 0, /** * Prefer returning a single answer as fast as possible, even if this causes * more power consumption. */ ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1, /** * Prefer maximizing the throughput of successive frames, for example when * processing successive frames coming from the camera. */ ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2, } PreferenceCode; /** * Result codes. */ typedef enum { ANEURALNETWORKS_NO_ERROR = 0, ANEURALNETWORKS_OUT_OF_MEMORY = 1, ANEURALNETWORKS_INCOMPLETE = 2, ANEURALNETWORKS_UNEXPECTED_NULL = 3, ANEURALNETWORKS_BAD_DATA = 4, ANEURALNETWORKS_OP_FAILED = 5, ANEURALNETWORKS_UNMAPPABLE = 5, ANEURALNETWORKS_BAD_STATE = 6, } ResultCode; /** * For {@link ANeuralNetworksModel_setOperandValue}, values with a * length smaller or equal to this will be immediately copied into * the model. The size is in bytes. */ enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; /** * ANeuralNetworksMemory is an opaque type that represents memory. * * This type is used to represent shared memory, memory mapped files, * and similar memories. * * By using shared memory, a program can efficiently communicate to the * runtime and drivers the tensors that define a model. See * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application * should typically create one shared memory object that contains every tensor * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be * used to create shared memory from a file handle. {@link ANeuralNetworksMemory_createShared} * can be used to directly created shared memory. * * Memory objects can also be used to specify the input and output arguments of * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} * and {@link ANeuralNetworksExecution_setOutputFromMemory}. */ typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; /** * ANeuralNetworksModel is an opaque type that contains a description of the * mathematical operations that constitute the model. * * <p>The model will be built by calling<ul> * <li>{@link ANeuralNetworksModel_create},</li> * <li>{@link ANeuralNetworksModel_addOperation},</li> * <li>{@link ANeuralNetworksModel_addOperand},</li> * </ul> * * A model is completed by calling {@link ANeuralNetworksModel_finish}. * A model is destroyed by calling {@link ANeuralNetworksModel_free}. * * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} * has been called on it.</p> * * <p>It is the application's responsibility to make sure that only one thread * modifies a model at a given time. It is however safe for more than one * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> * * <p>It is also the application's responsibility to ensure that there are no other * uses of the model after calling {@link ANeuralNetworksModel_free}. * This includes any compilation or execution object created using the model.</p> */ typedef struct ANeuralNetworksModel ANeuralNetworksModel; /** * ANeuralNetworksCompilation is an opaque type that can be used to compile * a machine learning model. * * <p>To use:<ul> * <li>Create a new compilation instance by calling the * {@link ANeuralNetworksCompilation_create} function.</li> * <li>Set any desired properties on the compilation (for example, * {@link ANeuralNetworksCompilation_setPreference}).</li> * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> * <li>Use the compilation as many times as needed * with {@link ANeuralNetworksExecution_create}.</li> * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} * once all executions using the compilation have completed.</li></ul></p> * * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. * * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} * has been called on it.</p> * * <p>It is the application's responsibility to make sure that only * one thread modifies a compilation at a given time. It is however * safe for more than one thread to use the compilation once * {@link ANeuralNetworksCompilation_finish} has returned.</p> * * <p>It is also the application's responsibility to ensure that there are no other * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. * This includes any execution object created using the compilation.</p> */ typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; /** * ANeuralNetworksExecution is an opaque type that can be used to apply a machine * learning model to a set of inputs. * * <p>To use:<ul> * <li>Create a new execution instance by calling the * {@link ANeuralNetworksExecution_create} function.</li> * <li>Associate data to the model inputs with * {@link ANeuralNetworksExecution_setInput} or * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> * <li>Associate output buffers to the model outputs with * {@link ANeuralNetworksExecution_setOutput} or * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> * <li>Apply the model with {@link ANeuralNetworksExecution_startCompute}.</li> * <li>Wait for the execution to complete with {@link * ANeuralNetworksEvent_wait}.</li> * <li>Destroy the execution with * {@link ANeuralNetworksExecution_free}.</li></ul></p> * * <p>An execution cannot be modified once {@link ANeuralNetworksExecution_startCompute} * has been called on it.</p> * * <p>An execution can be applied to a model with * {@link ANeuralNetworksExecution_startCompute} only once. Create new executions * to do new evaluations of the model.</p> * * <p>It is the application's responsibility to make sure that only one thread * modifies an execution at a given time. It is however safe for more than one * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> * * <p>It is also the application's responsibility to ensure that there are no other * uses of the request after calling {@link ANeuralNetworksExecution_free}.</p> */ typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; /** * ANeuralNetworksOperandType describes the type of an operand. * This structure is used to describe both scalars and tensors. */ typedef struct ANeuralNetworksOperandType { /** The data type, e.g ANEURALNETWORKS_INT8. */ int32_t type; /** The number of dimensions. It should be 0 for scalars. */ uint32_t dimensionCount; /** The dimensions of the tensor. It should be nullptr for scalars. */ const uint32_t* dimensions; /** These two fields are only used for quantized tensors. * They should be zero for scalars and non-fixed point tensors. * The dequantized value of each entry is (value - zeroPoint) * scale. */ float scale; int32_t zeroPoint; } ANeuralNetworksOperandType; typedef int32_t ANeuralNetworksOperationType; /** * ANeuralNetworksEvent is an opaque type that represents an event * that will be signaled once an execution completes. */ typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; /** * Creates a shared memory object from a file descriptor. * * The shared memory is backed by a file descriptor via mmap. * See {@link ANeuralNetworksMemory} for a description on how to use * this shared memory. * * @param size The requested size in bytes. * Must not be larger than the file size. * @param prot The desired memory protection for the mapping. * It is either PROT_NONE or the bitwise OR of one or * more of the following flags: PROT_READ, PROT_WRITE. * @param fd The requested file descriptor. * The file descriptor has to be mmap-able. The file * descriptor will be duplicated. * @param offset The offset to the beginning of the file of the area to map. * The offset has to be aligned to a page size. * @param memory The memory object to be created. * Set to NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. */ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory); /** * Delete a memory object. * * Destroys the object used by the run time to keep track of the memory. * This will free the underlying actual memory if no other code has open * handles to this memory. * * @param memory The memory object to be freed. */ void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory); /** * Create an empty {@link ANeuralNetworksModel}. * * <p>This only creates the object. Computation is performed once * {@link ANeuralNetworksExecution_startCompute} is invoked. * * The model should be constructed with calls to * {@link ANeuralNetworksModel_addOperation} and * {@link ANeuralNetworksModel_addOperand} * * <p>{@link ANeuralNetworksModel_finish} should be called once the model * has been fully constructed.</p> * * <p>{@link ANeuralNetworksModel_free} should be called once the model * is no longer needed.</p> * * @param model The {@link ANeuralNetworksModel} to be created. * Set to NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_create(ANeuralNetworksModel** model); /** * Destroy a model. * * The model need not have been finished by a call to * {@link ANeuralNetworksModel_finish}. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @param model The model to be destroyed. Passing NULL is acceptable and * results in no operation. */ void ANeuralNetworksModel_free(ANeuralNetworksModel* model); /** * Indicate that we have finished modifying a model. Required before * calling {@link ANeuralNetworksCompilation_create}. * * An application is responsible to make sure that no other thread uses * the model at the same time. * * This function must only be called once for a given model. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @param model The model to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_finish(ANeuralNetworksModel* model); /** * Add an operand to a model. * * The order in which the operands are added is important. The first one added * to a model will have the index value 0, the second 1, etc. These indexes are * used as operand identifiers in {@link ANeuralNetworksModel_addOperation}, * {@link ANeuralNetworksExecution_setInput}, * {@link ANeuralNetworksExecution_setInputFromMemory}, * {@link ANeuralNetworksExecution_setOutput}, * {@link ANeuralNetworksExecution_setOutputFromMemory} and * {@link ANeuralNetworksExecution_setOperandValue}. * * To build a model that can accommodate inputs of various sizes, as you may want * to do for a CNN, set the size of the dimensions that will vary at run time to 0. * If you do so, provide the full dimensions when calling * {@link ANeuralNetworksExecution_setInput} or {@link ANeuralNetworksExecution_setInputFromMemory}. * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @param model The model to be modified. * @param type The {@link ANeuralNetworksOperandType} that describes the shape * of the operand. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type); /** * Sets an operand to a constant value. * * Values of length smaller or equal to * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} * are immediately copied into the model. * * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, * a pointer to the buffer is stored within the model. The application is responsible * for not changing the content of this region until all executions using this model * have completed. As the data may be copied during processing, modifying the data * after this call yields undefined results. * * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory} * is likely to be more efficient. * * To indicate that an optional operand should be considered missing, * pass nullptr for buffer and 0 for length. * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @param model The model to be modified. * @param index The index of the model operand we're setting. * @param buffer A pointer to the data to use. * @param length The size in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, const void* buffer, size_t length); /** * Sets an operand to a value stored in a memory object. * * The content of the memory is not copied. A reference to that memory is stored * inside the model. The application is responsible for not changing the content * of the memory region until all executions using this model have completed. * As the data may be copied during processing, modifying the data after this call * yields undefined results. * * To indicate that an optional operand should be considered missing, * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer. * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @param model The model to be modified. * @param index The index of the model operand we're setting. * @param buffer A pointer to the data to use. * @param memory The memory containing the data. * @param offset This specifies the location of the data within the memory. * The offset is in bytes from the start of memory. * @param length The size in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length); /** * Add an operation to a model. * * @param model The model to be modified. * @param type The type of the operation. * @param inputCount The number of entries in the inputs array. * @param inputs An array of indexes identifying each operand. * @param outputCount The number of entries in the outputs array. * @param outputs An array of indexes identifying each operand. * * The operands specified by inputs and outputs must have been * previously added by calls to {@link ANeuralNetworksModel_addOperand}. * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs); /** * Specfifies which operands will be the model's inputs and outputs. * * An operand cannot be used for both input and output. Doing so will * return an error. * * @param model The model to be modified. * @param inputCount The number of entries in the inputs array. * @param inputs An array of indexes identifying the input operands. * @param outputCount The number of entries in the outputs array. * @param outputs An array of indexes identifying the output operands. * * The operands specified by inputs and outputs must have been * previously added by calls to {@link ANeuralNetworksModel_addOperand}. * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * */ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs); /** * Create a {@link ANeuralNetworksCompilation} to compile the given model. * * <p>This only creates the object. Compilation is only performed once * {@link ANeuralNetworksCompilation_finish} is invoked.</p> * * <p>{@link ANeuralNetworksCompilation_finish} should be called once * all desired properties have been set on the compilation.</p> * * <p>{@link ANeuralNetworksModel_free} should be called once the compilation * is no longer needed.</p> * * <p>The provided model must outlive the compilation.</p> * * The model must already have been finished by a call to * {@link ANeuralNetworksModel_finish}. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * * @param model The {@link ANeuralNetworksModel} to be compiled. * @param compilation The newly created object or NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA * if the model is invalid. */ int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation); /** * Destroy a compilation. * * The compilation need not have been finished by a call to * {@link ANeuralNetworksModel_finish}. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * * @param compilation The compilation to be destroyed. Passing NULL is acceptable and * results in no operation. */ void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation); /** * Sets the execution preference. * * <p>Provides guidance to the runtime when trade-offs are possible.</p> * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * * @param compilation The compilation to be modified. * @param preference Either {@link PREFER_LOW_POWER}, * {@link PREFER_SINGLE_FAST_ANSWER}, or * {@link PREFER_SUSTAINED_SPEED}. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation, int32_t preference); /** * Indicate that we have finished modifying a compilation. Required before * calling {@link ANeuralNetworksExecution_create}. * * An application is responsible to make sure that no other thread uses * the compilation at the same time. * * This function must only be called once for a given compilation. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * * @param compilation The compilation to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation); /** * Create a {@link ANeuralNetworksExecution} to apply the given compilation. * This only creates the object. Computation is only performed once * {@link ANeuralNetworksExecution_startCompute} is invoked. * * <p>The provided compilation must outlive the execution.</p> * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. * @param execution The newly created object or NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA * if the compilation is invalid. */ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, ANeuralNetworksExecution** execution); /** * Destroy an execution. * * <p>If called on an execution for which * {@link ANeuralNetworksExecution_startCompute} has been called, the * function will return immediately but will mark the execution to be deleted * once the computation completes. The related {@link ANeuralNetworksEvent} * will be signaled and the {@link ANeuralNetworksEvent_wait} will return * ANEURALNETWORKS_ERROR_DELETED. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be destroyed. Passing NULL is acceptable and * results in no operation. */ void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution); /** * Associate a user buffer with an input of the model of the * {@link ANeuralNetworksExecution}. * * <p>The provided buffer must outlive the execution.</p> * * If the input is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is * an index into the lists passed to * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not * the index associated with {@link ANeuralNetworksModel_addOperand}. * @param type The type of the operand. This should be used to specify the * dimensions that were set to 0 when the operand was added to the * model. All other properties of the type must be the same as * specified in the model. If the type is the same as specified * when the model was built, NULL can be passed. * @param buffer The buffer containing the data. * @param length The length in bytes of the buffer. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the input. */ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length); /** * Associate part of a memory object with an input of the model of the * {@link ANeuralNetworksExecution}. * * <p>The provided memory must outlive the execution.</p> * * If the input is optional, you can indicate that it is omitted by * using @{Link ANeuralNetworks_setInput} instead, passing nullptr for buffer * and 0 for length. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is * an index into the lists passed to * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not * the index associated with {@link ANeuralNetworksModel_addOperand}. * @param type The type of the operand. This can be used to specify the * dimensions that were set to 0 when the operand was added to the * model. All other values must be the same as specified in the * model. If the type is the same as specified when the model * was built, NULL can be passed. * @param memory The memory containing the data. * @param offset This specifies the location of the data whithin the memory. * The offset is in bytes from the start of memory. * @param length The size in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the input. */ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, size_t length); /** * Associate a user buffer with an output of the model of the * {@link ANeuralNetworksExecution}. * * If the output is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. * * <p>The provided buffer must outlive the execution.</p> * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is * an index into the lists passed to * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not * the index associated with {@link ANeuralNetworksModel_addOperand}. * @param type The type of the operand. This can be used to specify the * dimensions that were set to 0 when the operand was added to the * model. All other values must be the same as specified in the * model. If the type is the same as specified when the model * was built, NULL can be passed. * @param buffer The buffer where the data is to be written. * @param length The length in bytes of the buffer. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the output. */ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, void* buffer, size_t length); /** * Associate part of a memory object with an output of the model of the * {@link ANeuralNetworksExecution}. * * If the output is optional, you can indicate that it is omitted by * using @{Link ANeuralNetworks_setOutput} instead, passing nullptr for buffer * and 0 for length. * * <p>The provided memory must outlive the execution.</p> * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is * an index into the lists passed to * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not * the index associated with {@link ANeuralNetworksModel_addOperand}. * @param type The type of the operand. This can be used to specify the * dimensions that were set to 0 when the operand was added to the * model. All other values must be the same as specified in the * model. If the type is the same as specified when the model * was built, NULL can be passed. * @param memory The memory where the data is to be stored. * @param offset This specifies the location of the data whithin the memory. * The offset is in bytes from the start of memory. * @param length The length in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the output. */ int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, size_t length); /** * Schedule evaluation of the execution. * * <p>Schedules evaluation of the execution. Once the model has been * applied and the outputs are ready to be consumed, the returned event will be * signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that event. * </p> * * Multiple executions can be scheduled and evaluated concurrently. The * runtime makes no guarantee on the ordering of completion of * executions. If it's important to the application, the application * should enforce the ordering by using * {@link ANeuralNetworksEvent_wait}. * * ANeuralNetworksEvent_wait must be called to recuperate the resources used * by the execution. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be scheduled and executed. * @param event The event that will be signaled on completion. event is set to * NULL if there's an error. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event); /** * Waits until the execution completes. * * More than one thread can wait on an event. When the execution completes, * all threads will be released. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. */ int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event); /** * Destroys the event. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. */ void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event); __END_DECLS #endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H /** @} */
31,000
348
<gh_stars>100-1000 {"nom":"Pujols","circ":"3ème circonscription","dpt":"Lot-et-Garonne","inscrits":2924,"abs":1461,"votants":1463,"blancs":108,"nuls":42,"exp":1313,"res":[{"nuance":"REM","nom":"<NAME>","voix":799},{"nuance":"FN","nom":"M. <NAME>","voix":514}]}
108
945
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iotdb.db.engine.version; /** * VersionController manages the version(a monotonically increasing long) of a storage group. We * define that each memtable flush, data deletion, or data update will generate a new version of * dataset. So upon the above actions are performed, a new version number is generated and assigned * to such actions. Additionally, we also assign versions to TsFiles in their file names, so * hopefully we will compare files directly across IoTDB replicas. NOTICE: Thread-safety should be * guaranteed by the caller. */ public interface VersionController { /** * Get the next version number. * * @return the next version number. */ long nextVersion(); /** * Get the current version number. * * @return the current version number. */ long currVersion(); }
415
1,080
<filename>neutron/tests/unit/extensions/test_port_device_profile.py # Copyright (c) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from neutron_lib.api.definitions import port_device_profile as apidef from neutron_lib.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import port_device_profile_db as pdp_db from neutron.tests.unit.db import test_db_base_plugin_v2 class PortDeviceProfileExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2, pdp_db.PortDeviceProfileMixin): """Test plugin to mixin the port device profile extension.""" supported_extension_aliases = [apidef.ALIAS] def create_port(self, context, port): with db_api.CONTEXT_WRITER.using(context): new_port = super(PortDeviceProfileExtensionTestPlugin, self).create_port(context, port) self._process_create_port(context, port['port'], new_port) return new_port @ddt.ddt class PortDeviceProfileExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test API extension numa_affinity_policy attributes.""" def setUp(self, *args): plugin = ('neutron.tests.unit.extensions.test_port_device_profile.' 'PortDeviceProfileExtensionTestPlugin') super(PortDeviceProfileExtensionTestCase, self).setUp(plugin=plugin) @ddt.data('device_profile_1', None) def test_create_and_check_port_device_profile(self, device_profile): keys = [('name', 'name_1'), ('admin_state_up', True), ('status', self.port_create_status), ('device_profile', device_profile)] with self.port(name='name_1', device_profile=device_profile) as port: for k, v in keys: self.assertEqual(v, port['port'][k]) return port
928
3,897
<reponame>thl-mot/mbed-os /** ****************************************************************************** * @file stm32u5xx_ll_lptim.c * @author MCD Application Team * @brief LPTIM LL module driver. ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ #if defined(USE_FULL_LL_DRIVER) /* Includes ------------------------------------------------------------------*/ #include "stm32u5xx_ll_lptim.h" #include "stm32u5xx_ll_bus.h" #include "stm32u5xx_ll_rcc.h" #ifdef USE_FULL_ASSERT #include "stm32_assert.h" #else #define assert_param(expr) ((void)0U) #endif /* USE_FULL_ASSERT */ /** @addtogroup STM32U5xx_LL_Driver * @{ */ #if defined (LPTIM1) || defined (LPTIM2) || defined (LPTIM3) || defined (LPTIM4) /** @addtogroup LPTIM_LL * @{ */ /* Private types -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private constants ---------------------------------------------------------*/ /* Private macros ------------------------------------------------------------*/ /** @addtogroup LPTIM_LL_Private_Macros * @{ */ #define IS_LL_LPTIM_CLOCK_SOURCE(__VALUE__) (((__VALUE__) == LL_LPTIM_CLK_SOURCE_INTERNAL) \ || ((__VALUE__) == LL_LPTIM_CLK_SOURCE_EXTERNAL)) #define IS_LL_LPTIM_CLOCK_PRESCALER(__VALUE__) (((__VALUE__) == LL_LPTIM_PRESCALER_DIV1) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV2) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV4) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV8) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV16) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV32) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV64) \ || ((__VALUE__) == LL_LPTIM_PRESCALER_DIV128)) #define IS_LL_LPTIM_WAVEFORM(__VALUE__) (((__VALUE__) == LL_LPTIM_OUTPUT_WAVEFORM_PWM) \ || ((__VALUE__) == LL_LPTIM_OUTPUT_WAVEFORM_SETONCE)) /** * @} */ /* Private function prototypes -----------------------------------------------*/ /* Private functions ---------------------------------------------------------*/ /** @defgroup LPTIM_Private_Functions LPTIM Private Functions * @{ */ /** * @} */ /* Exported functions --------------------------------------------------------*/ /** @addtogroup LPTIM_LL_Exported_Functions * @{ */ /** @addtogroup LPTIM_LL_EF_Init * @{ */ /** * @brief Set LPTIMx registers to their reset values. * @param LPTIMx LP Timer instance * @retval An ErrorStatus enumeration value: * - SUCCESS: LPTIMx registers are de-initialized * - ERROR: invalid LPTIMx instance */ ErrorStatus LL_LPTIM_DeInit(LPTIM_TypeDef *LPTIMx) { ErrorStatus result = SUCCESS; /* Check the parameters */ assert_param(IS_LPTIM_INSTANCE(LPTIMx)); if (LPTIMx == LPTIM1) { LL_APB3_GRP1_ForceReset(LL_APB3_GRP1_PERIPH_LPTIM1); LL_APB3_GRP1_ReleaseReset(LL_APB3_GRP1_PERIPH_LPTIM1); } else if (LPTIMx == LPTIM2) { LL_APB1_GRP2_ForceReset(LL_APB1_GRP2_PERIPH_LPTIM2); LL_APB1_GRP2_ReleaseReset(LL_APB1_GRP2_PERIPH_LPTIM2); } else if (LPTIMx == LPTIM3) { LL_APB3_GRP1_ForceReset(LL_APB3_GRP1_PERIPH_LPTIM3); LL_APB3_GRP1_ReleaseReset(LL_APB3_GRP1_PERIPH_LPTIM3); } else if (LPTIMx == LPTIM4) { LL_APB3_GRP1_ForceReset(LL_APB3_GRP1_PERIPH_LPTIM4); LL_APB3_GRP1_ReleaseReset(LL_APB3_GRP1_PERIPH_LPTIM4); } else { result = ERROR; } return result; } /** * @brief Set each fields of the LPTIM_InitStruct structure to its default * value. * @param LPTIM_InitStruct pointer to a @ref LL_LPTIM_InitTypeDef structure * @retval None */ void LL_LPTIM_StructInit(LL_LPTIM_InitTypeDef *LPTIM_InitStruct) { /* Set the default configuration */ LPTIM_InitStruct->ClockSource = LL_LPTIM_CLK_SOURCE_INTERNAL; LPTIM_InitStruct->Prescaler = LL_LPTIM_PRESCALER_DIV1; LPTIM_InitStruct->Waveform = LL_LPTIM_OUTPUT_WAVEFORM_PWM; } /** * @brief Configure the LPTIMx peripheral according to the specified parameters. * @note LL_LPTIM_Init can only be called when the LPTIM instance is disabled. * @note LPTIMx can be disabled using unitary function @ref LL_LPTIM_Disable(). * @param LPTIMx LP Timer Instance * @param LPTIM_InitStruct pointer to a @ref LL_LPTIM_InitTypeDef structure * @retval An ErrorStatus enumeration value: * - SUCCESS: LPTIMx instance has been initialized * - ERROR: LPTIMx instance hasn't been initialized */ ErrorStatus LL_LPTIM_Init(LPTIM_TypeDef *LPTIMx, LL_LPTIM_InitTypeDef *LPTIM_InitStruct) { ErrorStatus result = SUCCESS; /* Check the parameters */ assert_param(IS_LPTIM_INSTANCE(LPTIMx)); assert_param(IS_LL_LPTIM_CLOCK_SOURCE(LPTIM_InitStruct->ClockSource)); assert_param(IS_LL_LPTIM_CLOCK_PRESCALER(LPTIM_InitStruct->Prescaler)); assert_param(IS_LL_LPTIM_WAVEFORM(LPTIM_InitStruct->Waveform)); /* The LPTIMx_CFGR register must only be modified when the LPTIM is disabled (ENABLE bit is reset to 0). */ if (LL_LPTIM_IsEnabled(LPTIMx) == 1UL) { result = ERROR; } else { /* Set CKSEL bitfield according to ClockSource value */ /* Set PRESC bitfield according to Prescaler value */ /* Set WAVE bitfield according to Waveform value */ MODIFY_REG(LPTIMx->CFGR, (LPTIM_CFGR_CKSEL | LPTIM_CFGR_PRESC | LPTIM_CFGR_WAVE), LPTIM_InitStruct->ClockSource | \ LPTIM_InitStruct->Prescaler | \ LPTIM_InitStruct->Waveform); } return result; } /** * @} */ /** * @} */ /** * @} */ #endif /* LPTIM1 || LPTIM2 || LPTIM3 || LPTIM4 */ /** * @} */ #endif /* USE_FULL_LL_DRIVER */
2,819
3,285
""" Copyright 2017 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cvxpy.expressions import cvxtypes from cvxpy.problems.objective import Maximize, Minimize from cvxpy.reductions.reduction import Reduction class FlipObjective(Reduction): """Flip a minimization objective to a maximization and vice versa. """ def accepts(self, problem) -> bool: return True def apply(self, problem): """:math:`\\max(f(x)) = -\\min(-f(x))` Parameters ---------- problem : Problem The problem whose objective is to be flipped. Returns ------- Problem A problem with a flipped objective. list The inverse data. """ is_maximize = type(problem.objective) == Maximize objective = Minimize if is_maximize else Maximize problem = cvxtypes.problem()(objective(-problem.objective.expr), problem.constraints) return problem, [] def invert(self, solution, inverse_data): """Map the solution of the flipped problem to that of the original. Parameters ---------- solution : Solution A solution object. inverse_data : list The inverse data returned by an invocation to apply. Returns ------- Solution A solution to the original problem. """ if solution.opt_val is not None: solution.opt_val = -solution.opt_val return solution
774
479
<filename>src/util/common.h #pragma once /** * Common definitions and helpers. */ #include <stdlib.h> enum { UTIL_TRISTATE_UNSET, UTIL_TRISTATE_YES, UTIL_TRISTATE_NO, _UTIL_TRISTATE_N, };
115
746
<filename>mmdeploy/backend/ncnn/quant.py # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from subprocess import call from typing import List import mmcv from .init_plugins import get_ncnn2int8_path def get_quant_model_file(onnx_path: str, work_dir: str) -> List[str]: """Returns the path to quant onnx and table with export result. Args: onnx_path (str): The path to the fp32 onnx model. work_dir (str): The path to the directory for saving the results. Returns: List[str]: The path to the files where the export result will be located. """ mmcv.mkdir_or_exist(osp.abspath(work_dir)) base_name = osp.splitext(osp.split(onnx_path)[1])[0] quant_onnx = osp.join(work_dir, base_name + '_quant.onnx') quant_table = osp.join(work_dir, base_name + '.table') quant_param = osp.join(work_dir, base_name + '_int8.param') quant_bin = osp.join(work_dir, base_name + '_int8.bin') return [quant_onnx, quant_table, quant_param, quant_bin] def ncnn2int8(param: str, bin: str, table: str, int8_param: str, int8_bin: str): """Convert ncnn float model to quantized model. The inputs of ncnn include float model and weight file. We need to use a executable program to convert the float model to int8 model with calibration table. Example: >>> from mmdeploy.backend.ncnn.quant import ncnn2int8 >>> param = 'work_dir/end2end.param' >>> bin = 'work_dir/end2end.bin' >>> table = 'work_dir/end2end.table' >>> int8_param = 'work_dir/end2end_int8.param' >>> int8_bin = 'work_dir/end2end_int8.bin' >>> ncnn2int8(param, bin, table, int8_param, int8_bin) Args: param (str): The path of ncnn float model graph. bin (str): The path of ncnn float weight model weight. table (str): The path of ncnn calibration table. int8_param (str): The path of ncnn low bit model graph. int8_bin (str): The path of ncnn low bit weight model weight. """ ncnn2int8 = get_ncnn2int8_path() call([ncnn2int8, param, bin, int8_param, int8_bin, table])
899
851
<gh_stars>100-1000 /* * Copyright 2007 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ // // A reuseable entry point for gunit tests. #if defined(WEBRTC_WIN) #include <crtdbg.h> #endif #include "rtc_base/flags.h" #include "rtc_base/gunit.h" #include "rtc_base/logging.h" #include "rtc_base/ssladapter.h" #include "rtc_base/sslstreamadapter.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "test/field_trial.h" #if defined(WEBRTC_WIN) #include "rtc_base/win32socketinit.h" #endif #if defined(WEBRTC_IOS) #include "test/ios/test_support.h" #endif WEBRTC_DEFINE_bool(help, false, "prints this message"); WEBRTC_DEFINE_string(log, "", "logging options to use"); WEBRTC_DEFINE_string( force_fieldtrials, "", "Field trials control experimental feature code which can be forced. " "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/" " will assign the group Enable to field trial WebRTC-FooFeature."); #if defined(WEBRTC_WIN) WEBRTC_DEFINE_int(crt_break_alloc, -1, "memory allocation to break on"); WEBRTC_DEFINE_bool( default_error_handlers, false, "leave the default exception/dbg handler functions in place"); void TestInvalidParameterHandler(const wchar_t* expression, const wchar_t* function, const wchar_t* file, unsigned int line, uintptr_t pReserved) { // In order to log `expression`, `function`, and `file` here, we would have // to convert them to const char*. std::wcsrtombs can do that, but it's // locale dependent. RTC_LOG(LS_ERROR) << "InvalidParameter Handler called. Exiting."; exit(1); } void TestPureCallHandler() { RTC_LOG(LS_ERROR) << "Purecall Handler called. Exiting."; exit(1); } int TestCrtReportHandler(int report_type, char* msg, int* retval) { RTC_LOG(LS_ERROR) << "CrtReport Handler called..."; RTC_LOG(LS_ERROR) << msg; if (report_type == _CRT_ASSERT) { exit(1); } else { *retval = 0; return TRUE; } } #endif // WEBRTC_WIN int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, false); if (FLAG_help) { rtc::FlagList::Print(nullptr, false); return 0; } webrtc::test::ValidateFieldTrialsStringOrDie(FLAG_force_fieldtrials); // InitFieldTrialsFromString stores the char*, so the char array must outlive // the application. webrtc::field_trial::InitFieldTrialsFromString(FLAG_force_fieldtrials); webrtc::metrics::Enable(); #if defined(WEBRTC_WIN) rtc::WinsockInitializer winsock_init; if (!FLAG_default_error_handlers) { // Make sure any errors don't throw dialogs hanging the test run. _set_invalid_parameter_handler(TestInvalidParameterHandler); _set_purecall_handler(TestPureCallHandler); _CrtSetReportHook2(_CRT_RPTHOOK_INSTALL, TestCrtReportHandler); } #if !defined(NDEBUG) // Turn on memory leak checking on Windows. _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF); if (FLAG_crt_break_alloc >= 0) { _crtBreakAlloc = FLAG_crt_break_alloc; } #endif #endif // WEBRTC_WIN // By default, log timestamps. Allow overrides by used of a --log flag. rtc::LogMessage::LogTimestamps(); if (*FLAG_log != '\0') { rtc::LogMessage::ConfigureLogging(FLAG_log); } else if (rtc::LogMessage::GetLogToDebug() > rtc::LS_INFO) { // Default to LS_INFO, even for release builds to provide better test // logging. rtc::LogMessage::LogToDebug(rtc::LS_INFO); } // Initialize SSL which are used by several tests. rtc::InitializeSSL(); rtc::SSLStreamAdapter::enable_time_callback_for_testing(); #if defined(WEBRTC_IOS) rtc::test::InitTestSuite(RUN_ALL_TESTS, argc, argv, false); rtc::test::RunTestsFromIOSApp(); #endif const int res = RUN_ALL_TESTS(); rtc::CleanupSSL(); // clean up logging so we don't appear to leak memory. rtc::LogMessage::ConfigureLogging(""); #if defined(WEBRTC_WIN) // Unhook crt function so that we don't ever log after statics have been // uninitialized. if (!FLAG_default_error_handlers) _CrtSetReportHook2(_CRT_RPTHOOK_REMOVE, TestCrtReportHandler); #endif #if defined(ADDRESS_SANITIZER) || defined(LEAK_SANITIZER) || \ defined(MEMORY_SANITIZER) || defined(THREAD_SANITIZER) || \ defined(UNDEFINED_SANITIZER) // We want the test flagged as failed only for sanitizer defects, // in which case the sanitizer will override exit code with 66. return 0; #endif return res; }
1,877
313
package com.daivd.chart.component; import android.graphics.Canvas; import android.graphics.Paint; import android.graphics.Path; import android.graphics.Rect; import com.daivd.chart.component.base.IChartTitle; import com.daivd.chart.component.base.PercentComponent; import com.daivd.chart.data.style.FontStyle; /** * 绘制标题 * @author huangyanbin */ public class ChartTitle extends PercentComponent<String> implements IChartTitle { /** * 图表标题最大占比 */ private static final float MAX_PERCENT =0.4f; /** * 标题字体样式 */ private FontStyle fontStyle= new FontStyle(); private Path path = new Path(); /** * 设置标题占比 * @param percent 百分比 */ @Override public void setPercent(float percent) { if(percent > MAX_PERCENT){ percent = MAX_PERCENT; } super.setPercent(percent); } /** * 绘制标题 * <p>通过设置标题方位绘制标题</p> * @param canvas 画布 * @param chartName 图表标题 * @param paint 画笔 */ @Override public void draw(Canvas canvas, String chartName, Paint paint) { fontStyle.fillPaint(paint); Paint.FontMetrics fontMetrics = paint.getFontMetrics(); paint.setTextAlign(Paint.Align.LEFT); float textHeight = fontMetrics.descent - fontMetrics.ascent; int textWidth = (int)paint.measureText(chartName); Rect rect = getRect(); int startY = rect.centerY(); int startX = rect.centerX(); path.rewind(); switch (direction) { case TOP: case BOTTOM: startY-= textHeight/2; startX -=textWidth/2; canvas.drawText(chartName, startX, startY, paint); break; case LEFT: case RIGHT: path.moveTo(startX,rect.top); path.lineTo(startX,rect.bottom); canvas.drawTextOnPath(chartName,path,(rect.height()-textWidth)/2,0,paint); break; } } /** * 获取标题字体样式 * @return 标题字体样式 */ public FontStyle getFontStyle() { return fontStyle; } /** * 设置标题字体样式 * @param fontStyle 标题字体样式 */ public void setFontStyle(FontStyle fontStyle) { this.fontStyle = fontStyle; } }
1,203
868
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.tests.integration.openwire.amq; import javax.jms.Connection; import javax.jms.ConnectionFactory; import javax.jms.MessageConsumer; import javax.jms.MessageProducer; import javax.jms.Queue; import javax.jms.Session; import javax.jms.TextMessage; import org.apache.activemq.ActiveMQConnectionFactory; import org.apache.activemq.artemis.tests.integration.openwire.BasicOpenWireTest; import org.junit.Assert; import org.junit.Test; public class ReconnectFailoverTest extends BasicOpenWireTest { @Test public void testReconnectOnFailoverWithClientID() throws Exception { ConnectionFactory failoverFactory = new ActiveMQConnectionFactory("failover:(tcp://localhost:61616)"); Connection connection = failoverFactory.createConnection(); try { connection.setClientID("foo"); Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); server.getRemotingService().getConnections().forEach(c -> c.getTransportConnection().forceClose()); Queue tempQueue = session.createTemporaryQueue(); MessageProducer producer = session.createProducer(tempQueue); for (int i = 0; i < 10; i++) { producer.send(session.createTextMessage("hello")); } connection.start(); MessageConsumer consumer = session.createConsumer(tempQueue); for (int i = 0; i < 10; i++) { TextMessage message = (TextMessage) consumer.receive(1000); Assert.assertNotNull(message); Assert.assertEquals("hello", message.getText()); } } finally { connection.close(); } } }
811
511
<reponame>SenthilKumarGS/TizenRT /* Copyright 2015-present Samsung Electronics Co., Ltd. and other contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /**************************************************************************** * Copyright (C) 2013 <NAME>. All rights reserved. * Author: <NAME> <<EMAIL>> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <apps/shell/tash.h> #include <tinyara/arch.h> #include <tinyara/config.h> #include <setjmp.h> #include <stdio.h> #define USE_IOTJS_THREAD 1 /** * Compiler built-in setjmp function. * * @return 0 when called the first time * 1 when returns from a longjmp call */ int setjmp(jmp_buf buf) { return __builtin_setjmp(buf); } /* setjmp */ /** * Compiler built-in longjmp function. * * Note: * ignores value argument */ void longjmp(jmp_buf buf, int value) { /* Must be called with 1. */ __builtin_longjmp(buf, 1); } /* longjmp */ int iotjs_entry(int argc, char **argv); int tuv_cleanup(void); #if USE_IOTJS_THREAD struct iotjs_thread_arg { int argc; char **argv; }; pthread_addr_t iotjs_thread(void *thread_arg) { struct iotjs_thread_arg *arg = thread_arg; int ret = 0; ret = iotjs_entry(arg->argc, arg->argv); tuv_cleanup(); sleep(1); printf("iotjs thread end\n"); return NULL; } int iotjs(int argc, char *argv[]) { pthread_attr_t attr; int status; struct sched_param sparam; pthread_t tid; struct iotjs_thread_arg arg; status = pthread_attr_init(&attr); if (status != 0) { printf("fail to initialize iotjs thread\n"); return -1; } sparam.sched_priority = CONFIG_IOTJS_PRIORITY; status = pthread_attr_setschedparam(&attr, &sparam); status = pthread_attr_setschedpolicy(&attr, SCHED_RR); status = pthread_attr_setstacksize(&attr, CONFIG_IOTJS_STACKSIZE); arg.argc = argc; arg.argv = argv; status = pthread_create(&tid, &attr, iotjs_thread, &arg); if (status < 0) { printf("fail to start iotjs thread\n"); return -1; } pthread_setname_np(tid, "iotjs_thread"); pthread_join(tid, NULL); return 0; } #else static int iotjs(int argc, char *argv[]) { int ret = 0; ret = iotjs_entry(argc, argv); tuv_cleanup(); return ret; } #endif int iotjs_register_cmds(void) { tash_cmd_install("iotjs", iotjs, TASH_EXECMD_SYNC); return 0; } #ifdef CONFIG_BUILD_KERNEL int main(int argc, FAR char *argv[]) #else int iotjs_main(int argc, char *argv[]) #endif { return iotjs_register_cmds(); }
1,539
474
package org.javacord.api.listener.channel.server; import org.javacord.api.event.channel.server.ServerChannelChangeNsfwFlagEvent; import org.javacord.api.listener.GloballyAttachableListener; import org.javacord.api.listener.ObjectAttachableListener; import org.javacord.api.listener.channel.server.text.ServerTextChannelAttachableListener; import org.javacord.api.listener.server.ServerAttachableListener; /** * This listener listens to server channel nsfw flag changes. */ @FunctionalInterface public interface ServerChannelChangeNsfwFlagListener extends ServerAttachableListener, ServerTextChannelAttachableListener, ChannelCategoryAttachableListener, GloballyAttachableListener, ObjectAttachableListener { /** * This method is called every time a server channel's nsfw flag changes. * * @param event The event. */ void onServerChannelChangeNsfwFlag(ServerChannelChangeNsfwFlagEvent event); }
360
711
package com.java110.community.api; import com.alibaba.fastjson.JSONObject; import com.java110.community.bmo.repair.IAppraiseRepairBMO; import com.java110.dto.appraise.AppraiseDto; import com.java110.utils.util.BeanConvertUtil; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; /** * 报修 控制类 */ @RestController @RequestMapping("/repair") public class RepairApi { @Autowired private IAppraiseRepairBMO appraiseRepairBMOImpl; /** * 报修评价 * * @param reqJson * @return */ @RequestMapping(value = "/appraiseRepair", method = RequestMethod.POST) public ResponseEntity<String> appraiseRepair(@RequestBody JSONObject reqJson) { AppraiseDto appraiseDto = BeanConvertUtil.covertBean(reqJson,AppraiseDto.class); return appraiseRepairBMOImpl.appraiseRepair(appraiseDto); } }
424
435
<filename>montreal-python/videos/python-et-postgresql-un-mariage-merveilleux-mp54.json<gh_stars>100-1000 { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "Montr\u00e9al, 14 septembre 2014 - Python et PostgreSQL sont deux outils que nous aimons bien utiliser pour nos projets, mais savons-nous vraiment tout d'eux? Cette pr\u00e9sentation de St\u00e9phane Wirtel donne un bon aper\u00e7u de psycopg2, Peewee, SQLAlchemy, Alembic et PL/Python, ces librairies pouvant \u00eatre utilis\u00e9es avec PostgreSQL.\n\n* \u00c0 propos de St\u00e9phane: http://sflx.ca/swirtel\n* Diapositives: https://goo.gl/UsPLwh\n\n\n----------------------------------------\n\nLe myst\u00e8re des tests unitaires #MP54\n\nMontr\u00e9al, 14 septembre 2014 - M\u00e9lanie Dubois conte une tranche de vie sur les tests \u00e0 l'Office National du Film du Canada (ONF). Pendant la pr\u00e9sentation, elle pr\u00e9sente quelques trucs qui ont permis d'acc\u00e9l\u00e9rer la performance de ces tests myst\u00e9rieux, le tout accompagn\u00e9 d'exemples concrets.\n\n* \u00c0 propos de M\u00e9lanie: http://sflx.ca/mdubois\n* \u00c0 propos de l'ONF: https://www.onf.ca/\n* \u00c0 propos des tests unitaires: http://sflx.ca/testunitaire\n\nEnregistr\u00e9 et \u00e9dit\u00e9 en direct par @ChristianAubry \u00e0 Montr\u00e9al Python 54 (MP54: Utopie virtualis\u00e9e) avec un bon gros coup de main de Savoir-faire Linux(http://sflx.ca/carrieres).\n\n* \u00c0 propos de MP54: http://sflx.ca/MP54", "duration": 1676, "language": "fra", "recorded": "2015-09-14", "related_urls": [ { "label": "group web", "url": "https://montrealpython.org" }, { "label": "MP54", "url": "http://montrealpython.org/fr/2015/09/mp54/" }, { "label": "ONF", "url": "https://www.onf.ca/" }, { "label": "slides", "url": "https://goo.gl/UsPLwh" }, { "label": "tests unitaires", "url": "http://sflx.ca/testunitaire" }, { "label": "St\u00e9phane Wirtel", "url": "http://sflx.ca/swirtel" }, { "label": "Savoir-faire Linux", "url": "http://sflx.ca/carrieres" }, { "label": "M\u00e9lanie Dubois", "url": "http://sflx.ca/mdubois" } ], "speakers": [ "St\u00e9<NAME>", "M\u00e9<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/zVoihPQhfdc/maxresdefault.jpg", "title": "Python et PostgreSQL, un mariage merveilleux", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=zVoihPQhfdc" } ] }
1,206
631
<filename>dorado/dorado-test/dorado-test-integration/src/test/java/com/sankuai/mtthrift/testSuite/idlTest/Tweet.java /** * Autogenerated by Thrift Compiler (0.9.3) * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated */ package com.sankuai.mtthrift.testSuite.idlTest; import org.apache.thrift.EncodingUtils; import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.protocol.TTupleProtocol; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; import org.apache.thrift.scheme.TupleScheme; import javax.annotation.Generated; import java.util.*; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2018-10-04") public class Tweet implements org.apache.thrift.TBase<Tweet, Tweet._Fields>, java.io.Serializable, Cloneable, Comparable<Tweet> { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Tweet"); private static final org.apache.thrift.protocol.TField USER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("userId", org.apache.thrift.protocol.TType.I32, (short)1); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("userName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("text", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField LOC_FIELD_DESC = new org.apache.thrift.protocol.TField("loc", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField TWEET_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tweetType", org.apache.thrift.protocol.TType.I32, (short)5); private static final org.apache.thrift.protocol.TField AGE_FIELD_DESC = new org.apache.thrift.protocol.TField("age", org.apache.thrift.protocol.TType.I32, (short)16); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { schemes.put(StandardScheme.class, new TweetStandardSchemeFactory()); schemes.put(TupleScheme.class, new TweetTupleSchemeFactory()); } public int userId; // required public String userName; // required public String text; // required public Location loc; // optional /** * * @see TweetType */ public TweetType tweetType; // optional public int age; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { USER_ID((short)1, "userId"), USER_NAME((short)2, "userName"), TEXT((short)3, "text"), LOC((short)4, "loc"), /** * * @see TweetType */ TWEET_TYPE((short)5, "tweetType"), AGE((short)16, "age"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); static { for (_Fields field : EnumSet.allOf(_Fields.class)) { byName.put(field.getFieldName(), field); } } /** * Find the _Fields constant that matches fieldId, or null if its not found. */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // USER_ID return USER_ID; case 2: // USER_NAME return USER_NAME; case 3: // TEXT return TEXT; case 4: // LOC return LOC; case 5: // TWEET_TYPE return TWEET_TYPE; case 16: // AGE return AGE; default: return null; } } /** * Find the _Fields constant that matches fieldId, throwing an exception * if it is not found. */ public static _Fields findByThriftIdOrThrow(int fieldId) { _Fields fields = findByThriftId(fieldId); if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); return fields; } /** * Find the _Fields constant that matches name, or null if its not found. */ public static _Fields findByName(String name) { return byName.get(name); } private final short _thriftId; private final String _fieldName; _Fields(short thriftId, String fieldName) { _thriftId = thriftId; _fieldName = fieldName; } public short getThriftFieldId() { return _thriftId; } public String getFieldName() { return _fieldName; } } // isset id assignments private static final int __USERID_ISSET_ID = 0; private static final int __AGE_ISSET_ID = 1; private byte __isset_bitfield = 0; private static final _Fields optionals[] = {_Fields.LOC, _Fields.TWEET_TYPE, _Fields.AGE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.USER_ID, new org.apache.thrift.meta_data.FieldMetaData("userId", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); tmpMap.put(_Fields.USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("userName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TEXT, new org.apache.thrift.meta_data.FieldMetaData("text", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.LOC, new org.apache.thrift.meta_data.FieldMetaData("loc", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Location.class))); tmpMap.put(_Fields.TWEET_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tweetType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TweetType.class))); tmpMap.put(_Fields.AGE, new org.apache.thrift.meta_data.FieldMetaData("age", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Tweet.class, metaDataMap); } public Tweet() { this.tweetType = TweetType.TWEET; this.age = 18; } public Tweet( int userId, String userName, String text) { this(); this.userId = userId; setUserIdIsSet(true); this.userName = userName; this.text = text; } /** * Performs a deep copy on <i>other</i>. */ public Tweet(Tweet other) { __isset_bitfield = other.__isset_bitfield; this.userId = other.userId; if (other.isSetUserName()) { this.userName = other.userName; } if (other.isSetText()) { this.text = other.text; } if (other.isSetLoc()) { this.loc = new Location(other.loc); } if (other.isSetTweetType()) { this.tweetType = other.tweetType; } this.age = other.age; } public Tweet deepCopy() { return new Tweet(this); } @Override public void clear() { setUserIdIsSet(false); this.userId = 0; this.userName = null; this.text = null; this.loc = null; this.tweetType = TweetType.TWEET; this.age = 18; } public int getUserId() { return this.userId; } public Tweet setUserId(int userId) { this.userId = userId; setUserIdIsSet(true); return this; } public void unsetUserId() { __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __USERID_ISSET_ID); } /** Returns true if field userId is set (has been assigned a value) and false otherwise */ public boolean isSetUserId() { return EncodingUtils.testBit(__isset_bitfield, __USERID_ISSET_ID); } public void setUserIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __USERID_ISSET_ID, value); } public String getUserName() { return this.userName; } public Tweet setUserName(String userName) { this.userName = userName; return this; } public void unsetUserName() { this.userName = null; } /** Returns true if field userName is set (has been assigned a value) and false otherwise */ public boolean isSetUserName() { return this.userName != null; } public void setUserNameIsSet(boolean value) { if (!value) { this.userName = null; } } public String getText() { return this.text; } public Tweet setText(String text) { this.text = text; return this; } public void unsetText() { this.text = null; } /** Returns true if field text is set (has been assigned a value) and false otherwise */ public boolean isSetText() { return this.text != null; } public void setTextIsSet(boolean value) { if (!value) { this.text = null; } } public Location getLoc() { return this.loc; } public Tweet setLoc(Location loc) { this.loc = loc; return this; } public void unsetLoc() { this.loc = null; } /** Returns true if field loc is set (has been assigned a value) and false otherwise */ public boolean isSetLoc() { return this.loc != null; } public void setLocIsSet(boolean value) { if (!value) { this.loc = null; } } /** * * @see TweetType */ public TweetType getTweetType() { return this.tweetType; } /** * * @see TweetType */ public Tweet setTweetType(TweetType tweetType) { this.tweetType = tweetType; return this; } public void unsetTweetType() { this.tweetType = null; } /** Returns true if field tweetType is set (has been assigned a value) and false otherwise */ public boolean isSetTweetType() { return this.tweetType != null; } public void setTweetTypeIsSet(boolean value) { if (!value) { this.tweetType = null; } } public int getAge() { return this.age; } public Tweet setAge(int age) { this.age = age; setAgeIsSet(true); return this; } public void unsetAge() { __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AGE_ISSET_ID); } /** Returns true if field age is set (has been assigned a value) and false otherwise */ public boolean isSetAge() { return EncodingUtils.testBit(__isset_bitfield, __AGE_ISSET_ID); } public void setAgeIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AGE_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { switch (field) { case USER_ID: if (value == null) { unsetUserId(); } else { setUserId((Integer)value); } break; case USER_NAME: if (value == null) { unsetUserName(); } else { setUserName((String)value); } break; case TEXT: if (value == null) { unsetText(); } else { setText((String)value); } break; case LOC: if (value == null) { unsetLoc(); } else { setLoc((Location)value); } break; case TWEET_TYPE: if (value == null) { unsetTweetType(); } else { setTweetType((TweetType)value); } break; case AGE: if (value == null) { unsetAge(); } else { setAge((Integer)value); } break; } } public Object getFieldValue(_Fields field) { switch (field) { case USER_ID: return getUserId(); case USER_NAME: return getUserName(); case TEXT: return getText(); case LOC: return getLoc(); case TWEET_TYPE: return getTweetType(); case AGE: return getAge(); } throw new IllegalStateException(); } /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case USER_ID: return isSetUserId(); case USER_NAME: return isSetUserName(); case TEXT: return isSetText(); case LOC: return isSetLoc(); case TWEET_TYPE: return isSetTweetType(); case AGE: return isSetAge(); } throw new IllegalStateException(); } @Override public boolean equals(Object that) { if (that == null) return false; if (that instanceof Tweet) return this.equals((Tweet)that); return false; } public boolean equals(Tweet that) { if (that == null) return false; boolean this_present_userId = true; boolean that_present_userId = true; if (this_present_userId || that_present_userId) { if (!(this_present_userId && that_present_userId)) return false; if (this.userId != that.userId) return false; } boolean this_present_userName = true && this.isSetUserName(); boolean that_present_userName = true && that.isSetUserName(); if (this_present_userName || that_present_userName) { if (!(this_present_userName && that_present_userName)) return false; if (!this.userName.equals(that.userName)) return false; } boolean this_present_text = true && this.isSetText(); boolean that_present_text = true && that.isSetText(); if (this_present_text || that_present_text) { if (!(this_present_text && that_present_text)) return false; if (!this.text.equals(that.text)) return false; } boolean this_present_loc = true && this.isSetLoc(); boolean that_present_loc = true && that.isSetLoc(); if (this_present_loc || that_present_loc) { if (!(this_present_loc && that_present_loc)) return false; if (!this.loc.equals(that.loc)) return false; } boolean this_present_tweetType = true && this.isSetTweetType(); boolean that_present_tweetType = true && that.isSetTweetType(); if (this_present_tweetType || that_present_tweetType) { if (!(this_present_tweetType && that_present_tweetType)) return false; if (!this.tweetType.equals(that.tweetType)) return false; } boolean this_present_age = true && this.isSetAge(); boolean that_present_age = true && that.isSetAge(); if (this_present_age || that_present_age) { if (!(this_present_age && that_present_age)) return false; if (this.age != that.age) return false; } return true; } @Override public int hashCode() { List<Object> list = new ArrayList<Object>(); boolean present_userId = true; list.add(present_userId); if (present_userId) list.add(userId); boolean present_userName = true && (isSetUserName()); list.add(present_userName); if (present_userName) list.add(userName); boolean present_text = true && (isSetText()); list.add(present_text); if (present_text) list.add(text); boolean present_loc = true && (isSetLoc()); list.add(present_loc); if (present_loc) list.add(loc); boolean present_tweetType = true && (isSetTweetType()); list.add(present_tweetType); if (present_tweetType) list.add(tweetType.getValue()); boolean present_age = true && (isSetAge()); list.add(present_age); if (present_age) list.add(age); return list.hashCode(); } @Override public int compareTo(Tweet other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; lastComparison = Boolean.valueOf(isSetUserId()).compareTo(other.isSetUserId()); if (lastComparison != 0) { return lastComparison; } if (isSetUserId()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userId, other.userId); if (lastComparison != 0) { return lastComparison; } } lastComparison = Boolean.valueOf(isSetUserName()).compareTo(other.isSetUserName()); if (lastComparison != 0) { return lastComparison; } if (isSetUserName()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userName, other.userName); if (lastComparison != 0) { return lastComparison; } } lastComparison = Boolean.valueOf(isSetText()).compareTo(other.isSetText()); if (lastComparison != 0) { return lastComparison; } if (isSetText()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.text, other.text); if (lastComparison != 0) { return lastComparison; } } lastComparison = Boolean.valueOf(isSetLoc()).compareTo(other.isSetLoc()); if (lastComparison != 0) { return lastComparison; } if (isSetLoc()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.loc, other.loc); if (lastComparison != 0) { return lastComparison; } } lastComparison = Boolean.valueOf(isSetTweetType()).compareTo(other.isSetTweetType()); if (lastComparison != 0) { return lastComparison; } if (isSetTweetType()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tweetType, other.tweetType); if (lastComparison != 0) { return lastComparison; } } lastComparison = Boolean.valueOf(isSetAge()).compareTo(other.isSetAge()); if (lastComparison != 0) { return lastComparison; } if (isSetAge()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.age, other.age); if (lastComparison != 0) { return lastComparison; } } return 0; } public _Fields fieldForId(int fieldId) { return _Fields.findByThriftId(fieldId); } public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException { schemes.get(iprot.getScheme()).getScheme().read(iprot, this); } public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException { schemes.get(oprot.getScheme()).getScheme().write(oprot, this); } @Override public String toString() { StringBuilder sb = new StringBuilder("Tweet("); boolean first = true; sb.append("userId:"); sb.append(this.userId); first = false; if (!first) sb.append(", "); sb.append("userName:"); if (this.userName == null) { sb.append("null"); } else { sb.append(this.userName); } first = false; if (!first) sb.append(", "); sb.append("text:"); if (this.text == null) { sb.append("null"); } else { sb.append(this.text); } first = false; if (isSetLoc()) { if (!first) sb.append(", "); sb.append("loc:"); if (this.loc == null) { sb.append("null"); } else { sb.append(this.loc); } first = false; } if (isSetTweetType()) { if (!first) sb.append(", "); sb.append("tweetType:"); if (this.tweetType == null) { sb.append("null"); } else { sb.append(this.tweetType); } first = false; } if (isSetAge()) { if (!first) sb.append(", "); sb.append("age:"); sb.append(this.age); first = false; } sb.append(")"); return sb.toString(); } public void validate() throws TException { // check for required fields // alas, we cannot check 'userId' because it's a primitive and you chose the non-beans generator. if (userName == null) { throw new TProtocolException("Required field 'userName' was not present! Struct: " + toString()); } if (text == null) { throw new TProtocolException("Required field 'text' was not present! Struct: " + toString()); } // check for sub-struct validity if (loc != null) { loc.validate(); } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { try { write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); } catch (TException te) { throw new java.io.IOException(te); } } private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (TException te) { throw new java.io.IOException(te); } } private static class TweetStandardSchemeFactory implements SchemeFactory { public TweetStandardScheme getScheme() { return new TweetStandardScheme(); } } private static class TweetStandardScheme extends StandardScheme<Tweet> { public void read(org.apache.thrift.protocol.TProtocol iprot, Tweet struct) throws TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) { schemeField = iprot.readFieldBegin(); if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (schemeField.id) { case 1: // USER_ID if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.userId = iprot.readI32(); struct.setUserIdIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 2: // USER_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.userName = iprot.readString(); struct.setUserNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 3: // TEXT if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.text = iprot.readString(); struct.setTextIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 4: // LOC if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.loc = new Location(); struct.loc.read(iprot); struct.setLocIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 5: // TWEET_TYPE if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.tweetType = TweetType.findByValue(iprot.readI32()); struct.setTweetTypeIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 16: // AGE if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.age = iprot.readI32(); struct.setAgeIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } iprot.readFieldEnd(); } iprot.readStructEnd(); // check for required fields of primitive type, which can't be checked in the validate method if (!struct.isSetUserId()) { throw new TProtocolException("Required field 'userId' was not found in serialized data! Struct: " + toString()); } struct.validate(); } public void write(org.apache.thrift.protocol.TProtocol oprot, Tweet struct) throws TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); oprot.writeFieldBegin(USER_ID_FIELD_DESC); oprot.writeI32(struct.userId); oprot.writeFieldEnd(); if (struct.userName != null) { oprot.writeFieldBegin(USER_NAME_FIELD_DESC); oprot.writeString(struct.userName); oprot.writeFieldEnd(); } if (struct.text != null) { oprot.writeFieldBegin(TEXT_FIELD_DESC); oprot.writeString(struct.text); oprot.writeFieldEnd(); } if (struct.loc != null) { if (struct.isSetLoc()) { oprot.writeFieldBegin(LOC_FIELD_DESC); struct.loc.write(oprot); oprot.writeFieldEnd(); } } if (struct.tweetType != null) { if (struct.isSetTweetType()) { oprot.writeFieldBegin(TWEET_TYPE_FIELD_DESC); oprot.writeI32(struct.tweetType.getValue()); oprot.writeFieldEnd(); } } if (struct.isSetAge()) { oprot.writeFieldBegin(AGE_FIELD_DESC); oprot.writeI32(struct.age); oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); } } private static class TweetTupleSchemeFactory implements SchemeFactory { public TweetTupleScheme getScheme() { return new TweetTupleScheme(); } } private static class TweetTupleScheme extends TupleScheme<Tweet> { @Override public void write(org.apache.thrift.protocol.TProtocol prot, Tweet struct) throws TException { TTupleProtocol oprot = (TTupleProtocol) prot; oprot.writeI32(struct.userId); oprot.writeString(struct.userName); oprot.writeString(struct.text); BitSet optionals = new BitSet(); if (struct.isSetLoc()) { optionals.set(0); } if (struct.isSetTweetType()) { optionals.set(1); } if (struct.isSetAge()) { optionals.set(2); } oprot.writeBitSet(optionals, 3); if (struct.isSetLoc()) { struct.loc.write(oprot); } if (struct.isSetTweetType()) { oprot.writeI32(struct.tweetType.getValue()); } if (struct.isSetAge()) { oprot.writeI32(struct.age); } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Tweet struct) throws TException { TTupleProtocol iprot = (TTupleProtocol) prot; struct.userId = iprot.readI32(); struct.setUserIdIsSet(true); struct.userName = iprot.readString(); struct.setUserNameIsSet(true); struct.text = iprot.readString(); struct.setTextIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.loc = new Location(); struct.loc.read(iprot); struct.setLocIsSet(true); } if (incoming.get(1)) { struct.tweetType = TweetType.findByValue(iprot.readI32()); struct.setTweetTypeIsSet(true); } if (incoming.get(2)) { struct.age = iprot.readI32(); struct.setAgeIsSet(true); } } } }
11,478
348
<filename>src/test/java/com/sebastian_daschner/jaxrs_analyzer/analysis/classes/testclasses/resource/response/TestClass17.java<gh_stars>100-1000 /* * Copyright (C) 2015 <NAME>, <EMAIL> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sebastian_daschner.jaxrs_analyzer.analysis.classes.testclasses.resource.response; import com.sebastian_daschner.jaxrs_analyzer.model.Types; import com.sebastian_daschner.jaxrs_analyzer.model.elements.HttpResponse; import javax.ws.rs.core.Response; import java.util.Set; import static java.util.Arrays.asList; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; public class TestClass17 { // test not particularly useful; only testing array type support @javax.ws.rs.GET public Response method() { String[] strings = new String[2]; strings[0] = "test"; return Response.ok(strings).build(); } public static Set<HttpResponse> getResult() { final HttpResponse result = new HttpResponse(); result.getStatuses().addAll(singletonList(200)); result.getEntityTypes().addAll(asList("[Ljava/lang/String;", Types.OBJECT)); return singleton(result); } }
579
3,190
<filename>cc/src/core/hash_bucket.h // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. #pragma once #include <atomic> #include <cassert> #include <cstdint> #include <thread> #include "address.h" #include "constants.h" #include "malloc_fixed_page_size.h" namespace FASTER { namespace core { static_assert(Address::kAddressBits == 48, "Address::kAddressBits != 48"); /// Entry stored in a hash bucket. Packed into 8 bytes. struct HashBucketEntry { /// Invalid value in the hash table static constexpr uint64_t kInvalidEntry = 0; HashBucketEntry() : control_{ 0 } { } HashBucketEntry(Address address, uint16_t tag, bool tentative) : address_{ address.control() } , tag_{ tag } , reserved_{ 0 } , tentative_{ tentative } { } HashBucketEntry(uint64_t code) : control_{ code } { } HashBucketEntry(const HashBucketEntry& other) : control_{ other.control_ } { } inline HashBucketEntry& operator=(const HashBucketEntry& other) { control_ = other.control_; return *this; } inline bool operator ==(const HashBucketEntry& other) const { return control_ == other.control_; } inline bool operator !=(const HashBucketEntry& other) const { return control_ != other.control_; } inline bool unused() const { return control_ == 0; } inline Address address() const { return Address{ address_ }; } inline uint16_t tag() const { return static_cast<uint16_t>(tag_); } inline bool tentative() const { return static_cast<bool>(tentative_); } inline void set_tentative(bool desired) { tentative_ = desired; } union { struct { uint64_t address_ : 48; // corresponds to logical address uint64_t tag_ : 14; uint64_t reserved_ : 1; uint64_t tentative_ : 1; }; uint64_t control_; }; }; static_assert(sizeof(HashBucketEntry) == 8, "sizeof(HashBucketEntry) != 8"); /// Atomic hash-bucket entry. class AtomicHashBucketEntry { public: AtomicHashBucketEntry(const HashBucketEntry& entry) : control_{ entry.control_ } { } /// Default constructor AtomicHashBucketEntry() : control_{ HashBucketEntry::kInvalidEntry } { } /// Atomic access. inline HashBucketEntry load() const { return HashBucketEntry{ control_.load() }; } inline void store(const HashBucketEntry& desired) { control_.store(desired.control_); } inline bool compare_exchange_strong(HashBucketEntry& expected, HashBucketEntry desired) { uint64_t expected_control = expected.control_; bool result = control_.compare_exchange_strong(expected_control, desired.control_); expected = HashBucketEntry{ expected_control }; return result; } private: /// Atomic address to the hash bucket entry. std::atomic<uint64_t> control_; }; /// Entry stored in a hash bucket that points to the next overflow bucket (if any). struct HashBucketOverflowEntry { HashBucketOverflowEntry() : control_{ 0 } { } HashBucketOverflowEntry(FixedPageAddress address) : address_{ address.control() } , unused_{ 0 } { } HashBucketOverflowEntry(const HashBucketOverflowEntry& other) : control_{ other.control_ } { } HashBucketOverflowEntry(uint64_t code) : control_{ code } { } inline HashBucketOverflowEntry& operator=(const HashBucketOverflowEntry& other) { control_ = other.control_; return *this; } inline bool operator ==(const HashBucketOverflowEntry& other) const { return control_ == other.control_; } inline bool operator !=(const HashBucketOverflowEntry& other) const { return control_ != other.control_; } inline bool unused() const { return address_ == 0; } inline FixedPageAddress address() const { return FixedPageAddress{ address_ }; } union { struct { uint64_t address_ : 48; // corresponds to logical address uint64_t unused_ : 16; }; uint64_t control_; }; }; static_assert(sizeof(HashBucketOverflowEntry) == 8, "sizeof(HashBucketOverflowEntry) != 8"); /// Atomic hash-bucket overflow entry. class AtomicHashBucketOverflowEntry { private: static constexpr uint64_t kPinIncrement = (uint64_t)1 << 48; static constexpr uint64_t kLocked = (uint64_t)1 << 63; public: AtomicHashBucketOverflowEntry(const HashBucketOverflowEntry& entry) : control_{ entry.control_ } { } /// Default constructor AtomicHashBucketOverflowEntry() : control_{ HashBucketEntry::kInvalidEntry } { } /// Atomic access. inline HashBucketOverflowEntry load() const { return HashBucketOverflowEntry{ control_.load() }; } inline void store(const HashBucketOverflowEntry& desired) { control_.store(desired.control_); } inline bool compare_exchange_strong(HashBucketOverflowEntry& expected, HashBucketOverflowEntry desired) { uint64_t expected_control = expected.control_; bool result = control_.compare_exchange_strong(expected_control, desired.control_); expected = HashBucketOverflowEntry{ expected_control }; return result; } private: /// Atomic address to the hash bucket entry. std::atomic<uint64_t> control_; }; /// A bucket consisting of 7 hash bucket entries, plus one hash bucket overflow entry. Fits in /// a cache line. struct alignas(Constants::kCacheLineBytes) HashBucket { /// Number of entries per bucket (excluding overflow entry). static constexpr uint32_t kNumEntries = 7; /// The entries. AtomicHashBucketEntry entries[kNumEntries]; /// Overflow entry points to next overflow bucket, if any. AtomicHashBucketOverflowEntry overflow_entry; }; static_assert(sizeof(HashBucket) == Constants::kCacheLineBytes, "sizeof(HashBucket) != Constants::kCacheLineBytes"); } } // namespace FASTER::core
1,977
1,045
package com.amazonaws.kinesisvideo.demoapp.activity; import android.os.Bundle; import com.google.android.material.navigation.NavigationView; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentManager; import androidx.core.view.GravityCompat; import androidx.drawerlayout.widget.DrawerLayout; import androidx.appcompat.app.ActionBarDrawerToggle; import androidx.appcompat.app.AppCompatActivity; import androidx.appcompat.widget.Toolbar; import android.util.Log; import android.view.Menu; import android.view.MenuItem; import com.amazonaws.kinesisvideo.demoapp.R; import com.amazonaws.kinesisvideo.demoapp.fragment.StreamConfigurationFragment; import com.amazonaws.kinesisvideo.demoapp.fragment.StreamingFragment; import com.amazonaws.mobile.client.AWSMobileClient; import com.amazonaws.mobile.client.Callback; import com.amazonaws.mobile.client.UserStateDetails; public class SimpleNavActivity extends AppCompatActivity implements NavigationView.OnNavigationItemSelectedListener { public static final String TAG = SimpleNavActivity.class.getSimpleName(); @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_simple_nav); Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar); setSupportActionBar(toolbar); DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout); ActionBarDrawerToggle toggle = new ActionBarDrawerToggle( this, drawer, toolbar, R.string.navigation_drawer_open, R.string.navigation_drawer_close); drawer.setDrawerListener(toggle); toggle.syncState(); NavigationView navigationView = (NavigationView) findViewById(R.id.nav_view); navigationView.setNavigationItemSelectedListener(this); this.startConfigFragment(); } @Override public void onBackPressed() { DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout); if (drawer.isDrawerOpen(GravityCompat.START)) { drawer.closeDrawer(GravityCompat.START); } else { super.onBackPressed(); } } @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.simple_nav, menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { // Handle action bar item clicks here. The action bar will // automatically handle clicks on the Home/Up button, so long // as you specify a parent activity in AndroidManifest.xml. int id = item.getItemId(); //noinspection SimplifiableIfStatement if (id == R.id.action_settings) { return true; } return super.onOptionsItemSelected(item); } @SuppressWarnings("StatementWithEmptyBody") @Override public boolean onNavigationItemSelected(MenuItem item) { // Handle navigation view item clicks here. int id = item.getItemId(); if (id == R.id.nav_camera) { try { startConfigFragment(); } catch (Exception e) { Log.e("", "Failed to initialize streaming demo fragment."); e.printStackTrace(); } } else if (id == R.id.nav_logout) { AWSMobileClient.getInstance().signOut(); AWSMobileClient.getInstance().showSignIn(this, new Callback<UserStateDetails>() { @Override public void onResult(UserStateDetails result) { Log.d(TAG, "onResult: User sign-in " + result.getUserState()); } @Override public void onError(Exception e) { Log.e(TAG, "onError: User sign-in", e); } }); } DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout); drawer.closeDrawer(GravityCompat.START); return true; } public void startFragment(Fragment fragment) { FragmentManager fragmentManager = getSupportFragmentManager(); fragmentManager.beginTransaction().replace(R.id.content_simple, fragment).commit(); } public void startStreamingFragment(Bundle extras) { try { Fragment streamFragment = StreamingFragment.newInstance(this); streamFragment.setArguments(extras); this.startFragment(streamFragment); } catch (Exception e) { Log.e("", "Failed to start streaming fragment."); e.printStackTrace(); } } public void startConfigFragment() { try { Fragment streamFragment = StreamConfigurationFragment.newInstance(this); this.startFragment(streamFragment); } catch (Exception e) { Log.e("", "Failed to go back to configure stream."); e.printStackTrace(); } } }
2,049
326
<reponame>mandaputtra/sonicjs<gh_stars>100-1000 {"moduleSystemId":"body-text-shortcodes","systemId":"body-text-shortcodes","title":"Body Text Shortcodes","filePath":"/server/modules/body-text-shortcodes/models/body-text-shortcodes.json","data":{"components":[]}}
83
3,799
<gh_stars>1000+ /* * Copyright 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.recyclerview.widget; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) public class BatchingListUpdateCallbackTest { BatchingListUpdateCallback mBatching; ListUpdateCallback mCallback; @Before public void setup() { mCallback = mock(ListUpdateCallback.class); mBatching = new BatchingListUpdateCallback(mCallback); } @Test public void addSimple() { mBatching.onInserted(3, 2); mBatching.dispatchLastEvent(); verify(mCallback).onInserted(3, 2); verifyNoMoreInteractions(mCallback); } @Test public void addToSamePos() { mBatching.onInserted(3, 2); mBatching.onInserted(3, 1); mBatching.dispatchLastEvent(); verify(mCallback).onInserted(3, 3); verifyNoMoreInteractions(mCallback); } @Test public void addInsidePrevious() { mBatching.onInserted(3, 5); mBatching.onInserted(5, 1); mBatching.dispatchLastEvent(); verify(mCallback).onInserted(3, 6); verifyNoMoreInteractions(mCallback); } @Test public void addBefore() { mBatching.onInserted(3, 5); mBatching.onInserted(2, 1); mBatching.dispatchLastEvent(); verify(mCallback).onInserted(3, 5); verify(mCallback).onInserted(2, 1); verifyNoMoreInteractions(mCallback); } @Test public void removeSimple() { mBatching.onRemoved(3, 2); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(3, 2); verifyNoMoreInteractions(mCallback); } @Test public void removeSamePosition() { mBatching.onRemoved(3, 2); mBatching.onRemoved(3, 1); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(3, 3); verifyNoMoreInteractions(mCallback); } @Test public void removeInside() { mBatching.onRemoved(3, 5); mBatching.onRemoved(4, 2); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(3, 5); verify(mCallback).onRemoved(4, 2); verifyNoMoreInteractions(mCallback); } @Test public void removeBefore() { mBatching.onRemoved(3, 2); mBatching.onRemoved(2, 1); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(2, 3); verifyNoMoreInteractions(mCallback); } @Test public void removeBefore2() { mBatching.onRemoved(3, 2); mBatching.onRemoved(2, 4); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(2, 6); verifyNoMoreInteractions(mCallback); } @Test public void removeBefore3() { mBatching.onRemoved(3, 2); mBatching.onRemoved(1, 1); mBatching.dispatchLastEvent(); verify(mCallback).onRemoved(3, 2); verify(mCallback).onRemoved(1, 1); verifyNoMoreInteractions(mCallback); } @Test public void moveSimple() { mBatching.onMoved(3, 2); mBatching.dispatchLastEvent(); verify(mCallback).onMoved(3, 2); verifyNoMoreInteractions(mCallback); } @Test public void moveTwice() { mBatching.onMoved(3, 2); mBatching.onMoved(5, 6); mBatching.dispatchLastEvent(); verify(mCallback).onMoved(3, 2); verify(mCallback).onMoved(5, 6); verifyNoMoreInteractions(mCallback); } @Test public void changeSimple() { mBatching.onChanged(3, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, null); verifyNoMoreInteractions(mCallback); } @Test public void changeConsecutive() { mBatching.onChanged(3, 2, null); mBatching.onChanged(5, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 4, null); verifyNoMoreInteractions(mCallback); } @Test public void changeTheSame() { mBatching.onChanged(3, 2, null); mBatching.onChanged(4, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 3, null); verifyNoMoreInteractions(mCallback); } @Test public void changeTheSame2() { mBatching.onChanged(3, 2, null); mBatching.onChanged(3, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, null); verifyNoMoreInteractions(mCallback); } @Test public void changeBefore() { mBatching.onChanged(3, 2, null); mBatching.onChanged(2, 1, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(2, 3, null); verifyNoMoreInteractions(mCallback); } @Test public void changeBeforeOverlap() { mBatching.onChanged(3, 2, null); mBatching.onChanged(2, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(2, 3, null); verifyNoMoreInteractions(mCallback); } @Test public void changeSimpleWithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, payload); } @Test public void changeConsecutiveWithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(5, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 4, payload); verifyNoMoreInteractions(mCallback); } @Test public void changeTheSameWithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(4, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 3, payload); verifyNoMoreInteractions(mCallback); } @Test public void changeTheSame2WithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(3, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, payload); verifyNoMoreInteractions(mCallback); } @Test public void changeBeforeWithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(2, 1, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(2, 3, payload); verifyNoMoreInteractions(mCallback); } @Test public void changeBeforeOverlapWithPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(2, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(2, 3, payload); verifyNoMoreInteractions(mCallback); } @Test public void changeWithNewPayload() { Object payload1 = new Object(); Object payload2 = new Object(); mBatching.onChanged(3, 2, payload1); mBatching.onChanged(2, 2, payload2); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, payload1); verify(mCallback).onChanged(2, 2, payload2); verifyNoMoreInteractions(mCallback); } @Test public void changeWithEmptyPayload() { Object payload = new Object(); mBatching.onChanged(3, 2, payload); mBatching.onChanged(2, 2, null); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, payload); verify(mCallback).onChanged(2, 2, null); verifyNoMoreInteractions(mCallback); } @Test public void changeWithEmptyPayload2() { Object payload = new Object(); mBatching.onChanged(3, 2, null); mBatching.onChanged(2, 2, payload); mBatching.dispatchLastEvent(); verify(mCallback).onChanged(3, 2, null); verify(mCallback).onChanged(2, 2, payload); verifyNoMoreInteractions(mCallback); } }
3,765
1,351
{ "android":{ "version":"1.1.1", "url":"http://download.flutterchina.club/gitme.1.1.1.fix1.apk", "storeUrl":"http://download.flutterchina.club/gitme.1.1.1.fix1.apk", "fix":1, "force":false }, "ios":{ "version":"1.1.1", "url":"itms-apps://itunes.apple.com/app/id1411822165", "force":false }, "description":"1. 性能优化,优化了首页Tab切换性能。\n2. 动态里的事件类型支持PR评论事件。\n3. Markdown文档页添加查看源码菜单。\n4. 支持Trending。\n5. 修复了一些bug." }
313
918
<filename>tether/boost/geometry/algorithms/detail/buffer/turn_in_piece_visitor.hpp // Boost.Geometry (aka GGL, Generic Geometry Library) // Copyright (c) 2012-2014 <NAME>, Amsterdam, the Netherlands. // Copyright (c) 2017 <NAME>, Lodz, Poland. // This file was modified by Oracle on 2016. // Modifications copyright (c) 2016 Oracle and/or its affiliates. // Contributed and/or modified by <NAME>, on behalf of Oracle // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_TURN_IN_PIECE_VISITOR #define BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_TURN_IN_PIECE_VISITOR #include <boost/core/ignore_unused.hpp> #include <boost/range.hpp> #include <boost/geometry/core/assert.hpp> #include <boost/geometry/arithmetic/dot_product.hpp> #include <boost/geometry/algorithms/assign.hpp> #include <boost/geometry/algorithms/comparable_distance.hpp> #include <boost/geometry/algorithms/equals.hpp> #include <boost/geometry/algorithms/expand.hpp> #include <boost/geometry/algorithms/detail/disjoint/point_box.hpp> #include <boost/geometry/algorithms/detail/disjoint/box_box.hpp> #include <boost/geometry/algorithms/detail/overlay/segment_identifier.hpp> #include <boost/geometry/algorithms/detail/overlay/get_turn_info.hpp> #include <boost/geometry/policies/compare.hpp> #include <boost/geometry/strategies/buffer.hpp> #include <boost/geometry/algorithms/detail/buffer/buffer_policies.hpp> #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) #include <boost/geometry/strategies/cartesian/side_of_intersection.hpp> #endif namespace boost { namespace geometry { #ifndef DOXYGEN_NO_DETAIL namespace detail { namespace buffer { struct piece_get_box { template <typename Box, typename Piece> static inline void apply(Box& total, Piece const& piece) { geometry::expand(total, piece.robust_envelope); } }; struct piece_ovelaps_box { template <typename Box, typename Piece> static inline bool apply(Box const& box, Piece const& piece) { if (piece.type == strategy::buffer::buffered_flat_end || piece.type == strategy::buffer::buffered_concave) { // Turns cannot be inside a flat end (though they can be on border) // Neither we need to check if they are inside concave helper pieces // Skip all pieces not used as soon as possible return false; } return ! geometry::detail::disjoint::disjoint_box_box(box, piece.robust_envelope); } }; struct turn_get_box { template <typename Box, typename Turn> static inline void apply(Box& total, Turn const& turn) { geometry::expand(total, turn.robust_point); } }; struct turn_ovelaps_box { template <typename Box, typename Turn> static inline bool apply(Box const& box, Turn const& turn) { return ! geometry::detail::disjoint::disjoint_point_box(turn.robust_point, box); } }; enum analyse_result { analyse_unknown, analyse_continue, analyse_disjoint, analyse_within, analyse_on_original_boundary, analyse_on_offsetted #if ! defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) , analyse_near_offsetted #endif }; template <typename Point> inline bool in_box(Point const& previous, Point const& current, Point const& point) { // Get its box (TODO: this can be prepared-on-demand later) typedef geometry::model::box<Point> box_type; box_type box; geometry::assign_inverse(box); geometry::expand(box, previous); geometry::expand(box, current); return geometry::covered_by(point, box); } template <typename Point, typename Turn> inline analyse_result check_segment(Point const& previous, Point const& current, Turn const& turn, bool from_monotonic) { #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) typedef geometry::model::referring_segment<Point const> segment_type; segment_type const p(turn.rob_pi, turn.rob_pj); segment_type const q(turn.rob_qi, turn.rob_qj); segment_type const r(previous, current); int const side = strategy::side::side_of_intersection::apply(p, q, r, turn.robust_point); if (side == 0) { return analyse_on_offsetted; } if (side == -1 && from_monotonic) { return analyse_within; } if (side == 1 && from_monotonic) { return analyse_disjoint; } return analyse_continue; #else typedef typename strategy::side::services::default_strategy < typename cs_tag<Point>::type >::type side_strategy; typedef typename geometry::coordinate_type<Point>::type coordinate_type; coordinate_type const twice_area = side_strategy::template side_value < coordinate_type, coordinate_type >(previous, current, turn.robust_point); if (twice_area == 0) { // Collinear, only on segment if it is covered by its bbox if (in_box(previous, current, turn.robust_point)) { return analyse_on_offsetted; } } else if (twice_area < 0) { // It is in the triangle right-of the segment where the // segment is the hypothenusa. Check if it is close // (within rounding-area) if (twice_area * twice_area < geometry::comparable_distance(previous, current) && in_box(previous, current, turn.robust_point)) { return analyse_near_offsetted; } else if (from_monotonic) { return analyse_within; } } else if (twice_area > 0 && from_monotonic) { // Left of segment return analyse_disjoint; } // Not monotonic, on left or right side: continue analysing return analyse_continue; #endif } class analyse_turn_wrt_point_piece { public : template <typename Turn, typename Piece> static inline analyse_result apply(Turn const& turn, Piece const& piece) { typedef typename Piece::section_type section_type; typedef typename Turn::robust_point_type point_type; typedef typename geometry::coordinate_type<point_type>::type coordinate_type; #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) typedef geometry::model::referring_segment<point_type const> segment_type; segment_type const p(turn.rob_pi, turn.rob_pj); segment_type const q(turn.rob_qi, turn.rob_qj); #else typedef strategy::within::winding<point_type> strategy_type; typename strategy_type::state_type state; strategy_type strategy; boost::ignore_unused(strategy); #endif BOOST_GEOMETRY_ASSERT(! piece.sections.empty()); coordinate_type const point_x = geometry::get<0>(turn.robust_point); for (std::size_t s = 0; s < piece.sections.size(); s++) { section_type const& section = piece.sections[s]; // If point within horizontal range of monotonic section: if (! section.duplicate && section.begin_index < section.end_index && point_x >= geometry::get<min_corner, 0>(section.bounding_box) - 1 && point_x <= geometry::get<max_corner, 0>(section.bounding_box) + 1) { for (signed_size_type i = section.begin_index + 1; i <= section.end_index; i++) { point_type const& previous = piece.robust_ring[i - 1]; point_type const& current = piece.robust_ring[i]; #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) // First check if it is in range - if it is not, the // expensive side_of_intersection does not need to be // applied coordinate_type x1 = geometry::get<0>(previous); coordinate_type x2 = geometry::get<0>(current); if (x1 > x2) { std::swap(x1, x2); } if (point_x >= x1 - 1 && point_x <= x2 + 1) { segment_type const r(previous, current); int const side = strategy::side::side_of_intersection::apply(p, q, r, turn.robust_point); // Sections are monotonic in x-dimension if (side == 1) { // Left on segment return analyse_disjoint; } else if (side == 0) { // Collinear - TODO: check if really on segment return analyse_on_offsetted; } } #else analyse_result code = check_segment(previous, current, turn, false); if (code != analyse_continue) { return code; } // Get the state (to determine it is within), we don't have // to cover the on-segment case (covered above) strategy.apply(turn.robust_point, previous, current, state); #endif } } } #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) // It is nowhere outside, and not on segment, so it is within return analyse_within; #else int const code = strategy.result(state); if (code == 1) { return analyse_within; } else if (code == -1) { return analyse_disjoint; } // Should normally not occur - on-segment is covered return analyse_unknown; #endif } }; class analyse_turn_wrt_piece { template <typename Point, typename Turn> static inline analyse_result check_helper_segment(Point const& s1, Point const& s2, Turn const& turn, #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) bool , // is on original, to be reused #else bool is_original, #endif Point const& offsetted) { boost::ignore_unused(offsetted); #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) typedef geometry::model::referring_segment<Point const> segment_type; segment_type const p(turn.rob_pi, turn.rob_pj); segment_type const q(turn.rob_qi, turn.rob_qj); segment_type const r(s1, s2); int const side = strategy::side::side_of_intersection::apply(p, q, r, turn.robust_point); if (side == 1) { // left of segment return analyse_disjoint; } else if (side == 0) { // If is collinear, either on segment or before/after typedef geometry::model::box<Point> box_type; box_type box; geometry::assign_inverse(box); geometry::expand(box, s1); geometry::expand(box, s2); if (geometry::covered_by(turn.robust_point, box)) { // Points on helper-segments (and not on its corners) // are considered as within return analyse_within; } // It is collinear but not on the segment. Because these // segments are convex, it is outside // Unless the offsetted ring is collinear or concave w.r.t. // helper-segment but that scenario is not yet supported return analyse_disjoint; } // right of segment return analyse_continue; #else typedef typename strategy::side::services::default_strategy < typename cs_tag<Point>::type >::type side_strategy; switch(side_strategy::apply(s1, s2, turn.robust_point)) { case 1 : return analyse_disjoint; // left of segment case 0 : { // If is collinear, either on segment or before/after typedef geometry::model::box<Point> box_type; box_type box; geometry::assign_inverse(box); geometry::expand(box, s1); geometry::expand(box, s2); if (geometry::covered_by(turn.robust_point, box)) { // It is on the segment if (! is_original && geometry::comparable_distance(turn.robust_point, offsetted) <= 1) { // It is close to the offsetted-boundary, take // any rounding-issues into account return analyse_near_offsetted; } // Points on helper-segments are considered as within // Points on original boundary are processed differently return is_original ? analyse_on_original_boundary : analyse_within; } // It is collinear but not on the segment. Because these // segments are convex, it is outside // Unless the offsetted ring is collinear or concave w.r.t. // helper-segment but that scenario is not yet supported return analyse_disjoint; } break; } // right of segment return analyse_continue; #endif } template <typename Turn, typename Piece> static inline analyse_result check_helper_segments(Turn const& turn, Piece const& piece) { typedef typename Turn::robust_point_type point_type; geometry::equal_to<point_type> comparator; point_type points[4]; signed_size_type helper_count = static_cast<signed_size_type>(piece.robust_ring.size()) - piece.offsetted_count; if (helper_count == 4) { for (int i = 0; i < 4; i++) { points[i] = piece.robust_ring[piece.offsetted_count + i]; } // 3--offsetted outline--0 // | | // left | | right // | | // 2===>==original===>===1 } else if (helper_count == 3) { // Triangular piece, assign points but assign second twice for (int i = 0; i < 4; i++) { int index = i < 2 ? i : i - 1; points[i] = piece.robust_ring[piece.offsetted_count + index]; } } else { // Some pieces (e.g. around points) do not have helper segments. // Others should have 3 (join) or 4 (side) return analyse_continue; } // First check point-equality point_type const& point = turn.robust_point; if (comparator(point, points[0]) || comparator(point, points[3])) { return analyse_on_offsetted; } if (comparator(point, points[1])) { // On original, right corner return piece.is_flat_end ? analyse_continue : analyse_on_original_boundary; } if (comparator(point, points[2])) { // On original, left corner return piece.is_flat_start ? analyse_continue : analyse_on_original_boundary; } // Right side of the piece analyse_result result = check_helper_segment(points[0], points[1], turn, false, points[0]); if (result != analyse_continue) { return result; } // Left side of the piece result = check_helper_segment(points[2], points[3], turn, false, points[3]); if (result != analyse_continue) { return result; } if (! comparator(points[1], points[2])) { // Side of the piece at side of original geometry result = check_helper_segment(points[1], points[2], turn, true, point); if (result != analyse_continue) { return result; } } // We are within the \/ or |_| shaped piece, where the top is the // offsetted ring. if (! geometry::covered_by(point, piece.robust_offsetted_envelope)) { // Not in offsetted-area. This makes a cheap check possible typedef typename strategy::side::services::default_strategy < typename cs_tag<point_type>::type >::type side_strategy; switch(side_strategy::apply(points[3], points[0], point)) { case 1 : return analyse_disjoint; case -1 : return analyse_within; case 0 : return analyse_disjoint; } } return analyse_continue; } template <typename Turn, typename Piece, typename Compare> static inline analyse_result check_monotonic(Turn const& turn, Piece const& piece, Compare const& compare) { typedef typename Piece::piece_robust_ring_type ring_type; typedef typename ring_type::const_iterator it_type; it_type end = piece.robust_ring.begin() + piece.offsetted_count; it_type it = std::lower_bound(piece.robust_ring.begin(), end, turn.robust_point, compare); if (it != end && it != piece.robust_ring.begin()) { // iterator points to point larger than point // w.r.t. specified direction, and prev points to a point smaller // We now know if it is inside/outside it_type prev = it - 1; return check_segment(*prev, *it, turn, true); } return analyse_continue; } public : template <typename Turn, typename Piece> static inline analyse_result apply(Turn const& turn, Piece const& piece) { typedef typename Turn::robust_point_type point_type; analyse_result code = check_helper_segments(turn, piece); if (code != analyse_continue) { return code; } geometry::equal_to<point_type> comparator; if (piece.offsetted_count > 8) { // If the offset contains some points and is monotonic, we try // to avoid walking all points linearly. // We try it only once. if (piece.is_monotonic_increasing[0]) { code = check_monotonic(turn, piece, geometry::less<point_type, 0>()); if (code != analyse_continue) return code; } else if (piece.is_monotonic_increasing[1]) { code = check_monotonic(turn, piece, geometry::less<point_type, 1>()); if (code != analyse_continue) return code; } else if (piece.is_monotonic_decreasing[0]) { code = check_monotonic(turn, piece, geometry::greater<point_type, 0>()); if (code != analyse_continue) return code; } else if (piece.is_monotonic_decreasing[1]) { code = check_monotonic(turn, piece, geometry::greater<point_type, 1>()); if (code != analyse_continue) return code; } } // It is small or not monotonic, walk linearly through offset // TODO: this will be combined with winding strategy for (signed_size_type i = 1; i < piece.offsetted_count; i++) { point_type const& previous = piece.robust_ring[i - 1]; point_type const& current = piece.robust_ring[i]; // The robust ring can contain duplicates // (on which any side or side-value would return 0) if (! comparator(previous, current)) { code = check_segment(previous, current, turn, false); if (code != analyse_continue) { return code; } } } return analyse_unknown; } }; template <typename Turns, typename Pieces> class turn_in_piece_visitor { Turns& m_turns; // because partition is currently operating on const input only Pieces const& m_pieces; // to check for piece-type template <typename Operation, typename Piece> inline bool skip(Operation const& op, Piece const& piece) const { if (op.piece_index == piece.index) { return true; } Piece const& pc = m_pieces[op.piece_index]; if (pc.left_index == piece.index || pc.right_index == piece.index) { if (pc.type == strategy::buffer::buffered_flat_end) { // If it is a flat end, don't compare against its neighbor: // it will always be located on one of the helper segments return true; } if (pc.type == strategy::buffer::buffered_concave) { // If it is concave, the same applies: the IP will be // located on one of the helper segments return true; } } return false; } #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) // NOTE: this function returns a side value in {-1, 0, 1} template <typename Turn, typename Piece> static inline int turn_in_convex_piece(Turn const& turn, Piece const& piece) { typedef typename Turn::robust_point_type point_type; typedef typename Piece::piece_robust_ring_type ring_type; typedef geometry::model::referring_segment<point_type const> segment; segment const p(turn.rob_pi, turn.rob_pj); segment const q(turn.rob_qi, turn.rob_qj); typedef typename boost::range_iterator<ring_type const>::type iterator_type; iterator_type it = boost::begin(piece.robust_ring); iterator_type end = boost::end(piece.robust_ring); // A robust ring is always closed, and always clockwise for (iterator_type previous = it++; it != end; ++previous, ++it) { geometry::equal_to<point_type> comparator; if (comparator(*previous, *it)) { // Points are the same continue; } segment r(*previous, *it); int const side = strategy::side::side_of_intersection::apply(p, q, r, turn.robust_point); if (side == 1) { // IP is left of segment, so it is outside return -1; // outside } else if (side == 0) { // IP is collinear with segment. TODO: we should analyze this further // For now we use the fallback point if (in_box(*previous, *it, turn.robust_point)) { return 0; } else { return -1; // outside } } } return 1; // inside } #endif public: inline turn_in_piece_visitor(Turns& turns, Pieces const& pieces) : m_turns(turns) , m_pieces(pieces) {} template <typename Turn, typename Piece> inline bool apply(Turn const& turn, Piece const& piece, bool first = true) { boost::ignore_unused_variable_warning(first); if (turn.count_within > 0) { // Already inside - no need to check again return true; } if (piece.type == strategy::buffer::buffered_flat_end || piece.type == strategy::buffer::buffered_concave) { // Turns cannot be located within flat-end or concave pieces return true; } if (! geometry::covered_by(turn.robust_point, piece.robust_envelope)) { // Easy check: if the turn is not in the envelope, we can safely return return true; } if (skip(turn.operations[0], piece) || skip(turn.operations[1], piece)) { return true; } // TODO: mutable_piece to make some on-demand preparations in analyse Turn& mutable_turn = m_turns[turn.turn_index]; if (piece.type == geometry::strategy::buffer::buffered_point) { // Optimization for buffer around points: if distance from center // is not between min/max radius, the result is clear typedef typename default_comparable_distance_result < typename Turn::robust_point_type >::type distance_type; distance_type const cd = geometry::comparable_distance(piece.robust_center, turn.robust_point); if (cd < piece.robust_min_comparable_radius) { mutable_turn.count_within++; return true; } if (cd > piece.robust_max_comparable_radius) { return true; } } analyse_result analyse_code = piece.type == geometry::strategy::buffer::buffered_point ? analyse_turn_wrt_point_piece::apply(turn, piece) : analyse_turn_wrt_piece::apply(turn, piece); switch(analyse_code) { case analyse_disjoint : return true; case analyse_on_offsetted : mutable_turn.count_on_offsetted++; // value is not used anymore return true; case analyse_on_original_boundary : mutable_turn.count_on_original_boundary++; return true; case analyse_within : mutable_turn.count_within++; return true; #if ! defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) case analyse_near_offsetted : mutable_turn.count_within_near_offsetted++; return true; #endif default : break; } #if defined(BOOST_GEOMETRY_BUFFER_USE_SIDE_OF_INTERSECTION) // We don't know (yet) int geometry_code = 0; if (piece.is_convex) { geometry_code = turn_in_convex_piece(turn, piece); } else { // TODO: this point_in_geometry is a performance-bottleneck here and // will be replaced completely by extending analyse_piece functionality geometry_code = detail::within::point_in_geometry(turn.robust_point, piece.robust_ring); } #else int geometry_code = detail::within::point_in_geometry(turn.robust_point, piece.robust_ring); #endif if (geometry_code == 1) { mutable_turn.count_within++; } return true; } }; }} // namespace detail::buffer #endif // DOXYGEN_NO_DETAIL }} // namespace boost::geometry #endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_TURN_IN_PIECE_VISITOR
13,331
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ extern jvmtiEnv *_jvmti; extern jvmtiEventCallbacks *_jvmti_callbacks; jlong get_nano_time(); void JNICALL class_file_load_hook( jvmtiEnv *jvmti_env, JNIEnv* jni_env, jclass class_being_redefined, jobject loader, const char* name, jobject protection_domain, jint class_data_len, const unsigned char* class_data, jint* new_class_data_len, unsigned char** new_class_data); void JNICALL native_method_bind_hook( jvmtiEnv *jvmti_env, JNIEnv* env, jthread thread, jmethodID method, void* address, void** new_address_ptr); void JNICALL monitor_contended_enter_hook( jvmtiEnv *jvmti_env, JNIEnv* jni_env, jthread thread, jobject object); void JNICALL monitor_contended_entered_hook( jvmtiEnv *jvmti_env, JNIEnv* jni_env, jthread thread, jobject object); void JNICALL vm_object_alloc( jvmtiEnv *jvmti_env, JNIEnv* jni_env, jthread thread, jobject object, jclass object_klass, jlong size); typedef void (JNICALL *waitCall) (JNIEnv *env, jobject obj, jlong arg); typedef void (JNICALL *sleepCall) (JNIEnv *env, jclass clazz, jlong arg); typedef void (JNICALL *parkCall) (JNIEnv *env, jclass clazz, jboolean arg0, jlong arg1); void JNICALL waitInterceptor(JNIEnv *env, jobject obj, jlong arg); void JNICALL sleepInterceptor(JNIEnv *env, jclass clazz, jlong arg); void JNICALL parkInterceptor(JNIEnv *env, jclass clazz, jboolean arg0, jlong arg1); void get_saved_class_file_bytes(JNIEnv *env, char *name, jobject loader, jint *class_data_len, unsigned char **class_data); void try_removing_bytes_for_unloaded_classes(JNIEnv *env); void cache_loaded_classes(jvmtiEnv *jvmti_env,jclass *classes,jint class_count); void JNICALL vm_init_hook(jvmtiEnv *jvmti_env, JNIEnv* jni_env, jthread thread); void parse_options_and_extract_params(char *options);
1,254
677
<reponame>InfiniteSynthesis/lynx-native<filename>Android/sdk/src/androidTest/java/com/lynx/test/TestActivity.java<gh_stars>100-1000 // Copyright 2017 The Lynx Authors. All rights reserved. package com.lynx.test; import android.app.Activity; import android.os.Bundle; import android.support.annotation.Nullable; import com.lynx.ui.LynxView; import com.lynx.utils.StringUtil; import java.util.Map; public class TestActivity extends Activity { protected LynxView mLynxView; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); mLynxView = new LynxView(this); setContentView(mLynxView); } public void loadBundle(String assetName, Map<String, Object> objectsExposedToJS) { if (objectsExposedToJS != null) { for (String name : objectsExposedToJS.keySet()) { mLynxView.addJavascriptInterface(objectsExposedToJS.get(name), name); } } mLynxView.loadScriptData(readBundleScript(assetName + ".js")); } private String readBundleScript(String fileName) { try { return StringUtil.convertToString(getResources().getAssets().open(fileName)); } catch (Exception e) { e.printStackTrace(); } return null; } public LynxView getLynxView() { return mLynxView; } }
566
988
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. package com.mojang.serialization.codecs; import com.google.common.collect.ImmutableList; import com.mojang.datafixers.util.Pair; import com.mojang.datafixers.util.Unit; import com.mojang.serialization.Codec; import com.mojang.serialization.DataResult; import com.mojang.serialization.DynamicOps; import com.mojang.serialization.Lifecycle; import com.mojang.serialization.ListBuilder; import org.apache.commons.lang3.mutable.MutableObject; import java.util.List; import java.util.Objects; import java.util.stream.Stream; public final class ListCodec<A> implements Codec<List<A>> { private final Codec<A> elementCodec; public ListCodec(final Codec<A> elementCodec) { this.elementCodec = elementCodec; } @Override public <T> DataResult<T> encode(final List<A> input, final DynamicOps<T> ops, final T prefix) { final ListBuilder<T> builder = ops.listBuilder(); for (final A a : input) { builder.add(elementCodec.encodeStart(ops, a)); } return builder.build(prefix); } @Override public <T> DataResult<Pair<List<A>, T>> decode(final DynamicOps<T> ops, final T input) { return ops.getList(input).setLifecycle(Lifecycle.stable()).flatMap(stream -> { final ImmutableList.Builder<A> read = ImmutableList.builder(); final Stream.Builder<T> failed = Stream.builder(); // TODO: AtomicReference.getPlain/setPlain in java9+ final MutableObject<DataResult<Unit>> result = new MutableObject<>(DataResult.success(Unit.INSTANCE, Lifecycle.stable())); stream.accept(t -> { final DataResult<Pair<A, T>> element = elementCodec.decode(ops, t); element.error().ifPresent(e -> failed.add(t)); result.setValue(result.getValue().apply2stable((r, v) -> { read.add(v.getFirst()); return r; }, element)); }); final ImmutableList<A> elements = read.build(); final T errors = ops.createList(failed.build()); final Pair<List<A>, T> pair = Pair.of(elements, errors); return result.getValue().map(unit -> pair).setPartial(pair); }); } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final ListCodec<?> listCodec = (ListCodec<?>) o; return Objects.equals(elementCodec, listCodec.elementCodec); } @Override public int hashCode() { return Objects.hash(elementCodec); } @Override public String toString() { return "ListCodec[" + elementCodec + ']'; } }
1,200
729
<filename>jigsaw-payment-schema/src/main/gen/org/jigsaw/payment/model/AccountTitle.java<gh_stars>100-1000 // Generated by the protocol buffer compiler. DO NOT EDIT! // source: enums.proto package org.jigsaw.payment.model; /** * <pre> ** * 会计科目号, 注意,会计科目号设置影响账号ID生成策略。 * 默认的,采用七位会计科目号。 * </pre> * * Protobuf enum {@code AccountTitle} */ public enum AccountTitle implements com.google.protobuf.ProtocolMessageEnum { /** * <code>UNKNOWN_ACCOUNTTITLE = 0;</code> */ UNKNOWN_ACCOUNTTITLE(0), /** * <pre> *个人存款或钱包 * </pre> * * <code>PERSONAL_DEPOSIT = 2203001;</code> */ PERSONAL_DEPOSIT(2203001), /** * <pre> *对公存款账户 * </pre> * * <code>BUSINESS_DEPOSIT = 2203002;</code> */ BUSINESS_DEPOSIT(2203002), /** * <pre> * 银行卡对私账户 * </pre> * * <code>PERSONAL_BANK_CARD = 3001001;</code> */ PERSONAL_BANK_CARD(3001001), /** * <pre> *银行卡对公账户 * </pre> * * <code>BUSINESS_BANK_CARD = 3001002;</code> */ BUSINESS_BANK_CARD(3001002), /** * <pre> *虚币账户 * </pre> * * <code>VIRTUAL_CURRENCY = 3001003;</code> */ VIRTUAL_CURRENCY(3001003), /** * <pre> *第三方平台账户 * </pre> * * <code>PLATFORM_ACCOUNT = 3001004;</code> */ PLATFORM_ACCOUNT(3001004), ; /** * <code>UNKNOWN_ACCOUNTTITLE = 0;</code> */ public static final int UNKNOWN_ACCOUNTTITLE_VALUE = 0; /** * <pre> *个人存款或钱包 * </pre> * * <code>PERSONAL_DEPOSIT = 2203001;</code> */ public static final int PERSONAL_DEPOSIT_VALUE = 2203001; /** * <pre> *对公存款账户 * </pre> * * <code>BUSINESS_DEPOSIT = 2203002;</code> */ public static final int BUSINESS_DEPOSIT_VALUE = 2203002; /** * <pre> * 银行卡对私账户 * </pre> * * <code>PERSONAL_BANK_CARD = 3001001;</code> */ public static final int PERSONAL_BANK_CARD_VALUE = 3001001; /** * <pre> *银行卡对公账户 * </pre> * * <code>BUSINESS_BANK_CARD = 3001002;</code> */ public static final int BUSINESS_BANK_CARD_VALUE = 3001002; /** * <pre> *虚币账户 * </pre> * * <code>VIRTUAL_CURRENCY = 3001003;</code> */ public static final int VIRTUAL_CURRENCY_VALUE = 3001003; /** * <pre> *第三方平台账户 * </pre> * * <code>PLATFORM_ACCOUNT = 3001004;</code> */ public static final int PLATFORM_ACCOUNT_VALUE = 3001004; public final int getNumber() { return value; } /** * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static AccountTitle valueOf(int value) { return forNumber(value); } public static AccountTitle forNumber(int value) { switch (value) { case 0: return UNKNOWN_ACCOUNTTITLE; case 2203001: return PERSONAL_DEPOSIT; case 2203002: return BUSINESS_DEPOSIT; case 3001001: return PERSONAL_BANK_CARD; case 3001002: return BUSINESS_BANK_CARD; case 3001003: return VIRTUAL_CURRENCY; case 3001004: return PLATFORM_ACCOUNT; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap<AccountTitle> internalGetValueMap() { return internalValueMap; } private static final com.google.protobuf.Internal.EnumLiteMap< AccountTitle> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<AccountTitle>() { public AccountTitle findValueByNumber(int number) { return AccountTitle.forNumber(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(ordinal()); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.jigsaw.payment.model.Enums.getDescriptor().getEnumTypes().get(1); } private static final AccountTitle[] VALUES = values(); public static AccountTitle valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int value; private AccountTitle(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:AccountTitle) }
2,081
16,989
<reponame>jobechoi/bazel<filename>src/main/java/com/google/devtools/build/lib/runtime/RcChunkOfArgs.java<gh_stars>1000+ // Copyright 2014 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.runtime; import java.util.List; /** * We receive the rc file arguments from the client in an order that maintains the location of * "import" statements, expanding the imported rc file in place so that its args override previous * args in the file and are overridden by later arguments. We cannot group the args by rc file for * parsing, as we would lose this ordering, so we store them in these "chunks." * * <p>Each chunk comes from a single rc file, but the args stored here may not contain the entire * file if its contents were interrupted by an import statement. */ final class RcChunkOfArgs { public RcChunkOfArgs(String rcFile, List<String> args) { this.rcFile = rcFile; this.args = args; } private final String rcFile; private final List<String> args; @Override public boolean equals(Object o) { if (o instanceof RcChunkOfArgs) { RcChunkOfArgs other = (RcChunkOfArgs) o; return getRcFile().equals(other.getRcFile()) && getArgs().equals(other.getArgs()); } return false; } @Override public int hashCode() { return getRcFile().hashCode() + getArgs().hashCode(); } /** The name of the rc file, usually a path. */ String getRcFile() { return rcFile; } /** * The list of arguments specified in this rc "chunk". This is all for a single command (or * command:config definition), as different commands will be grouped together, so this list of * arguments can all be parsed as a continuous group. */ List<String> getArgs() { return args; } }
691
66,985
<gh_stars>1000+ /* * Copyright 2012-2020 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.context.config; import java.io.File; import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.springframework.core.io.DefaultResourceLoader; import org.springframework.util.FileCopyUtils; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; /** * Tests for {@link ConfigTreeConfigDataLocationResolver}. * * @author <NAME> * @author <NAME> */ class ConfigTreeConfigDataLocationResolverTests { private ConfigTreeConfigDataLocationResolver resolver = new ConfigTreeConfigDataLocationResolver( new DefaultResourceLoader()); private ConfigDataLocationResolverContext context = mock(ConfigDataLocationResolverContext.class); @TempDir File temp; @Test void isResolvableWhenPrefixMatchesReturnsTrue() { assertThat(this.resolver.isResolvable(this.context, ConfigDataLocation.of("configtree:/etc/config"))).isTrue(); } @Test void isResolvableWhenPrefixDoesNotMatchReturnsFalse() { assertThat(this.resolver.isResolvable(this.context, ConfigDataLocation.of("http://etc/config"))).isFalse(); assertThat(this.resolver.isResolvable(this.context, ConfigDataLocation.of("/etc/config"))).isFalse(); } @Test void resolveReturnsConfigVolumeMountLocation() { List<ConfigTreeConfigDataResource> locations = this.resolver.resolve(this.context, ConfigDataLocation.of("configtree:/etc/config/")); assertThat(locations.size()).isEqualTo(1); assertThat(locations).extracting(Object::toString) .containsExactly("config tree [" + new File("/etc/config").getAbsolutePath() + "]"); } @Test void resolveWilcardPattern() throws Exception { File directoryA = new File(this.temp, "a"); File directoryB = new File(this.temp, "b"); directoryA.mkdirs(); directoryB.mkdirs(); FileCopyUtils.copy("test".getBytes(), new File(directoryA, "spring")); FileCopyUtils.copy("test".getBytes(), new File(directoryB, "boot")); List<ConfigTreeConfigDataResource> locations = this.resolver.resolve(this.context, ConfigDataLocation.of("configtree:" + this.temp.getAbsolutePath() + "/*/")); assertThat(locations.size()).isEqualTo(2); assertThat(locations).extracting(Object::toString).containsExactly( "config tree [" + directoryA.getAbsolutePath() + "]", "config tree [" + directoryB.getAbsolutePath() + "]"); } }
957
879
package com.bookstore; import com.bookstore.entity.Book; import com.bookstore.service.BookstoreService; import org.springframework.boot.ApplicationRunner; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.Bean; @SpringBootApplication public class MainApplication { private final BookstoreService bookstoreService; public MainApplication(BookstoreService bookstoreService) { this.bookstoreService = bookstoreService; } public static void main(String[] args) { SpringApplication.run(MainApplication.class, args); } @Bean public ApplicationRunner init() { return args -> { System.out.println("Find a book:"); // this can come via a controller endpoint Book book = new Book(); book.setTitle("Carrie"); book.setGenre("Horror"); book.setIsbn("001-OG"); book.setAuthor("<NAME>"); book.setPrice(23); boolean foundAnd = bookstoreService.existsBook1(book); System.out.println("Found (existsBook1): " + foundAnd + "\n"); boolean foundOr = bookstoreService.existsBook2(book); System.out.println("Found (existsBook2): " + foundOr + "\n"); boolean foundIgnorePath = bookstoreService.existsBook3(book); System.out.println("Found (existsBook3): " + foundIgnorePath + "\n"); }; } }
686
1,509
package com.fc.v2.model.auto; import com.fasterxml.jackson.annotation.JsonFormat; import com.fc.v2.util.DateUtils; import java.io.Serializable; import java.util.Date; /** * 公告 SysNotice * @author fuce_自动生成 * @email <EMAIL> * @date 2019-09-08 01:38:44 */ public class SysNotice implements Serializable { private static final long serialVersionUID = 1L; /** 主键 **/ private String id; /** 标题 **/ private String title; /** 内容 **/ private String content; /** 类型 **/ private Integer type; /** 创建人id **/ private String createId; /** 创建人name **/ private String createUsername; /** 发信时间 **/ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8") private Date createTime; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getTitle() { return title; } public void setTitle(String title) { this.title = title; } public String getContent() { return content; } public void setContent(String content) { this.content = content; } public Integer getType() { return type; } public void setType(Integer type) { this.type = type; } public String getCreateId() { return createId; } public void setCreateId(String createId) { this.createId = createId; } public String getCreateUsername() { return createUsername; } public void setCreateUsername(String createUsername) { this.createUsername = createUsername; } public Date getCreateTime() { return createTime; } public void setCreateTime(Date createTime) { this.createTime = createTime; } public SysNotice() { super(); } public SysNotice(String id,String title,String content,Integer type,String createId,String createUsername,Date createTime) { this.id = id; this.title = title; this.content = content; this.type = type; this.createId = createId; this.createUsername = createUsername; this.createTime = createTime; } /** * 格式化时间 * @return YYYY_MM_DD */ public String getdate(){ return DateUtils.dateTime(this.createTime); } }
1,047
1,920
# -*- coding: utf-8 -*- from mathics_scanner import is_symbol_name from mathics.core.parser.feed import ( MathicsFileLineFeeder, MathicsLineFeeder, MathicsMultiLineFeeder, MathicsSingleLineFeeder, ) from mathics.core.parser.util import parse, parse_builtin_rule from mathics.core.parser.operators import all_operator_names
123
373
// Rar5Handler.h #ifndef __RAR5_HANDLER_H #define __RAR5_HANDLER_H #include "../../../../C/Blake2.h" #include "../../../Common/MyBuffer.h" #include "../../../Windows/PropVariant.h" #include "../../Common/CreateCoder.h" #include "../IArchive.h" namespace NArchive { namespace NRar5 { namespace NHeaderFlags { const unsigned kExtra = 1 << 0; const unsigned kData = 1 << 1; // const unsigned kUnknown = 1 << 2; const unsigned kPrevVol = 1 << 3; const unsigned kNextVol = 1 << 4; // const unsigned kIsChild = 1 << 5; // const unsigned kPreserveChild = 1 << 6; } namespace NHeaderType { enum { kArc = 1, kFile, kService, kArcEncrypt, kEndOfArc }; } namespace NArcFlags { const unsigned kVol = 1 << 0; const unsigned kVolNumber = 1 << 1; const unsigned kSolid = 1 << 2; // const unsigned kRecovery = 1 << 3; // const unsigned kLocked = 1 << 4; } const unsigned kArcExtraRecordType_Locator = 1; namespace NLocatorFlags { const unsigned kQuickOpen = 1 << 0; const unsigned kRecovery = 1 << 1; } namespace NFileFlags { const unsigned kIsDir = 1 << 0; const unsigned kUnixTime = 1 << 1; const unsigned kCrc32 = 1 << 2; const unsigned kUnknownSize = 1 << 3; } namespace NMethodFlags { // const unsigned kVersionMask = 0x3F; const unsigned kSolid = 1 << 6; } namespace NArcEndFlags { const unsigned kMoreVols = 1 << 0; } enum EHostOS { kHost_Windows = 0, kHost_Unix }; // ---------- Extra ---------- namespace NExtraRecordType { enum { kCrypto = 1, kHash, kTime, kVersion, kLink, kUnixOwner, kSubdata }; } // const unsigned kCryptoAlgo_AES = 0; namespace NCryptoFlags { const unsigned kPswCheck = 1 << 0; const unsigned kUseMAC = 1 << 1; } struct CCryptoInfo { UInt64 Algo; UInt64 Flags; Byte Cnt; bool UseMAC() const { return (Flags & NCryptoFlags::kUseMAC) != 0; } bool IsThereCheck() const { return (Flags & NCryptoFlags::kPswCheck) != 0; } bool Parse(const Byte *p, size_t size); }; const unsigned kHashID_Blake2sp = 0; namespace NTimeRecord { enum { k_Index_MTime = 0, k_Index_CTime, k_Index_ATime }; namespace NFlags { const unsigned kUnixTime = 1 << 0; const unsigned kMTime = 1 << 1; // const unsigned kCTime = 1 << 2; // const unsigned kATime = 1 << 3; } } namespace NLinkType { enum { kUnixSymLink = 1, kWinSymLink, kWinJunction, kHardLink, kFileCopy }; } namespace NLinkFlags { const unsigned kTargetIsDir = 1 << 0; } struct CItem { UInt32 CommonFlags; UInt32 Flags; Byte RecordType; bool Version_Defined; int ACL; AString Name; int VolIndex; int NextItem; UInt32 UnixMTime; UInt32 CRC; UInt32 Attrib; UInt32 Method; CByteBuffer Extra; UInt64 Size; UInt64 PackSize; UInt64 HostOS; UInt64 DataPos; UInt64 Version; CItem() { Clear(); } void Clear() { CommonFlags = 0; Flags = 0; VolIndex = 0; NextItem = -1; Version_Defined = false; Version = 0; Name.Empty(); Extra.Free(); ACL = -1; } bool IsSplitBefore() const { return (CommonFlags & NHeaderFlags::kPrevVol) != 0; } bool IsSplitAfter() const { return (CommonFlags & NHeaderFlags::kNextVol) != 0; } bool IsSplit() const { return (CommonFlags & (NHeaderFlags::kPrevVol | NHeaderFlags::kNextVol)) != 0; } bool IsDir() const { return (Flags & NFileFlags::kIsDir) != 0; } bool Has_UnixMTime() const { return (Flags & NFileFlags::kUnixTime) != 0; } bool Has_CRC() const { return (Flags & NFileFlags::kCrc32) != 0; } bool Is_UnknownSize() const { return (Flags & NFileFlags::kUnknownSize) != 0; } bool IsNextForItem(const CItem &prev) const { return !IsDir() && !prev.IsDir() && IsSplitBefore() && prev.IsSplitAfter() && (Name == prev.Name); // && false; } bool IsSolid() const { return ((UInt32)Method & NMethodFlags::kSolid) != 0; } unsigned GetAlgoVersion() const { return (unsigned)Method & 0x3F; } unsigned GetMethod() const { return ((unsigned)Method >> 7) & 0x7; } UInt32 GetDictSize() const { return (((UInt32)Method >> 10) & 0xF); } bool IsService() const { return RecordType == NHeaderType::kService; } bool Is_STM() const { return IsService() && Name == "STM"; } bool Is_CMT() const { return IsService() && Name == "CMT"; } bool Is_ACL() const { return IsService() && Name == "ACL"; } // bool Is_QO() const { return IsService() && Name == "QO"; } int FindExtra(unsigned type, unsigned &recordDataSize) const; bool IsEncrypted() const { unsigned size; return FindExtra(NExtraRecordType::kCrypto, size) >= 0; } int FindExtra_Blake() const { unsigned size = 0; int offset = FindExtra(NExtraRecordType::kHash, size); if (offset >= 0 && size == BLAKE2S_DIGEST_SIZE + 1 && Extra[(unsigned)offset] == kHashID_Blake2sp) return offset + 1; return -1; } bool FindExtra_Version(UInt64 &version) const; struct CLinkInfo { UInt64 Type; UInt64 Flags; unsigned NameOffset; unsigned NameLen; }; bool FindExtra_Link(CLinkInfo &link) const; void Link_to_Prop(unsigned linkType, NWindows::NCOM::CPropVariant &prop) const; bool Is_CopyLink() const; bool NeedUse_as_CopyLink() const { return PackSize == 0 && Is_CopyLink(); } bool GetAltStreamName(AString &name) const; UInt32 GetWinAttrib() const { UInt32 a; switch (HostOS) { case kHost_Windows: a = Attrib; break; case kHost_Unix: a = (Attrib << 16); break; default: a = 0; } // if (IsDir()) a |= FILE_ATTRIBUTE_DIRECTORY; return a; } UInt64 GetDataPosition() const { return DataPos; } }; struct CInArcInfo { UInt64 Flags; UInt64 VolNumber; UInt64 StartPos; UInt64 EndPos; UInt64 EndFlags; bool EndOfArchive_was_Read; bool IsEncrypted; // CByteBuffer Extra; /* struct CLocator { UInt64 Flags; UInt64 QuickOpen; UInt64 Recovery; bool Is_QuickOpen() const { return (Flags & NLocatorFlags::kQuickOpen) != 0; } bool Is_Recovery() const { return (Flags & NLocatorFlags::kRecovery) != 0; } }; int FindExtra(unsigned type, unsigned &recordDataSize) const; bool FindExtra_Locator(CLocator &locator) const; */ CInArcInfo(): Flags(0), VolNumber(0), StartPos(0), EndPos(0), EndFlags(0), EndOfArchive_was_Read(false), IsEncrypted(false) {} /* void Clear() { Flags = 0; VolNumber = 0; StartPos = 0; EndPos = 0; EndFlags = 0; EndOfArchive_was_Read = false; Extra.Free(); } */ UInt64 GetPhySize() const { return EndPos - StartPos; } bool AreMoreVolumes() const { return (EndFlags & NArcEndFlags::kMoreVols) != 0; } bool IsVolume() const { return (Flags & NArcFlags::kVol) != 0; } bool IsSolid() const { return (Flags & NArcFlags::kSolid) != 0; } bool Is_VolNumber_Defined() const { return (Flags & NArcFlags::kVolNumber) != 0; } UInt64 GetVolIndex() const { return Is_VolNumber_Defined() ? VolNumber : 0; } }; struct CRefItem { unsigned Item; unsigned Last; int Parent; int Link; }; struct CArc { CMyComPtr<IInStream> Stream; CInArcInfo Info; }; class CHandler: public IInArchive, public IArchiveGetRawProps, PUBLIC_ISetCompressCodecsInfo public CMyUnknownImp { public: CRecordVector<CRefItem> _refs; CObjectVector<CItem> _items; private: CObjectVector<CArc> _arcs; CObjectVector<CByteBuffer> _acls; UInt32 _errorFlags; // UInt32 _warningFlags; bool _isArc; CByteBuffer _comment; UString _missingVolName; DECL_EXTERNAL_CODECS_VARS UInt64 GetPackSize(unsigned refIndex) const; void FillLinks(); HRESULT Open2(IInStream *stream, const UInt64 *maxCheckStartPosition, IArchiveOpenCallback *openCallback); public: MY_QUERYINTERFACE_BEGIN2(IInArchive) MY_QUERYINTERFACE_ENTRY(IArchiveGetRawProps) QUERY_ENTRY_ISetCompressCodecsInfo MY_QUERYINTERFACE_END MY_ADDREF_RELEASE INTERFACE_IInArchive(;) INTERFACE_IArchiveGetRawProps(;) DECL_ISetCompressCodecsInfo }; }} #endif
3,277
575
<reponame>iridium-browser/iridium-browser // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/sharing/sharing_ui_controller.h" #include <utility> #include "base/time/time.h" #include "chrome/browser/sharing/features.h" #include "chrome/browser/sharing/sharing_dialog.h" #include "chrome/browser/sharing/sharing_dialog_data.h" #include "chrome/browser/sharing/sharing_service_factory.h" #include "chrome/browser/ui/browser_finder.h" #include "chrome/browser/ui/browser_window.h" #include "chrome/browser/ui/singleton_tabs.h" #include "chrome/common/url_constants.h" #include "chrome/grit/chromium_strings.h" #include "components/sync_device_info/device_info.h" #include "ui/base/l10n/l10n_util.h" #include "ui/gfx/vector_icon_types.h" #include "ui/strings/grit/ui_strings.h" namespace { BrowserWindow* GetWindowFromWebContents(content::WebContents* web_contents) { Browser* browser = chrome::FindBrowserWithWebContents(web_contents); return browser ? browser->window() : nullptr; } content::WebContents* GetCurrentWebContents( content::WebContents* web_contents) { Browser* browser = chrome::FindBrowserWithWebContents(web_contents); return browser ? browser->tab_strip_model()->GetActiveWebContents() : nullptr; } SharingDialogType GetSharingDialogType(bool has_devices, bool has_apps) { if (has_devices) return SharingDialogType::kDialogWithDevicesMaybeApps; if (has_apps) return SharingDialogType::kDialogWithoutDevicesWithApp; return SharingDialogType::kEducationalDialog; } } // namespace SharingUiController::SharingUiController(content::WebContents* web_contents) : web_contents_(web_contents), sharing_service_(SharingServiceFactory::GetForBrowserContext( web_contents->GetBrowserContext())) {} SharingUiController::~SharingUiController() = default; std::u16string SharingUiController::GetTitle(SharingDialogType dialog_type) { // We only handle error messages generically. DCHECK_EQ(SharingDialogType::kErrorDialog, dialog_type); switch (send_result()) { case SharingSendMessageResult::kDeviceNotFound: case SharingSendMessageResult::kNetworkError: case SharingSendMessageResult::kAckTimeout: case SharingSendMessageResult::kCommitTimeout: return l10n_util::GetStringFUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TITLE_GENERIC_ERROR, base::ToLowerASCII(GetContentType())); case SharingSendMessageResult::kSuccessful: case SharingSendMessageResult::kCancelled: NOTREACHED(); FALLTHROUGH; case SharingSendMessageResult::kPayloadTooLarge: case SharingSendMessageResult::kInternalError: case SharingSendMessageResult::kEncryptionError: return l10n_util::GetStringFUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TITLE_INTERNAL_ERROR, base::ToLowerASCII(GetContentType())); } } std::u16string SharingUiController::GetErrorDialogText() const { switch (send_result()) { case SharingSendMessageResult::kDeviceNotFound: return l10n_util::GetStringFUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TEXT_DEVICE_NOT_FOUND, GetTargetDeviceName()); case SharingSendMessageResult::kCommitTimeout: case SharingSendMessageResult::kNetworkError: return l10n_util::GetStringUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TEXT_NETWORK_ERROR); case SharingSendMessageResult::kAckTimeout: return l10n_util::GetStringFUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TEXT_DEVICE_ACK_TIMEOUT, GetTargetDeviceName()); case SharingSendMessageResult::kSuccessful: case SharingSendMessageResult::kCancelled: return std::u16string(); case SharingSendMessageResult::kPayloadTooLarge: case SharingSendMessageResult::kInternalError: case SharingSendMessageResult::kEncryptionError: return l10n_util::GetStringUTF16( IDS_BROWSER_SHARING_ERROR_DIALOG_TEXT_INTERNAL_ERROR); } } void SharingUiController::OnDialogClosed(SharingDialog* dialog) { // Ignore already replaced dialogs. if (dialog != dialog_) return; dialog_ = nullptr; UpdateIcon(); } void SharingUiController::OnDialogShown(bool has_devices, bool has_apps) { if (on_dialog_shown_closure_for_testing_) std::move(on_dialog_shown_closure_for_testing_).Run(); } void SharingUiController::ClearLastDialog() { last_dialog_id_++; is_loading_ = false; send_result_ = SharingSendMessageResult::kSuccessful; CloseDialog(); } void SharingUiController::UpdateAndShowDialog( const base::Optional<url::Origin>& initiating_origin) { ClearLastDialog(); DoUpdateApps(base::BindOnce(&SharingUiController::OnAppsReceived, weak_ptr_factory_.GetWeakPtr(), last_dialog_id_, initiating_origin)); } std::vector<std::unique_ptr<syncer::DeviceInfo>> SharingUiController::GetDevices() const { return sharing_service_->GetDeviceCandidates(GetRequiredFeature()); } bool SharingUiController::HasSendFailed() const { return send_result_ != SharingSendMessageResult::kSuccessful; } void SharingUiController::MaybeShowErrorDialog() { if (HasSendFailed() && web_contents_ == GetCurrentWebContents(web_contents_)) ShowNewDialog(CreateDialogData(SharingDialogType::kErrorDialog)); } SharingDialogData SharingUiController::CreateDialogData( SharingDialogType dialog_type) { SharingDialogData data; data.type = dialog_type; data.prefix = GetFeatureMetricsPrefix(); data.title = GetTitle(data.type); data.error_text = GetErrorDialogText(); auto weak_ptr = weak_ptr_factory_.GetWeakPtr(); data.device_callback = base::BindOnce(&SharingUiController::OnDeviceChosen, weak_ptr); data.app_callback = base::BindOnce(&SharingUiController::OnAppChosen, weak_ptr); data.close_callback = base::BindOnce(&SharingUiController::OnDialogClosed, weak_ptr); return data; } base::OnceClosure SharingUiController::SendMessageToDevice( const syncer::DeviceInfo& device, base::Optional<base::TimeDelta> response_timeout, chrome_browser_sharing::SharingMessage sharing_message, base::Optional<SharingMessageSender::ResponseCallback> custom_callback) { last_dialog_id_++; is_loading_ = true; send_result_ = SharingSendMessageResult::kSuccessful; target_device_name_ = device.client_name(); UpdateIcon(); SharingMessageSender::ResponseCallback response_callback = base::BindOnce( &SharingUiController::OnResponse, weak_ptr_factory_.GetWeakPtr(), last_dialog_id_, std::move(custom_callback)); return sharing_service_->SendMessageToDevice( device, response_timeout.value_or( base::TimeDelta::FromSeconds(kSharingMessageTTLSeconds.Get())), std::move(sharing_message), std::move(response_callback)); } void SharingUiController::UpdateIcon() { BrowserWindow* window = GetWindowFromWebContents(web_contents_); if (!window) return; window->UpdatePageActionIcon(GetIconType()); } void SharingUiController::CloseDialog() { if (!dialog_) return; dialog_->Hide(); // SharingDialog::Hide may close the dialog asynchronously, and therefore not // call OnDialogClosed immediately. If that is the case, call OnDialogClosed // now to notify subclasses and clear |dialog_|. if (dialog_) OnDialogClosed(dialog_); DCHECK(!dialog_); } void SharingUiController::ShowNewDialog(SharingDialogData dialog_data) { CloseDialog(); BrowserWindow* window = GetWindowFromWebContents(web_contents_); if (!window) return; bool has_devices = !dialog_data.devices.empty(); bool has_apps = !dialog_data.apps.empty(); dialog_ = window->ShowSharingDialog(web_contents(), std::move(dialog_data)); UpdateIcon(); OnDialogShown(has_devices, has_apps); } std::u16string SharingUiController::GetTargetDeviceName() const { return base::UTF8ToUTF16(target_device_name_); } void SharingUiController::OnResponse( int dialog_id, base::Optional<SharingMessageSender::ResponseCallback> custom_callback, SharingSendMessageResult result, std::unique_ptr<chrome_browser_sharing::ResponseMessage> response) { if (custom_callback) std::move(custom_callback.value()).Run(result, std::move(response)); if (dialog_id != last_dialog_id_) return; is_loading_ = false; send_result_ = result; UpdateIcon(); } void SharingUiController::OnAppsReceived( int dialog_id, const base::Optional<url::Origin>& initiating_origin, std::vector<SharingApp> apps) { if (dialog_id != last_dialog_id_) return; auto devices = GetDevices(); SharingDialogData dialog_data = CreateDialogData(GetSharingDialogType(!devices.empty(), !apps.empty())); dialog_data.devices = std::move(devices); dialog_data.apps = std::move(apps); dialog_data.initiating_origin = initiating_origin; ShowNewDialog(std::move(dialog_data)); }
3,126
14,668
<reponame>zealoussnow/chromium // Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_CRASH_CONTENT_BROWSER_CHILD_PROCESS_CRASH_OBSERVER_ANDROID_H_ #define COMPONENTS_CRASH_CONTENT_BROWSER_CHILD_PROCESS_CRASH_OBSERVER_ANDROID_H_ #include "base/memory/scoped_refptr.h" #include "base/task/sequenced_task_runner.h" #include "components/crash/content/browser/child_exit_observer_android.h" namespace crash_reporter { // Records metrics and initiates minidump upload in response to child process // crashes. class ChildProcessCrashObserver : public crash_reporter::ChildExitObserver::Client { public: ChildProcessCrashObserver(); ChildProcessCrashObserver(const ChildProcessCrashObserver&) = delete; ChildProcessCrashObserver& operator=(const ChildProcessCrashObserver&) = delete; ~ChildProcessCrashObserver() override; // crash_reporter::ChildExitObserver::Client implementation: void OnChildExit(const ChildExitObserver::TerminationInfo& info) override; private: void OnChildExitImpl(const ChildExitObserver::TerminationInfo& info); scoped_refptr<base::SequencedTaskRunner> task_runner_; }; } // namespace crash_reporter #endif // COMPONENTS_CRASH_CONTENT_BROWSER_CHILD_PROCESS_CRASH_OBSERVER_ANDROID_H_
448
471
package ip_range_to_cidr; import java.util.*; import org.junit.*; import static org.junit.Assert.*; public class IPRangetoCIDR { /* IP range to CIDR https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing http://www.ipaddressguide.com/cidr https://stackoverflow.com/questions/33443914/how-to-convert-ip-address-range-to-cidr-in-java AirBnB Interview Question */ public class Solution { private long ipToLong(String strIP) { long[] ip = new long[4]; String[] ipSec = strIP.split("\\."); for (int k = 0; k < 4; k++) { ip[k] = Long.valueOf(ipSec[k]); } return (ip[0] << 24) + (ip[1] << 16) + (ip[2] << 8) + ip[3]; } private String longToIP(long longIP) { StringBuffer sb = new StringBuffer(""); sb.append(String.valueOf(longIP >>> 24)); sb.append("."); sb.append(String.valueOf((longIP & 0x00FFFFFF) >>> 16)); sb.append("."); sb.append(String.valueOf((longIP & 0x0000FFFF) >>> 8)); sb.append("."); sb.append(String.valueOf(longIP & 0x000000FF)); return sb.toString(); } public List<String> ipRange2Cidr(String startIp, int range) { // check parameters String a = ""; long start = ipToLong(startIp); long end = start + range - 1; List<String> res = new ArrayList<>(); while (start <= end) { // identify the location of first 1's from lower bit to higher bit of start IP // e.g. 00000001.00000001.00000001.01101100, return 4 (100) long locOfFirstOne = start & (-start); int curMask = 32 - (int) (Math.log(locOfFirstOne) / Math.log(2)); // calculate how many IP addresses between the start and end // e.g. between 172.16.31.10 and 172.16.31.10, there are 10 IP address // 3 bits to represent 8 IPs, from 172.16.58.3 to 172.16.31.10 (119 - 112 + 1 = 8) double currRange = Math.log(end - start + 1) / Math.log(2); int currRangeMask = 32 - (int) Math.floor(currRange); // why max? // if the currRangeMask is larger than curMask // which means the numbers of IPs from start to end is smaller than mask range // so we can't use as many as bits we want to mask the start IP to avoid exceed the end IP // Otherwise, if currRangeMask is smaller than curMask, which means number of IPs is larger than mask range // in this case we can use curMask to mask as many as IPs from start we want. curMask = Math.max(currRangeMask, curMask); // Add to results String ip = longToIP(start); res.add(ip + "/" + curMask); // We have already included 2^(32 - curMask) numbers of IP into result // So the next roundUp start must insert that number start += Math.pow(2, (32 - curMask)); } return res; } } public static class UnitTest { @Test public void test1() { Solution sol = new IPRangetoCIDR().new Solution(); List<String> res = sol.ipRange2Cidr("255.0.0.7", 10); assertEquals(3, res.size()); assertEquals("255.0.0.7/32", res.get(0)); assertEquals("255.0.0.8/29", res.get(1)); assertEquals("255.0.0.16/32", res.get(2)); res = sol.ipRange2Cidr("1.1.1.0", 4); assertEquals(1, res.size()); assertEquals("1.1.1.0/30", res.get(0)); res = sol.ipRange2Cidr("1.1.1.1", 4); assertEquals(3, res.size()); assertEquals("1.1.1.1/32", res.get(0)); assertEquals("1.1.1.2/31", res.get(1)); assertEquals("1.1.1.4/32", res.get(2)); } } }
2,021
3,075
<filename>tests/mockito/junit4/src/test/java/samples/powermockito/junit4/bugs/github716/B.java package samples.powermockito.junit4.bugs.github716; public class B { }
68
484
<gh_stars>100-1000 /* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH under * one or more contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright ownership. * Licensed under the Zeebe Community License 1.1. You may not use this file * except in compliance with the Zeebe Community License 1.1. */ package io.camunda.zeebe.dmn.impl; import io.camunda.zeebe.dmn.ParsedDecision; public final class ParsedDmnScalaDecision implements ParsedDecision { private final String decisionId; private final String decisionName; public ParsedDmnScalaDecision(final String decisionId, final String decisionName) { this.decisionId = decisionId; this.decisionName = decisionName; } @Override public String getName() { return decisionName; } @Override public String getId() { return decisionId; } }
266
892
<reponame>westonsteimel/advisory-database-github { "schema_version": "1.2.0", "id": "GHSA-v2gf-33gc-47jc", "modified": "2022-04-30T18:16:12Z", "published": "2022-04-30T18:16:12Z", "aliases": [ "CVE-2001-0513" ], "details": "Oracle listener process on Windows NT redirects connection requests to another port and creates a separate thread to process the request, which allows remote attackers to cause a denial of service by repeatedly connecting to the Oracle listener but not connecting to the redirected port.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2001-0513" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/6717" }, { "type": "WEB", "url": "http://www.kb.cert.org/vuls/id/105259" }, { "type": "WEB", "url": "http://www.osvdb.org/5600" }, { "type": "WEB", "url": "http://xforce.iss.net/alerts/advise81.php" } ], "database_specific": { "cwe_ids": [ ], "severity": "MODERATE", "github_reviewed": false } }
510
772
{ "banner": [ "/**", " * UI-Router Extras: Sticky states, Future States, Deep State Redirect, Transition promise", " * <%= module %>", " * @version <%= pkg.version %>", " * @link http://christopherthielen.github.io/ui-router-extras/", " * @license MIT License, http://www.opensource.org/licenses/MIT", " */" ], "minbanner": [ "/** UI-Router Extras v.<%= pkg.version %> <%= module %> http://christopherthielen.github.io/ui-router-extras/ - MIT License */" ] }
202
1,749
<filename>test/fwd_test.cpp<gh_stars>1000+ // Copyright 2015, <NAME> and the FunctionalPlus contributors. // https://github.com/Dobiasd/FunctionalPlus // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <doctest/doctest.h> #include <fplus/fplus.hpp> namespace { typedef std::vector<int> IntVector; bool is_odd_int(int x) { return (x % 2 != 0); } bool is_even_int(int x) { return (x % 2 == 0); } int times_3(int x) { return 3 * x; } int as_string_length(int i) { return static_cast<int>(std::to_string(i).size()); } const auto times_3_lambda = [](int x){return times_3(x);}; const auto is_odd_int_lambda = [](int x){return is_odd_int(x);}; const auto as_string_length_lambda = [](int x){return as_string_length(x);}; int (*times_3_fn_ptr)(int) = &times_3; struct times_3_struct { int operator() (const int x) { return times_3(x); } static int sttcMemF(int x) { return times_3(x); } }; std::function<int(int)> times_3_std_function = times_3_lambda; } TEST_CASE("fwd_test - apply") { using namespace fplus; const auto result_old_style = sum( transform(as_string_length, drop_if(is_odd_int, transform(times_3, numbers(0, 10))))); const auto result_new_style = fwd::apply( numbers(0, 10) , fwd::transform(times_3) , fwd::drop_if(is_odd_int) , fwd::transform(as_string_length) , fwd::sum()); REQUIRE_EQ(result_old_style, result_new_style); } TEST_CASE("fwd_test - compose") { using namespace fplus; const auto function_chain_old_style = compose( bind_1st_of_2(transform<decltype(times_3), const std::vector<int>&, std::vector<int>>, times_3), bind_1st_of_2(drop_if<decltype(is_odd_int), const std::vector<int>&>, is_odd_int), bind_1st_of_2(transform<decltype(as_string_length_lambda), const std::vector<int>&>, as_string_length_lambda), sum<std::vector<int>>); const auto function_chain_new_style = fwd::compose( fwd::transform(times_3), fwd::drop_if(is_odd_int_lambda), fwd::transform(as_string_length), fwd::sum()); const auto xs = numbers(0, 10); REQUIRE_EQ(function_chain_old_style(xs), function_chain_new_style(xs)); } TEST_CASE("fwd_test - and_then_maybe") { using namespace fplus; const auto sqrtToMaybeInt = [](int x) -> fplus::maybe<int> { return x < 0 ? fplus::nothing<int>() : fplus::just(fplus::round(sqrt(static_cast<float>(x)))); }; REQUIRE_EQ( fwd::apply(just(4) , fwd::and_then_maybe(sqrtToMaybeInt)) , just(2)); } TEST_CASE("fwd_test - fold_left") { using namespace fplus; const auto fold_result_old_style = fold_left(std::plus<int>(), 0, numbers(0, 10)); const auto fold_result_new_style = fwd::apply( numbers(0, 10) , fwd::fold_left(std::plus<int>(), 0)); REQUIRE_EQ(fold_result_old_style, fold_result_new_style); } TEST_CASE("fwd_test - transform_nested") { using namespace fplus; typedef std::vector<int> ints; const std::vector<ints> nested_ints = {{1,2,3},{4,5,6}}; const auto nested_transformed_old_style = transform( bind_1st_of_2(transform<decltype(times_3), const std::vector<int>&, std::vector<int>>, times_3), nested_ints); const auto nested_transformed_new_style = fwd::apply( nested_ints , fwd::transform(fwd::transform(times_3_lambda))); REQUIRE_EQ(nested_transformed_old_style, nested_transformed_new_style); } TEST_CASE("fwd_test - different_function_types_apply") { using namespace fplus; const std::vector<int> xs = {1,2,3}; const auto result = transform(times_3, xs); REQUIRE_EQ(fwd::apply(xs, fwd::transform(times_3)), result); REQUIRE_EQ(fwd::apply(xs, fwd::transform(times_3_lambda)), result); REQUIRE_EQ(fwd::apply(xs, fwd::transform(times_3_std_function)), result); REQUIRE_EQ(fwd::apply(xs, fwd::transform(times_3_fn_ptr)), result); REQUIRE_EQ(fwd::apply(xs, fwd::transform(&times_3_struct::sttcMemF)), result); REQUIRE_EQ(fwd::apply(xs, fwd::transform(times_3_struct())), result); } TEST_CASE("fwd_test - different_function_types_compose") { using namespace fplus; const std::vector<int> xs = {1,2,3}; const auto result = transform(times_3, transform(times_3, xs)); REQUIRE_EQ(fwd::transform(fwd::compose(times_3, times_3))(xs), result); REQUIRE_EQ(fwd::transform(fwd::compose(times_3_lambda, times_3_lambda))(xs), result); REQUIRE_EQ(fwd::transform(fwd::compose(times_3_std_function, times_3_std_function))(xs), result); REQUIRE_EQ(fwd::transform(fwd::compose(&times_3_struct::sttcMemF, &times_3_struct::sttcMemF))(xs), result); REQUIRE_EQ(fwd::transform(fwd::compose(times_3_fn_ptr, times_3_fn_ptr))(xs), result); } std::list<int> collatz_seq(int x) { std::list<int> result; while (x > 1) { result.push_back(x); if (x % 2 == 0) x = x / 2; else x = 3 * x + 1; } result.push_back(x); return result; } TEST_CASE("fwd_test - collatz") { using namespace fplus; auto collatz_dict = fwd::apply( fplus::numbers<int>(0, 20) , fwd::create_map_with(fwd::compose( collatz_seq, fwd::show_cont_with(" => "))) ); } TEST_CASE("fwd_test - fwd_flip") { using namespace fplus; std::vector<std::vector<std::size_t>> idxs = {{0,1,2}, {2,0}}; const std::vector<int> xs = {0,10,20}; const std::vector<int> ys = fwd::transform_and_concat(fwd::flip::elems_at_idxs(xs))(idxs); const std::vector<int> result = {0,10,20,20,0}; REQUIRE_EQ(ys, result); } TEST_CASE("fwd_test - keep_if") { const std::vector<int> v = { 1, 2, 3, 2, 4, 5 }; auto result = fplus::fwd::keep_if(is_even_int)(v); REQUIRE_EQ(result, std::vector<int>({2, 2, 4})); } TEST_CASE("fwd_test - keep_if_r_value") { auto result = fplus::fwd::keep_if(is_even_int)(std::vector<int>({1,2,3,2,4,5})); REQUIRE_EQ(result, std::vector<int>({2, 2, 4})); } TEST_CASE("fwd_test - zip_with") { using namespace fplus; const auto multiply_int = [](int x, int y) -> int { return x * y; }; const auto multiply_generic = [](auto x, auto y){ return x * y; }; IntVector xs = {1,2,3,4,2}; IntVector ys = {2,2,3,1}; IntVector xs_mult_ys = {2,4,9,4}; REQUIRE_EQ(fwd::zip_with(multiply_int, ys)(xs), xs_mult_ys); REQUIRE_EQ(fwd::zip_with(multiply_generic, ys)(xs), xs_mult_ys); } TEST_CASE("fwd_test - append") { using namespace fplus; IntVector xs = {1,2,3,4,2}; IntVector ys = {2,2,3,1}; IntVector xs_append_ys = {1,2,3,4,2,2,2,3,1}; REQUIRE_EQ(fwd::append(xs)(ys), xs_append_ys); }
3,223
3,461
<gh_stars>1000+ [{ "twitterHandle": "@jackmcdade", "githubUsername": "jackmcdade", "githubUserId": "44739", "since": "Nov 2019" }]
58
994
package name.caiyao.microreader.presenter; /** * Created by 蔡小木 on 2016/4/24 0024. */ public interface ISettingPresenter extends BasePresenter { void checkUpdate(); }
62
335
{ "word": "Quinoline", "definitions": [ "A pungent oily liquid present in coal tar." ], "parts-of-speech": "Noun" }
64
669
<reponame>lchang20/onnxruntime // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/shared_inc/fast_divmod.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2> void InstanceNormImpl( cudaStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* variance, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count); } // namespace cuda } // namespace onnxruntime
254
2,053
/* * Copyright 2015 the original author or authors. * @https://github.com/scouter-project/scouter * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package scouterx.webapp.framework.client.net; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import scouter.io.DataInputX; import scouter.io.DataOutputX; import scouter.lang.pack.MapPack; import scouter.lang.pack.Pack; import scouter.lang.value.Value; import scouter.net.RequestCmd; import scouter.net.TcpFlag; import scouterx.webapp.framework.client.server.Server; import scouterx.webapp.framework.client.server.ServerManager; import scouterx.webapp.framework.exception.ErrorState; import scouterx.webapp.framework.configure.ConfigureAdaptor; import scouterx.webapp.framework.configure.ConfigureManager; import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @Slf4j public class TcpProxy implements AutoCloseable { private static final ConfigureAdaptor conf = ConfigureManager.getConfigure(); private final ClientTCP tcp = new ClientTCP(); private Server server; @Getter @Setter private long lastUsed; protected TcpProxy() { } protected TcpProxy(int serverId) { this.server = ServerManager.getInstance().getServer(serverId); log.info("TcpProxy created : pool-size:{}, {}", server.getConnectionPool().getCurrentPoolSize(), this); } public static synchronized TcpProxy getTcpProxy(final Server server) { Server _server = ServerManager.getInstance().getServerIfNullDefault(server); if (_server == null || _server.isOpen() == false) { throw ErrorState.COLLECTOR_NOT_CONNECTED.newBizException("server is not exist or before initializing! - " + ((server == null) ? "null" : String.valueOf(server.getId()))); } ConnectionPool pool = _server.getConnectionPool(); TcpProxy tcpProxy = pool.getTcpProxy(); return tcpProxy != null ? tcpProxy : new TcpProxy(_server.getId()); } public static synchronized TcpProxy getTcpProxy(int serverId) { return getTcpProxy(ServerManager.getInstance().getServer(serverId)); } public static synchronized void close(TcpProxy t) { if (t == null) return; try { t.close(); } catch (Throwable throwable) { } } protected ClientTCP getClientTcp() { return tcp; } public Server getServer() { return this.server; } public synchronized void open() { if (tcp.connected() == false) { tcp.open(this.server.getId()); if (tcp.connected() == false) { server.setOpen(false); } else { server.setOpen(true); } } } public synchronized void realClose() { String lastStack = ""; if (conf.isTrace()) { lastStack = Arrays.stream(new Exception().getStackTrace()).map(StackTraceElement::toString).limit(6).collect(Collectors.joining("\\n ")); } log.info("TcpProxy closed : pool-size:{}, {}, stack:{}", server.getConnectionPool().getCurrentPoolSize(), this, lastStack); sendClose(); tcp.close(); } @Override protected void finalize() throws Throwable { tcp.close(); } public Pack getSingle(String cmd, Pack param) { List<Pack> values = process(cmd, param); if (values == null || values.size() == 0) return null; else return values.get(0); } public List<Pack> process(String cmd, Pack param) { final List<Pack> list = new ArrayList<Pack>(); process(cmd, param, in -> { Pack p = in.readPack(); list.add(p); }); return list; } public Value getSingleValue(String cmd, Pack param) { List<Value> values = processValues(cmd, param); if (values == null || values.size() == 0) return null; else return values.get(0); } public Value getSingleValue(String cmd, Value param) { List<Value> values = processValues(cmd, param); if (values == null || values.size() == 0) return null; else return values.get(0); } public List<Value> processValues(String cmd, Value param) { final List<Value> list = new ArrayList<Value>(); process(cmd, param, in -> { Value v = in.readValue(); list.add(v); }); return list; } public List<Value> processValues(String cmd, Pack param) { final List<Value> list = new ArrayList<Value>(); process(cmd, param, in -> { Value v = in.readValue(); list.add(v); }); return list; } public boolean isValid() { return tcp.connected(); } public synchronized void process(String cmd, Object param, INetReader recv) { open(); if (tcp.connected() == false) { throw ErrorState.CLIENT_SOCKET_CLOSED.newBizException("[TcpProxy.process] client socket closed."); } long session = this.server.getSession(); DataOutputX out = tcp.getOutput(); DataInputX in = tcp.getInput(); try { out.writeText(cmd); out.writeLong(session); if (param instanceof Value) { out.writeValue((Value) param); } else if (param instanceof Pack) { out.writePack((Pack) param); } out.flush(); byte resFlag; while ((resFlag = in.readByte()) == TcpFlag.HasNEXT) { recv.process(in); } if (resFlag == TcpFlag.INVALID_SESSION) { server.setSession(0); // SessionObserver will relogin tcp.close(); throw ErrorState.COLLECTOR_INVALID_SESSION.newBizException(); } } catch (Throwable e) { tcp.close(); throw new RuntimeException(e); } } public synchronized void sendClose() { if (tcp.connected() == false) { return; } DataOutputX out = tcp.getOutput(); try { out.writeText(RequestCmd.CLOSE); out.flush(); } catch (Exception e) { } } public static MapPack loginByCleanConnection(int serverId, MapPack param) throws IOException { TcpProxy proxy = new TcpProxy(serverId); proxy.open(); if (proxy.isValid() == false) { return null; } param.put("ip", proxy.getLocalInetAddress().getHostAddress()); DataOutputX out = proxy.getClientTcp().getOutput(); DataInputX in = proxy.getClientTcp().getInput(); try { out.writeText(RequestCmd.LOGIN); out.writeLong(0); out.writePack(param); out.flush(); MapPack pack = null; while (in.readByte() == TcpFlag.HasNEXT) { pack = (MapPack) in.readPack(); } return pack; } finally { proxy.realClose(); } } public InetAddress getLocalInetAddress() { return tcp.getSocket().getLocalAddress(); } @Override public void close() { ConnectionPool pool = this.getServer().getConnectionPool(); if (this.isValid()) { pool.put(this); } else { this.realClose(); } } }
3,479
1,350
<gh_stars>1000+ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.security.attestation.implementation.models; import com.azure.core.util.logging.ClientLogger; import com.azure.core.util.serializer.JacksonAdapter; import com.azure.core.util.serializer.SerializerAdapter; import com.azure.core.util.serializer.SerializerEncoding; import com.azure.security.attestation.models.AttestationOpenIdMetadata; import com.fasterxml.jackson.annotation.JsonProperty; import com.nimbusds.jose.util.JSONObjectUtils; import java.io.IOException; import java.util.LinkedHashMap; public class AttestationOpenIdMetadataImpl implements AttestationOpenIdMetadata { @JsonProperty(value = "jwks_uri") private String jwksUri; @JsonProperty(value = "issuer") private String issuer; @JsonProperty(value = "response_types_supported") private String[] responseTypesSupported; @JsonProperty(value = "id_token_signing_alg_values_supported") private String[] tokenSigningAlgorithmsSupported; @JsonProperty(value = "claims_supported") private String[] supportedClaims; @Override public String getJsonWebKeySetUrl() { return jwksUri; } @Override public String getIssuer() { return issuer; } @Override public String[] getResponseTypesSupported() { return responseTypesSupported.clone(); } @Override public String[] getTokenSigningAlgorithmsSupported() { return tokenSigningAlgorithmsSupported.clone(); } @Override public String[] getSupportedClaims() { return supportedClaims.clone(); } public static AttestationOpenIdMetadata fromGenerated(Object generated) { ClientLogger logger = new ClientLogger(AttestationOpenIdMetadataImpl.class); SerializerAdapter serializerAdapter = new JacksonAdapter(); AttestationOpenIdMetadataImpl metadataImpl; try { @SuppressWarnings("unchecked") String generatedString = JSONObjectUtils.toJSONString((LinkedHashMap<String, Object>) generated); metadataImpl = serializerAdapter.deserialize(generatedString, AttestationOpenIdMetadataImpl.class, SerializerEncoding.JSON); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return metadataImpl; } }
816
651
package org.lwjglb.engine.graph; import java.util.ArrayList; import org.lwjglb.engine.graph.lights.PointLight; import org.lwjglb.engine.graph.lights.DirectionalLight; import java.util.List; import java.util.Map; import org.joml.Matrix4f; import org.joml.Vector3f; import org.joml.Vector4f; import static org.lwjgl.opengl.GL11.*; import static org.lwjgl.opengl.GL13.*; import static org.lwjgl.opengl.GL14.*; import static org.lwjgl.opengl.GL30.*; import org.lwjglb.engine.items.GameItem; import org.lwjglb.engine.Scene; import org.lwjglb.engine.SceneLight; import org.lwjglb.engine.items.SkyBox; import org.lwjglb.engine.Utils; import org.lwjglb.engine.Window; import org.lwjglb.engine.graph.anim.AnimGameItem; import org.lwjglb.engine.graph.anim.AnimatedFrame; import org.lwjglb.engine.graph.particles.IParticleEmitter; import org.lwjglb.engine.graph.shadow.ShadowCascade; import org.lwjglb.engine.graph.shadow.ShadowRenderer; import org.lwjglb.engine.loaders.assimp.StaticMeshesLoader; public class Renderer { private final Transformation transformation; private final ShadowRenderer shadowRenderer; private ShaderProgram skyBoxShaderProgram; private ShaderProgram particlesShaderProgram; private ShaderProgram gBufferShaderProgram; private ShaderProgram dirLightShaderProgram; private ShaderProgram pointLightShaderProgram; private ShaderProgram fogShaderProgram; private final float specularPower; private final FrustumCullingFilter frustumFilter; private final List<GameItem> filteredItems; private GBuffer gBuffer; private SceneBuffer sceneBuffer; private Mesh bufferPassMesh; private Matrix4f bufferPassModelMatrix; private Vector4f tmpVec; public Renderer() { transformation = new Transformation(); specularPower = 10f; shadowRenderer = new ShadowRenderer(); frustumFilter = new FrustumCullingFilter(); filteredItems = new ArrayList<>(); tmpVec = new Vector4f(); } public void init(Window window) throws Exception { shadowRenderer.init(window); gBuffer = new GBuffer(window); sceneBuffer = new SceneBuffer(window); setupSkyBoxShader(); setupParticlesShader(); setupGeometryShader(); setupDirLightShader(); setupPointLightShader(); setupFogShader(); bufferPassModelMatrix = new Matrix4f(); bufferPassMesh = StaticMeshesLoader.load("models/buffer_pass_mess.obj", "models")[0]; } public void render(Window window, Camera camera, Scene scene, boolean sceneChanged) { clear(); if (window.getOptions().frustumCulling) { frustumFilter.updateFrustum(window.getProjectionMatrix(), camera.getViewMatrix()); frustumFilter.filter(scene.getGameMeshes()); frustumFilter.filter(scene.getGameInstancedMeshes()); } // Render depth map before view ports has been set up if (scene.isRenderShadows() && sceneChanged) { shadowRenderer.render(window, scene, camera, transformation, this); } glViewport(0, 0, window.getWidth(), window.getHeight()); // Update projection matrix once per render cycle window.updateProjectionMatrix(); renderGeometry(window, camera, scene); initLightRendering(); renderPointLights(window, camera, scene); renderDirectionalLight(window, camera, scene); endLightRendering(); renderFog(window, camera, scene); renderSkyBox(window, camera, scene); renderParticles(window, camera, scene); } private void setupParticlesShader() throws Exception { particlesShaderProgram = new ShaderProgram(); particlesShaderProgram.createVertexShader(Utils.loadResource("/shaders/particles_vertex.vs")); particlesShaderProgram.createFragmentShader(Utils.loadResource("/shaders/particles_fragment.fs")); particlesShaderProgram.link(); particlesShaderProgram.createUniform("viewMatrix"); particlesShaderProgram.createUniform("projectionMatrix"); particlesShaderProgram.createUniform("texture_sampler"); particlesShaderProgram.createUniform("numCols"); particlesShaderProgram.createUniform("numRows"); } private void setupSkyBoxShader() throws Exception { skyBoxShaderProgram = new ShaderProgram(); skyBoxShaderProgram.createVertexShader(Utils.loadResource("/shaders/sb_vertex.vs")); skyBoxShaderProgram.createFragmentShader(Utils.loadResource("/shaders/sb_fragment.fs")); skyBoxShaderProgram.link(); // Create uniforms for projection matrix skyBoxShaderProgram.createUniform("projectionMatrix"); skyBoxShaderProgram.createUniform("modelViewMatrix"); skyBoxShaderProgram.createUniform("texture_sampler"); skyBoxShaderProgram.createUniform("ambientLight"); skyBoxShaderProgram.createUniform("colour"); skyBoxShaderProgram.createUniform("hasTexture"); skyBoxShaderProgram.createUniform("depthsText"); skyBoxShaderProgram.createUniform("screenSize"); } private void setupGeometryShader() throws Exception { gBufferShaderProgram = new ShaderProgram(); gBufferShaderProgram.createVertexShader(Utils.loadResource("/shaders/gbuffer_vertex.vs")); gBufferShaderProgram.createFragmentShader(Utils.loadResource("/shaders/gbuffer_fragment.fs")); gBufferShaderProgram.link(); gBufferShaderProgram.createUniform("projectionMatrix"); gBufferShaderProgram.createUniform("viewMatrix"); gBufferShaderProgram.createUniform("texture_sampler"); gBufferShaderProgram.createUniform("normalMap"); gBufferShaderProgram.createMaterialUniform("material"); gBufferShaderProgram.createUniform("isInstanced"); gBufferShaderProgram.createUniform("modelNonInstancedMatrix"); gBufferShaderProgram.createUniform("selectedNonInstanced"); gBufferShaderProgram.createUniform("jointsMatrix"); gBufferShaderProgram.createUniform("numCols"); gBufferShaderProgram.createUniform("numRows"); // Create uniforms for shadow mapping for (int i = 0; i < ShadowRenderer.NUM_CASCADES; i++) { gBufferShaderProgram.createUniform("shadowMap_" + i); } gBufferShaderProgram.createUniform("orthoProjectionMatrix", ShadowRenderer.NUM_CASCADES); gBufferShaderProgram.createUniform("lightViewMatrix", ShadowRenderer.NUM_CASCADES); gBufferShaderProgram.createUniform("cascadeFarPlanes", ShadowRenderer.NUM_CASCADES); gBufferShaderProgram.createUniform("renderShadow"); } private void setupDirLightShader() throws Exception { dirLightShaderProgram = new ShaderProgram(); dirLightShaderProgram.createVertexShader(Utils.loadResource("/shaders/light_vertex.vs")); dirLightShaderProgram.createFragmentShader(Utils.loadResource("/shaders/dir_light_fragment.fs")); dirLightShaderProgram.link(); dirLightShaderProgram.createUniform("modelMatrix"); dirLightShaderProgram.createUniform("projectionMatrix"); dirLightShaderProgram.createUniform("screenSize"); dirLightShaderProgram.createUniform("positionsText"); dirLightShaderProgram.createUniform("diffuseText"); dirLightShaderProgram.createUniform("specularText"); dirLightShaderProgram.createUniform("normalsText"); dirLightShaderProgram.createUniform("shadowText"); dirLightShaderProgram.createUniform("specularPower"); dirLightShaderProgram.createUniform("ambientLight"); dirLightShaderProgram.createDirectionalLightUniform("directionalLight"); } private void setupPointLightShader() throws Exception { pointLightShaderProgram = new ShaderProgram(); pointLightShaderProgram.createVertexShader(Utils.loadResource("/shaders/light_vertex.vs")); pointLightShaderProgram.createFragmentShader(Utils.loadResource("/shaders/point_light_fragment.fs")); pointLightShaderProgram.link(); pointLightShaderProgram.createUniform("modelMatrix"); pointLightShaderProgram.createUniform("projectionMatrix"); pointLightShaderProgram.createUniform("screenSize"); pointLightShaderProgram.createUniform("positionsText"); pointLightShaderProgram.createUniform("diffuseText"); pointLightShaderProgram.createUniform("specularText"); pointLightShaderProgram.createUniform("normalsText"); pointLightShaderProgram.createUniform("shadowText"); pointLightShaderProgram.createUniform("specularPower"); pointLightShaderProgram.createPointLightUniform("pointLight"); } private void setupFogShader() throws Exception { fogShaderProgram = new ShaderProgram(); fogShaderProgram.createVertexShader(Utils.loadResource("/shaders/light_vertex.vs")); fogShaderProgram.createFragmentShader(Utils.loadResource("/shaders/fog_fragment.fs")); fogShaderProgram.link(); fogShaderProgram.createUniform("modelMatrix"); fogShaderProgram.createUniform("viewMatrix"); fogShaderProgram.createUniform("projectionMatrix"); fogShaderProgram.createUniform("screenSize"); fogShaderProgram.createUniform("positionsText"); fogShaderProgram.createUniform("depthText"); fogShaderProgram.createUniform("sceneText"); fogShaderProgram.createFogUniform("fog"); fogShaderProgram.createUniform("ambientLight"); fogShaderProgram.createUniform("lightColour"); fogShaderProgram.createUniform("lightIntensity"); } public void clear() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); } private void renderGeometry(Window window, Camera camera, Scene scene) { // Render G-Buffer for writing glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gBuffer.getGBufferId()); clear(); glDisable(GL_BLEND); gBufferShaderProgram.bind(); Matrix4f viewMatrix = camera.getViewMatrix(); Matrix4f projectionMatrix = window.getProjectionMatrix(); gBufferShaderProgram.setUniform("viewMatrix", viewMatrix); gBufferShaderProgram.setUniform("projectionMatrix", projectionMatrix); gBufferShaderProgram.setUniform("texture_sampler", 0); gBufferShaderProgram.setUniform("normalMap", 1); List<ShadowCascade> shadowCascades = shadowRenderer.getShadowCascades(); for (int i = 0; i < ShadowRenderer.NUM_CASCADES; i++) { ShadowCascade shadowCascade = shadowCascades.get(i); gBufferShaderProgram.setUniform("orthoProjectionMatrix", shadowCascade.getOrthoProjMatrix(), i); gBufferShaderProgram.setUniform("cascadeFarPlanes", ShadowRenderer.CASCADE_SPLITS[i], i); gBufferShaderProgram.setUniform("lightViewMatrix", shadowCascade.getLightViewMatrix(), i); } shadowRenderer.bindTextures(GL_TEXTURE2); int start = 2; for (int i = 0; i < ShadowRenderer.NUM_CASCADES; i++) { gBufferShaderProgram.setUniform("shadowMap_" + i, start + i); } gBufferShaderProgram.setUniform("renderShadow", scene.isRenderShadows() ? 1 : 0); renderNonInstancedMeshes(scene); renderInstancedMeshes(scene, viewMatrix); gBufferShaderProgram.unbind(); glEnable(GL_BLEND); } private void initLightRendering() { // Bind scene buffer glBindFramebuffer(GL_FRAMEBUFFER, sceneBuffer.getBufferId()); // Clear G-Buffer clear(); // Disable depth testing to allow the drawing of multiple layers with the same depth glDisable(GL_DEPTH_TEST); glEnable(GL_BLEND); glBlendEquation(GL_FUNC_ADD); glBlendFunc(GL_ONE, GL_ONE); // Bind GBuffer for reading glBindFramebuffer(GL_READ_FRAMEBUFFER, gBuffer.getGBufferId()); } private void endLightRendering() { // Bind screen for writing glBindFramebuffer(GL_FRAMEBUFFER, 0); glEnable(GL_DEPTH_TEST); glDisable(GL_BLEND); } private void renderPointLights(Window window, Camera camera, Scene scene) { pointLightShaderProgram.bind(); Matrix4f viewMatrix = camera.getViewMatrix(); Matrix4f projectionMatrix = window.getProjectionMatrix(); pointLightShaderProgram.setUniform("modelMatrix", bufferPassModelMatrix); pointLightShaderProgram.setUniform("projectionMatrix", projectionMatrix); // Specular factor pointLightShaderProgram.setUniform("specularPower", specularPower); // Bind the G-Buffer textures int[] textureIds = this.gBuffer.getTextureIds(); int numTextures = textureIds != null ? textureIds.length : 0; for (int i=0; i<numTextures; i++) { glActiveTexture(GL_TEXTURE0 + i); glBindTexture(GL_TEXTURE_2D, textureIds[i]); } pointLightShaderProgram.setUniform("positionsText", 0); pointLightShaderProgram.setUniform("diffuseText", 1); pointLightShaderProgram.setUniform("specularText", 2); pointLightShaderProgram.setUniform("normalsText", 3); pointLightShaderProgram.setUniform("shadowText", 4); pointLightShaderProgram.setUniform("screenSize", (float) gBuffer.getWidth(), (float)gBuffer.getHeight()); SceneLight sceneLight = scene.getSceneLight(); PointLight[] pointLights = sceneLight.getPointLightList(); int numPointLights = pointLights != null ? pointLights.length : 0; for(int i=0; i<numPointLights; i++) { // Get a copy of the point light object and transform its position to view coordinates PointLight currPointLight = new PointLight(pointLights[i]); Vector3f lightPos = currPointLight.getPosition(); tmpVec.set(lightPos, 1); tmpVec.mul(viewMatrix); lightPos.x = tmpVec.x; lightPos.y = tmpVec.y; lightPos.z = tmpVec.z; pointLightShaderProgram.setUniform("pointLight", currPointLight); bufferPassMesh.render(); } pointLightShaderProgram.unbind(); } private void renderDirectionalLight(Window window, Camera camera, Scene scene) { dirLightShaderProgram.bind(); Matrix4f viewMatrix = camera.getViewMatrix(); Matrix4f projectionMatrix = window.getProjectionMatrix(); dirLightShaderProgram.setUniform("modelMatrix", bufferPassModelMatrix); dirLightShaderProgram.setUniform("projectionMatrix", projectionMatrix); // Specular factor dirLightShaderProgram.setUniform("specularPower", specularPower); // Bind the G-Buffer textures int[] textureIds = this.gBuffer.getTextureIds(); int numTextures = textureIds != null ? textureIds.length : 0; for (int i=0; i<numTextures; i++) { glActiveTexture(GL_TEXTURE0 + i); glBindTexture(GL_TEXTURE_2D, textureIds[i]); } dirLightShaderProgram.setUniform("positionsText", 0); dirLightShaderProgram.setUniform("diffuseText", 1); dirLightShaderProgram.setUniform("specularText", 2); dirLightShaderProgram.setUniform("normalsText", 3); dirLightShaderProgram.setUniform("shadowText", 4); dirLightShaderProgram.setUniform("screenSize", (float) gBuffer.getWidth(), (float)gBuffer.getHeight()); // Ambient light SceneLight sceneLight = scene.getSceneLight(); dirLightShaderProgram.setUniform("ambientLight", sceneLight.getAmbientLight()); // Directional light // Get a copy of the directional light object and transform its position to view coordinates DirectionalLight currDirLight = new DirectionalLight(sceneLight.getDirectionalLight()); tmpVec.set(currDirLight.getDirection(), 0); tmpVec.mul(viewMatrix); currDirLight.setDirection(new Vector3f(tmpVec.x, tmpVec.y, tmpVec.z)); dirLightShaderProgram.setUniform("directionalLight", currDirLight); bufferPassMesh.render(); dirLightShaderProgram.unbind(); } private void renderFog(Window window, Camera camera, Scene scene) { fogShaderProgram.bind(); Matrix4f viewMatrix = camera.getViewMatrix(); Matrix4f projectionMatrix = window.getProjectionMatrix(); fogShaderProgram.setUniform("modelMatrix", bufferPassModelMatrix); fogShaderProgram.setUniform("viewMatrix", viewMatrix); fogShaderProgram.setUniform("projectionMatrix", projectionMatrix); // Bind the scene buffer texture and the the depth texture of the G-Buffer glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, gBuffer.getPositionTexture()); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, gBuffer.getDepthTexture()); glActiveTexture(GL_TEXTURE2); glBindTexture(GL_TEXTURE_2D, sceneBuffer.getTextureId()); fogShaderProgram.setUniform("positionsText", 0); fogShaderProgram.setUniform("depthText", 1); fogShaderProgram.setUniform("sceneText", 2); fogShaderProgram.setUniform("screenSize", (float) window.getWidth(), (float)window.getHeight()); fogShaderProgram.setUniform("fog", scene.getFog()); SceneLight sceneLight = scene.getSceneLight(); fogShaderProgram.setUniform("ambientLight", sceneLight.getAmbientLight()); DirectionalLight dirLight = sceneLight.getDirectionalLight(); fogShaderProgram.setUniform("lightColour", dirLight.getColor()); fogShaderProgram.setUniform("lightIntensity", dirLight.getIntensity()); bufferPassMesh.render(); fogShaderProgram.unbind(); } private void renderParticles(Window window, Camera camera, Scene scene) { // Support for transparencies glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); particlesShaderProgram.bind(); Matrix4f viewMatrix = camera.getViewMatrix(); particlesShaderProgram.setUniform("viewMatrix", viewMatrix); particlesShaderProgram.setUniform("texture_sampler", 0); Matrix4f projectionMatrix = window.getProjectionMatrix(); particlesShaderProgram.setUniform("projectionMatrix", projectionMatrix); IParticleEmitter[] emitters = scene.getParticleEmitters(); int numEmitters = emitters != null ? emitters.length : 0; glDepthMask(false); glBlendFunc(GL_SRC_ALPHA, GL_ONE); for (int i = 0; i < numEmitters; i++) { IParticleEmitter emitter = emitters[i]; InstancedMesh mesh = (InstancedMesh) emitter.getBaseParticle().getMesh(); Texture text = mesh.getMaterial().getTexture(); particlesShaderProgram.setUniform("numCols", text.getNumCols()); particlesShaderProgram.setUniform("numRows", text.getNumRows()); mesh.renderListInstanced(emitter.getParticles(), true, transformation, viewMatrix); } glDisable(GL_BLEND); glDepthMask(true); particlesShaderProgram.unbind(); } private void renderSkyBox(Window window, Camera camera, Scene scene) { SkyBox skyBox = scene.getSkyBox(); if (skyBox != null) { skyBoxShaderProgram.bind(); skyBoxShaderProgram.setUniform("texture_sampler", 0); Matrix4f projectionMatrix = window.getProjectionMatrix(); skyBoxShaderProgram.setUniform("projectionMatrix", projectionMatrix); Matrix4f viewMatrix = camera.getViewMatrix(); float m30 = viewMatrix.m30(); viewMatrix.m30(0); float m31 = viewMatrix.m31(); viewMatrix.m31(0); float m32 = viewMatrix.m32(); viewMatrix.m32(0); Mesh mesh = skyBox.getMesh(); Matrix4f modelViewMatrix = transformation.buildModelViewMatrix(skyBox, viewMatrix); skyBoxShaderProgram.setUniform("modelViewMatrix", modelViewMatrix); skyBoxShaderProgram.setUniform("ambientLight", scene.getSceneLight().getSkyBoxLight()); skyBoxShaderProgram.setUniform("colour", mesh.getMaterial().getDiffuseColour()); skyBoxShaderProgram.setUniform("hasTexture", mesh.getMaterial().isTextured() ? 1 : 0); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, gBuffer.getDepthTexture()); skyBoxShaderProgram.setUniform("screenSize", (float)window.getWidth(), (float)window.getHeight()); skyBoxShaderProgram.setUniform("depthsText", 1); mesh.render(); viewMatrix.m30(m30); viewMatrix.m31(m31); viewMatrix.m32(m32); skyBoxShaderProgram.unbind(); } } private void renderNonInstancedMeshes(Scene scene) { gBufferShaderProgram.setUniform("isInstanced", 0); // Render each mesh with the associated game Items Map<Mesh, List<GameItem>> mapMeshes = scene.getGameMeshes(); for (Mesh mesh : mapMeshes.keySet()) { gBufferShaderProgram.setUniform("material", mesh.getMaterial()); Texture text = mesh.getMaterial().getTexture(); if (text != null) { gBufferShaderProgram.setUniform("numCols", text.getNumCols()); gBufferShaderProgram.setUniform("numRows", text.getNumRows()); } mesh.renderList(mapMeshes.get(mesh), (GameItem gameItem) -> { gBufferShaderProgram.setUniform("selectedNonInstanced", gameItem.isSelected() ? 1.0f : 0.0f); Matrix4f modelMatrix = transformation.buildModelMatrix(gameItem); gBufferShaderProgram.setUniform("modelNonInstancedMatrix", modelMatrix); if (gameItem instanceof AnimGameItem) { AnimGameItem animGameItem = (AnimGameItem) gameItem; AnimatedFrame frame = animGameItem.getCurrentAnimation().getCurrentFrame(); gBufferShaderProgram.setUniform("jointsMatrix", frame.getJointMatrices()); } } ); } } private void renderInstancedMeshes(Scene scene, Matrix4f viewMatrix) { gBufferShaderProgram.setUniform("isInstanced", 1); // Render each mesh with the associated game Items Map<InstancedMesh, List<GameItem>> mapMeshes = scene.getGameInstancedMeshes(); for (InstancedMesh mesh : mapMeshes.keySet()) { Texture text = mesh.getMaterial().getTexture(); if (text != null) { gBufferShaderProgram.setUniform("numCols", text.getNumCols()); gBufferShaderProgram.setUniform("numRows", text.getNumRows()); } gBufferShaderProgram.setUniform("material", mesh.getMaterial()); filteredItems.clear(); for (GameItem gameItem : mapMeshes.get(mesh)) { if (gameItem.isInsideFrustum()) { filteredItems.add(gameItem); } } mesh.renderListInstanced(filteredItems, transformation, viewMatrix); } } public void cleanup() { if (shadowRenderer != null) { shadowRenderer.cleanup(); } if (skyBoxShaderProgram != null) { skyBoxShaderProgram.cleanup(); } if (particlesShaderProgram != null) { particlesShaderProgram.cleanup(); } if (gBufferShaderProgram != null) { gBufferShaderProgram.cleanup(); } if (dirLightShaderProgram != null) { dirLightShaderProgram.cleanup(); } if (pointLightShaderProgram != null) { pointLightShaderProgram.cleanup(); } if (gBuffer != null) { gBuffer.cleanUp(); } if (bufferPassMesh != null) { bufferPassMesh.cleanUp(); } } }
9,613
386
/** @file BNCApplication+BNCTest.h @package Branch-SDK-Tests @brief Expose BNCApplication interfaces for testing. @author <NAME> @date May 4, 2018 @copyright Copyright © 2018 Branch. All rights reserved. */ #import <Foundation/Foundation.h> #import "BNCApplication.h" @interface BNCApplication (BNCTest) - (void) setAppOriginalInstallDate:(NSDate*)originalInstallDate firstInstallDate:(NSDate*)firstInstallDate lastUpdateDate:(NSDate*)lastUpdateDate; @end
202
313
<reponame>corindwyer/titus-control-plane /* * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.titus.master.jobmanager.service.common.action; import java.util.Optional; import java.util.function.Supplier; import com.netflix.titus.api.jobmanager.model.job.Job; import com.netflix.titus.api.jobmanager.model.job.JobFunctions; import com.netflix.titus.api.jobmanager.model.job.Task; import com.netflix.titus.api.jobmanager.model.job.TaskState; import com.netflix.titus.common.framework.reconciler.EntityHolder; import com.netflix.titus.common.util.retry.Retryer; import com.netflix.titus.common.util.retry.Retryers; import com.netflix.titus.common.util.time.Clock; /** * A set of primitive functions for dealing with task retry rules. Each task is associated at its creation time * with a {@link Retryer} instance. This instance determines the retry delay time in case the current task fails. * The delay is measured from the time when the task moved to Finished state. * The resubmit delay time is computed and added to the task reference model, at the very same moment as the task * state change is recorded in it. Resubmit delay is also added to task context, as a hint to external clients why * the resubmit process is delayed. */ public class TaskRetryers { public static final String ATTR_TASK_RETRY = "retryer"; public static final String ATTR_TASK_RETRY_DELAY_MS = "retryDelayMs"; public static Optional<Retryer> getCurrentTaskRetryer(EntityHolder taskHolder) { return Optional.ofNullable((Retryer) taskHolder.getAttributes().get(ATTR_TASK_RETRY)); } public static Retryer getNextTaskRetryer(Supplier<Retryer> systemRetryer, Job<?> job, EntityHolder taskHolder) { return getCurrentTaskRetryer(taskHolder) .map(Retryer::retry) .orElseGet(() -> Retryers.max(systemRetryer.get(), JobFunctions.retryer(job))); } public static long getCurrentRetryerDelayMs(EntityHolder taskHolder, long minRetryIntervalMs, long taskRetryerResetTimeMs, Clock clock) { return getCurrentTaskRetryer(taskHolder).map(retryer -> { long timeInStartedState = JobFunctions.getTimeInState(taskHolder.getEntity(), TaskState.Started, clock).orElse(0L); return timeInStartedState >= taskRetryerResetTimeMs ? 0L : Math.max(minRetryIntervalMs, retryer.getDelayMs().orElse(minRetryIntervalMs)); }).orElse(minRetryIntervalMs); } public static boolean shouldRetryNow(EntityHolder taskHolder, Clock clock) { long delayMs = (long) taskHolder.getAttributes().getOrDefault(ATTR_TASK_RETRY_DELAY_MS, 0L); if (delayMs == 0) { return true; } Task task = taskHolder.getEntity(); long delayUntil = task.getStatus().getTimestamp() + delayMs; return delayUntil <= clock.wallTime(); } }
1,164
4,324
/* Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ECMA_LINE_INFO_H #define ECMA_LINE_INFO_H /** \addtogroup ecma ECMA * @{ * * \addtogroup ecmalineinfo Line info * @{ */ #if JERRY_LINE_INFO #include "ecma-globals.h" /** * Increase the current value of line or column. */ #define ECMA_LINE_INFO_INCREASE 0x0 /** * Decrease the current value of line or column. */ #define ECMA_LINE_INFO_DECREASE 0x1 /** * Line update is present. */ #define ECMA_LINE_INFO_HAS_LINE 0x1 /** * A default value for columns after a line update. */ #define ECMA_LINE_INFO_COLUMN_DEFAULT 127 /** * Vlq encoding: flag which is set for all bytes except the last one. */ #define ECMA_LINE_INFO_VLQ_CONTINUE 0x80 /** * Vlq encoding: mask to decode the number fragment. */ #define ECMA_LINE_INFO_VLQ_MASK 0x7f /** * Vlq encoding: number of bits stored in a byte. */ #define ECMA_LINE_INFO_VLQ_SHIFT 7 /** * Small encoding: a value which represents a two byte long number. */ #define ECMA_LINE_INFO_ENCODE_TWO_BYTE (UINT8_MAX - 1) /** * Small encoding: minimum value of an encoded two byte long number. */ #define ECMA_LINE_INFO_ENCODE_TWO_BYTE_MIN (UINT8_MAX - 1) /** * Small encoding: a value which represents a three byte long number. */ #define ECMA_LINE_INFO_ENCODE_VLQ UINT8_MAX /** * Small encoding: minimum value of an encoded three byte long number. */ #define ECMA_LINE_INFO_ENCODE_VLQ_MIN (ECMA_LINE_INFO_ENCODE_TWO_BYTE_MIN + UINT8_MAX + 1) /** * Maximum number of line/column entries stored in a stream. */ #define ECMA_LINE_INFO_STREAM_VALUE_COUNT_MAX 48 /** * Minimum size of a stream (except the last one). */ #define ECMA_LINE_INFO_STREAM_SIZE_MIN ((2 * ECMA_LINE_INFO_STREAM_VALUE_COUNT_MAX) - 1) /* Helper functions for parser/js/js-parser-line-info-create.c. */ uint32_t ecma_line_info_decode_vlq (uint8_t **buffer_p); uint32_t ecma_line_info_difference_update (uint32_t current_value, uint32_t difference_value); /* General functions. */ void ecma_line_info_free (uint8_t *line_info_p); void ecma_line_info_get (uint8_t *line_info_p, uint32_t offset, jerry_frame_location_t *location_p); #if JERRY_PARSER_DUMP_BYTE_CODE void ecma_line_info_dump (uint8_t *line_info_p); #endif /* JERRY_PARSER_DUMP_BYTE_CODE */ #endif /* ECMA_LINE_INFO_H */ /** * @} * @} */ #endif /* !ECMA_LINE_INFO_H */
1,042
4,612
from twisted.internet import reactor def gotIP(ip): print("IP of 'localhost' is", ip) reactor.stop() reactor.resolve("localhost").addCallback(gotIP) reactor.run()
60
416
// // MTLVertexDescriptor.h // Metal // // Copyright (c) 2014 Apple Inc. All rights reserved. // #import <Metal/MTLDefines.h> #import <Metal/MTLDevice.h> NS_ASSUME_NONNULL_BEGIN /*! @enum MTLVertexFormat @abstract specifies how the vertex attribute data is laid out in memory. */ typedef NS_ENUM(NSUInteger, MTLVertexFormat) { MTLVertexFormatInvalid = 0, MTLVertexFormatUChar2 = 1, MTLVertexFormatUChar3 = 2, MTLVertexFormatUChar4 = 3, MTLVertexFormatChar2 = 4, MTLVertexFormatChar3 = 5, MTLVertexFormatChar4 = 6, MTLVertexFormatUChar2Normalized = 7, MTLVertexFormatUChar3Normalized = 8, MTLVertexFormatUChar4Normalized = 9, MTLVertexFormatChar2Normalized = 10, MTLVertexFormatChar3Normalized = 11, MTLVertexFormatChar4Normalized = 12, MTLVertexFormatUShort2 = 13, MTLVertexFormatUShort3 = 14, MTLVertexFormatUShort4 = 15, MTLVertexFormatShort2 = 16, MTLVertexFormatShort3 = 17, MTLVertexFormatShort4 = 18, MTLVertexFormatUShort2Normalized = 19, MTLVertexFormatUShort3Normalized = 20, MTLVertexFormatUShort4Normalized = 21, MTLVertexFormatShort2Normalized = 22, MTLVertexFormatShort3Normalized = 23, MTLVertexFormatShort4Normalized = 24, MTLVertexFormatHalf2 = 25, MTLVertexFormatHalf3 = 26, MTLVertexFormatHalf4 = 27, MTLVertexFormatFloat = 28, MTLVertexFormatFloat2 = 29, MTLVertexFormatFloat3 = 30, MTLVertexFormatFloat4 = 31, MTLVertexFormatInt = 32, MTLVertexFormatInt2 = 33, MTLVertexFormatInt3 = 34, MTLVertexFormatInt4 = 35, MTLVertexFormatUInt = 36, MTLVertexFormatUInt2 = 37, MTLVertexFormatUInt3 = 38, MTLVertexFormatUInt4 = 39, MTLVertexFormatInt1010102Normalized = 40, MTLVertexFormatUInt1010102Normalized = 41, MTLVertexFormatUChar4Normalized_BGRA API_AVAILABLE(macos(10.13), ios(11.0)) = 42, MTLVertexFormatUChar API_AVAILABLE(macos(10.13), ios(11.0)) = 45, MTLVertexFormatChar API_AVAILABLE(macos(10.13), ios(11.0)) = 46, MTLVertexFormatUCharNormalized API_AVAILABLE(macos(10.13), ios(11.0)) = 47, MTLVertexFormatCharNormalized API_AVAILABLE(macos(10.13), ios(11.0)) = 48, MTLVertexFormatUShort API_AVAILABLE(macos(10.13), ios(11.0)) = 49, MTLVertexFormatShort API_AVAILABLE(macos(10.13), ios(11.0)) = 50, MTLVertexFormatUShortNormalized API_AVAILABLE(macos(10.13), ios(11.0)) = 51, MTLVertexFormatShortNormalized API_AVAILABLE(macos(10.13), ios(11.0)) = 52, MTLVertexFormatHalf API_AVAILABLE(macos(10.13), ios(11.0)) = 53, } API_AVAILABLE(macos(10.11), ios(8.0)); typedef NS_ENUM(NSUInteger, MTLVertexStepFunction) { MTLVertexStepFunctionConstant = 0, MTLVertexStepFunctionPerVertex = 1, MTLVertexStepFunctionPerInstance = 2, MTLVertexStepFunctionPerPatch API_AVAILABLE(macos(10.12), ios(10.0)) = 3, MTLVertexStepFunctionPerPatchControlPoint API_AVAILABLE(macos(10.12), ios(10.0)) = 4, } API_AVAILABLE(macos(10.11), ios(8.0)); MTL_EXPORT API_AVAILABLE(macos(10.11), ios(8.0)) @interface MTLVertexBufferLayoutDescriptor : NSObject <NSCopying> @property (assign, nonatomic) NSUInteger stride; @property (assign, nonatomic) MTLVertexStepFunction stepFunction; @property (assign, nonatomic) NSUInteger stepRate; @end MTL_EXPORT API_AVAILABLE(macos(10.11), ios(8.0)) @interface MTLVertexBufferLayoutDescriptorArray : NSObject - (MTLVertexBufferLayoutDescriptor *)objectAtIndexedSubscript:(NSUInteger)index; - (void)setObject:(nullable MTLVertexBufferLayoutDescriptor *)bufferDesc atIndexedSubscript:(NSUInteger)index; @end MTL_EXPORT API_AVAILABLE(macos(10.11), ios(8.0)) @interface MTLVertexAttributeDescriptor : NSObject <NSCopying> @property (assign, nonatomic) MTLVertexFormat format; @property (assign, nonatomic) NSUInteger offset; @property (assign, nonatomic) NSUInteger bufferIndex; @end MTL_EXPORT API_AVAILABLE(macos(10.11), ios(8.0)) @interface MTLVertexAttributeDescriptorArray : NSObject - (MTLVertexAttributeDescriptor *)objectAtIndexedSubscript:(NSUInteger)index; - (void)setObject:(nullable MTLVertexAttributeDescriptor *)attributeDesc atIndexedSubscript:(NSUInteger)index; @end /* MTLVertexDescriptor */ MTL_EXPORT API_AVAILABLE(macos(10.11), ios(8.0)) @interface MTLVertexDescriptor : NSObject <NSCopying> + (MTLVertexDescriptor *)vertexDescriptor; @property (readonly) MTLVertexBufferLayoutDescriptorArray *layouts; @property (readonly) MTLVertexAttributeDescriptorArray *attributes; - (void)reset; @end NS_ASSUME_NONNULL_END
1,953
1,379
// // AppDelegate.h // PulsingHaloDemo // // Created by shuichi on 12/5/13. // Copyright (c) 2013 <NAME>. All rights reserved. // #import <UIKit/UIKit.h> @interface AppDelegate : UIResponder <UIApplicationDelegate> @property (strong, nonatomic) UIWindow *window; @end
106
678
<gh_stars>100-1000 /** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/Conference.framework/Conference */ #import <Conference/XXUnknownSuperclass.h> @interface CNFUIUtilities : XXUnknownSuperclass { } + (int)statusForState:(unsigned)state; // 0xce49 + (id)statusStringForContactName:(id)contactName status:(int)status useFloatingHUD:(BOOL)hud; // 0xcff1 + (id)statusStringForState:(unsigned)state; // 0x369c1 + (id)formattedPhoneNumberFromString:(id)string; // 0x36b4d + (id)faceTimeDisplayNameForDestination:(id)destination image:(id *)image; // 0x36b25 + (id)faceTimeDisplayNameForDestination:(id)destination image:(id *)image fullscreenImage:(id *)image3 isPhoneNumber:(BOOL *)number; // 0xc121 + (id)currentCallDurationString; // 0x36a01 + (int)interfaceOrientationForDeviceOrientation:(int)deviceOrientation; // 0x14169 @end
306