max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
1,025
<reponame>lnc441401369/lnc.github.io package com.myblog.websocket; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.web.socket.TextMessage; import org.springframework.web.socket.WebSocketSession; import org.springframework.web.socket.handler.TextWebSocketHandler; import javax.websocket.Session; import java.util.concurrent.CopyOnWriteArraySet; /** * @author Zephery * @since 2018/1/15 19:44 */ public class KafkaWebSocket extends TextWebSocketHandler { //logger private static final Logger logger = LoggerFactory.getLogger(KafkaWebSocket.class); public static CopyOnWriteArraySet<KafkaWebSocket> wbSockets = new CopyOnWriteArraySet<>(); //此处定义静态变量,以在其他方法中获取到所有连接 private Session session; //接收文本消息,并发送出去 @Override public void handleTextMessage(WebSocketSession session, TextMessage message) throws Exception { while (true) { super.handleTextMessage(session, message); } } }
403
14,668
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/login/ui/note_action_launch_button.h" #include <memory> #include <vector> #include "ash/login/ui/login_test_base.h" #include "ash/login/ui/views_utils.h" #include "ash/public/mojom/tray_action.mojom.h" #include "ash/shell.h" #include "ash/tray_action/test_tray_action_client.h" #include "base/memory/ptr_util.h" #include "base/time/time.h" #include "ui/events/test/event_generator.h" #include "ui/gfx/geometry/point.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/geometry/vector2d.h" #include "ui/views/layout/box_layout.h" #include "ui/views/view.h" #include "ui/views/widget/widget.h" namespace ash { namespace { // The note action button bubble sizes: constexpr int kLargeButtonRadiusDp = 56; constexpr int kSmallButtonRadiusDp = 48; constexpr float kSqrt2 = 1.4142; } // namespace class NoteActionLaunchButtonTest : public LoginTestBase { public: NoteActionLaunchButtonTest() = default; NoteActionLaunchButtonTest(const NoteActionLaunchButtonTest&) = delete; NoteActionLaunchButtonTest& operator=(const NoteActionLaunchButtonTest&) = delete; ~NoteActionLaunchButtonTest() override = default; void SetUp() override { LoginTestBase::SetUp(); Shell::Get()->tray_action()->SetClient( tray_action_client_.CreateRemoteAndBind(), mojom::TrayActionState::kAvailable); } TestTrayActionClient* tray_action_client() { return &tray_action_client_; } void PerformClick(const gfx::Point& point) { ui::test::EventGenerator* generator = GetEventGenerator(); generator->MoveMouseTo(point.x(), point.y()); generator->ClickLeftButton(); Shell::Get()->tray_action()->FlushMojoForTesting(); } void GestureFling(const gfx::Point& start, const gfx::Point& end) { ui::test::EventGenerator* generator = GetEventGenerator(); generator->GestureScrollSequence(start, end, base::Milliseconds(10), 2); 
Shell::Get()->tray_action()->FlushMojoForTesting(); } private: TestTrayActionClient tray_action_client_; }; // Verifies that note action button is not visible if lock screen note taking // is not enabled. TEST_F(NoteActionLaunchButtonTest, VisibilityActionNotAvailable) { auto note_action_button = std::make_unique<NoteActionLaunchButton>( mojom::TrayActionState::kNotAvailable); EXPECT_FALSE(note_action_button->GetVisible()); } // Verifies that note action button is shown and enabled if lock screen note // taking is available. TEST_F(NoteActionLaunchButtonTest, VisibilityActionAvailable) { auto note_action_button = std::make_unique<NoteActionLaunchButton>( mojom::TrayActionState::kAvailable); NoteActionLaunchButton::TestApi test_api(note_action_button.get()); EXPECT_TRUE(note_action_button->GetVisible()); EXPECT_TRUE(note_action_button->GetEnabled()); EXPECT_TRUE(test_api.ActionButtonView()->GetVisible()); EXPECT_TRUE(test_api.ActionButtonView()->GetEnabled()); EXPECT_TRUE(test_api.BackgroundView()->GetVisible()); } // Tests that clicking Enter while lock screen action button is focused requests // a new note action. TEST_F(NoteActionLaunchButtonTest, KeyboardTest) { auto* note_action_button = new NoteActionLaunchButton(mojom::TrayActionState::kAvailable); std::unique_ptr<views::Widget> widget = CreateWidgetWithContent(login_views_utils::WrapViewForPreferredSize( base::WrapUnique(note_action_button)) .release()); NoteActionLaunchButton::TestApi test_api(note_action_button); note_action_button->RequestFocus(); // Focusing the whole note action launch button view should give the image // button sub-view the focus. 
EXPECT_TRUE(test_api.ActionButtonView()->HasFocus()); ui::test::EventGenerator* generator = GetEventGenerator(); generator->PressKey(ui::KeyboardCode::VKEY_RETURN, ui::EF_NONE); Shell::Get()->tray_action()->FlushMojoForTesting(); EXPECT_EQ(std::vector<mojom::LockScreenNoteOrigin>( {mojom::LockScreenNoteOrigin::kLockScreenButtonKeyboard}), tray_action_client()->note_origins()); } // The button hit area is expected to be a circle centered in the top right // corner of the view with kSmallButtonRadiusDp (and clipped but the view // bounds). The test verifies clicking the button within the button's hit area // requests a new note action. TEST_F(NoteActionLaunchButtonTest, ClickTest) { auto* note_action_button = new NoteActionLaunchButton(mojom::TrayActionState::kAvailable); std::unique_ptr<views::Widget> widget = CreateWidgetWithContent(login_views_utils::WrapViewForPreferredSize( base::WrapUnique(note_action_button)) .release()); const gfx::Size action_size = note_action_button->GetPreferredSize(); EXPECT_EQ(gfx::Size(kLargeButtonRadiusDp, kLargeButtonRadiusDp), action_size); const gfx::Rect view_bounds = note_action_button->GetBoundsInScreen(); ASSERT_EQ(gfx::Rect(gfx::Point(), action_size), view_bounds); const std::vector<mojom::LockScreenNoteOrigin> expected_actions = { mojom::LockScreenNoteOrigin::kLockScreenButtonTap}; // Point near the center of the view, inside the actionable area: PerformClick(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / kSqrt2 + 2, kSmallButtonRadiusDp / kSqrt2 - 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point near the center of the view, outside the actionable area: PerformClick(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / kSqrt2 - 2, kSmallButtonRadiusDp / kSqrt2 + 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the top right corner: 
PerformClick(view_bounds.top_right() + gfx::Vector2d(-2, 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point near the bottom left corner: PerformClick(view_bounds.bottom_left() + gfx::Vector2d(2, -2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the origin: PerformClick(view_bounds.origin() + gfx::Vector2d(2, 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the origin of the actionable area bounds (inside the bounds): PerformClick(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp + 2, 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point near the origin of the actionable area bounds (outside the bounds): PerformClick(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp - 2, 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the bottom right corner: PerformClick(view_bounds.bottom_right() + gfx::Vector2d(0, -2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the bottom right corner of the actionable area bounds (inside // the bounds): PerformClick(view_bounds.top_right() + gfx::Vector2d(-2, kSmallButtonRadiusDp - 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point near the bottom right corner of the actionable area bounds (outside // the bounds): PerformClick(view_bounds.top_right() + gfx::Vector2d(-2, kSmallButtonRadiusDp + 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the bottom edge: PerformClick(view_bounds.bottom_left() + 
gfx::Vector2d(kSmallButtonRadiusDp / 2, -1)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the top edge: PerformClick(view_bounds.origin() + gfx::Vector2d(kSmallButtonRadiusDp / 2, 1)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point near the left edge: PerformClick(view_bounds.origin() + gfx::Vector2d(1, kSmallButtonRadiusDp / 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); // Point near the right edge: PerformClick(view_bounds.top_right() + gfx::Vector2d(-1, kSmallButtonRadiusDp / 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point in the center of the actionable area: PerformClick( view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / 2, kSmallButtonRadiusDp / 2)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Point outside the view bounds: PerformClick(view_bounds.top_right() + gfx::Vector2d(2, 2)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); } // Tests tap gesture in and outside of the note action launch button. 
TEST_F(NoteActionLaunchButtonTest, TapTest) { auto* note_action_button = new NoteActionLaunchButton(mojom::TrayActionState::kAvailable); std::unique_ptr<views::Widget> widget = CreateWidgetWithContent(login_views_utils::WrapViewForPreferredSize( base::WrapUnique(note_action_button)) .release()); const gfx::Size action_size = note_action_button->GetPreferredSize(); EXPECT_EQ(gfx::Size(kLargeButtonRadiusDp, kLargeButtonRadiusDp), action_size); const gfx::Rect view_bounds = note_action_button->GetBoundsInScreen(); ASSERT_EQ(gfx::Rect(gfx::Point(), action_size), view_bounds); const std::vector<mojom::LockScreenNoteOrigin> expected_actions = { mojom::LockScreenNoteOrigin::kLockScreenButtonTap}; ui::test::EventGenerator* generator = GetEventGenerator(); // Tap in actionable area of the button requests action: generator->GestureTapAt(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / kSqrt2 + 2, kSmallButtonRadiusDp / kSqrt2 - 2)); Shell::Get()->tray_action()->FlushMojoForTesting(); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Tap in non-actionable area of the button does not request action: generator->GestureTapAt(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / kSqrt2 - 2, kSmallButtonRadiusDp / kSqrt2 + 2)); Shell::Get()->tray_action()->FlushMojoForTesting(); EXPECT_TRUE(tray_action_client()->note_origins().empty()); tray_action_client()->ClearRecordedRequests(); } // Tests a number of fling gestures that interact with the note action button. // Verifies that only a fling from the button's actionable area to bottom right // direction generate an action request. 
TEST_F(NoteActionLaunchButtonTest, FlingGesture) { auto* note_action_button = new NoteActionLaunchButton(mojom::TrayActionState::kAvailable); std::unique_ptr<views::Widget> widget = CreateWidgetWithContent(login_views_utils::WrapViewForPreferredSize( base::WrapUnique(note_action_button)) .release()); const gfx::Size action_size = note_action_button->GetPreferredSize(); EXPECT_EQ(gfx::Size(kLargeButtonRadiusDp, kLargeButtonRadiusDp), action_size); // Offset note action button closer to the center of the test widget, to give // extra space for performing gestures. gfx::Rect view_bounds = note_action_button->GetBoundsInScreen(); view_bounds.Offset(200, 200); note_action_button->SetBoundsRect(view_bounds); ASSERT_EQ(gfx::Rect(gfx::Point(200, 200), action_size), note_action_button->GetBoundsInScreen()); const std::vector<mojom::LockScreenNoteOrigin> expected_actions = { mojom::LockScreenNoteOrigin::kLockScreenButtonSwipe}; // Point in the center of the note action element's actionable area: gfx::Point start = view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / 2, kSmallButtonRadiusDp / 2); // Fling from the center of the actionable area to bottom left: GestureFling(start, view_bounds.bottom_left() + gfx::Vector2d(-50, 50)); EXPECT_EQ(expected_actions, tray_action_client()->note_origins()); tray_action_client()->ClearRecordedRequests(); // Fling from the center of the actionable area to bottom right: GestureFling(start, view_bounds.bottom_right() + gfx::Vector2d(0, 50)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); // Fling from the center of the actionable area to top left: GestureFling(start, view_bounds.origin() + gfx::Vector2d(-50, 0)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); // Fling accross the button: GestureFling(view_bounds.top_right() + gfx::Vector2d(25, -25), view_bounds.bottom_left() + gfx::Vector2d(-25, 25)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); // Fling from non-actionable area of the 
button: GestureFling(view_bounds.top_right() + gfx::Vector2d(-kSmallButtonRadiusDp / kSqrt2 - 2, kSmallButtonRadiusDp / kSqrt2 + 2), view_bounds.bottom_left() + gfx::Vector2d(-25, 25)); EXPECT_TRUE(tray_action_client()->note_origins().empty()); } // Generates multi-finger fling in the direction that would be accepted for // single finger fling, and verifies no action is requested. TEST_F(NoteActionLaunchButtonTest, MultiFingerFling) { auto* note_action_button = new NoteActionLaunchButton(mojom::TrayActionState::kAvailable); std::unique_ptr<views::Widget> widget = CreateWidgetWithContent(login_views_utils::WrapViewForPreferredSize( base::WrapUnique(note_action_button)) .release()); const gfx::Size action_size = note_action_button->GetPreferredSize(); EXPECT_EQ(gfx::Size(kLargeButtonRadiusDp, kLargeButtonRadiusDp), action_size); // Offset note action button closer to the center of the test widget, to give // extra space for performing gestures: gfx::Rect view_bounds = note_action_button->GetBoundsInScreen(); view_bounds.Offset(200, 200); note_action_button->SetBoundsRect(view_bounds); ASSERT_EQ(gfx::Rect(gfx::Point(200, 200), action_size), note_action_button->GetBoundsInScreen()); const int kTouchPoints = 3; const gfx::Point start_points[kTouchPoints] = { view_bounds.top_right() + gfx::Vector2d(-2, 2), view_bounds.top_right() + gfx::Vector2d(-20, 15), view_bounds.top_right() + gfx::Vector2d(-35, 40)}; const gfx::Vector2d deltas[kTouchPoints] = {gfx::Vector2d(-100, 100), gfx::Vector2d(-100, 100), gfx::Vector2d(-100, 100)}; int delays_adding_fingers_ms[kTouchPoints] = {0, 4, 8}; int delays_releasing_fingers_ms[kTouchPoints] = {20, 16, 18}; ui::test::EventGenerator* generator = GetEventGenerator(); generator->GestureMultiFingerScrollWithDelays( kTouchPoints, start_points, deltas, delays_adding_fingers_ms, delays_releasing_fingers_ms, 4 /* event_separaation_time_ms*/, 5 /*steps*/); Shell::Get()->tray_action()->FlushMojoForTesting(); 
EXPECT_TRUE(tray_action_client()->note_origins().empty()); } } // namespace ash
6,100
2,453
// // Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 30 2020 21:18:12). // // Copyright (C) 1997-2019 <NAME>. // #import <objc/NSObject.h> @class DVTDeviceType, NSSet; @interface _DVTDeviceTypeTreeNode : NSObject { NSSet *_derivedDeviceTypes; _DVTDeviceTypeTreeNode *_parentNode; DVTDeviceType *_deviceType; } - (void).cxx_destruct; @property(readonly) NSSet *derivedDeviceTypes; // @synthesize derivedDeviceTypes=_derivedDeviceTypes; @property(readonly) DVTDeviceType *deviceType; // @synthesize deviceType=_deviceType; @property(readonly) __weak _DVTDeviceTypeTreeNode *parentNode; // @synthesize parentNode=_parentNode; - (void)setDerivedDeviceTypes:(id)arg1; - (id)initWithParentNode:(id)arg1 deviceType:(id)arg2; @end
281
9,724
<reponame>yunku2002/emscripten #pragma once #include <GLES2/gl2.h> #include "webgl_api.h" WEBGL_APICALL void GL_APIENTRY emscripten_glActiveTexture (GLenum texture); WEBGL_APICALL void GL_APIENTRY emscripten_glAttachShader (GLuint program, GLuint shader); WEBGL_APICALL void GL_APIENTRY emscripten_glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); WEBGL_APICALL void GL_APIENTRY emscripten_glBindBuffer (GLenum target, GLuint buffer); WEBGL_APICALL void GL_APIENTRY emscripten_glBindFramebuffer (GLenum target, GLuint framebuffer); WEBGL_APICALL void GL_APIENTRY emscripten_glBindRenderbuffer (GLenum target, GLuint renderbuffer); WEBGL_APICALL void GL_APIENTRY emscripten_glBindTexture (GLenum target, GLuint texture); WEBGL_APICALL void GL_APIENTRY emscripten_glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); WEBGL_APICALL void GL_APIENTRY emscripten_glBlendEquation (GLenum mode); WEBGL_APICALL void GL_APIENTRY emscripten_glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); WEBGL_APICALL void GL_APIENTRY emscripten_glBlendFunc (GLenum sfactor, GLenum dfactor); WEBGL_APICALL void GL_APIENTRY emscripten_glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); WEBGL_APICALL void GL_APIENTRY emscripten_glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); WEBGL_APICALL void GL_APIENTRY emscripten_glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); WEBGL_APICALL GLenum GL_APIENTRY emscripten_glCheckFramebufferStatus (GLenum target); WEBGL_APICALL void GL_APIENTRY emscripten_glClear (GLbitfield mask); WEBGL_APICALL void GL_APIENTRY emscripten_glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); WEBGL_APICALL void GL_APIENTRY emscripten_glClearDepthf (GLfloat d); WEBGL_APICALL void GL_APIENTRY emscripten_glClearStencil (GLint s); WEBGL_APICALL void GL_APIENTRY emscripten_glColorMask (GLboolean red, GLboolean 
green, GLboolean blue, GLboolean alpha); WEBGL_APICALL void GL_APIENTRY emscripten_glCompileShader (GLuint shader); WEBGL_APICALL void GL_APIENTRY emscripten_glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); WEBGL_APICALL void GL_APIENTRY emscripten_glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); WEBGL_APICALL void GL_APIENTRY emscripten_glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); WEBGL_APICALL void GL_APIENTRY emscripten_glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); WEBGL_APICALL GLuint GL_APIENTRY emscripten_glCreateProgram (void); WEBGL_APICALL GLuint GL_APIENTRY emscripten_glCreateShader (GLenum type); WEBGL_APICALL void GL_APIENTRY emscripten_glCullFace (GLenum mode); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteBuffers (GLsizei n, const GLuint *buffers); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteProgram (GLuint program); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteShader (GLuint shader); WEBGL_APICALL void GL_APIENTRY emscripten_glDeleteTextures (GLsizei n, const GLuint *textures); WEBGL_APICALL void GL_APIENTRY emscripten_glDepthFunc (GLenum func); WEBGL_APICALL void GL_APIENTRY emscripten_glDepthMask (GLboolean flag); WEBGL_APICALL void GL_APIENTRY emscripten_glDepthRangef (GLfloat n, GLfloat f); WEBGL_APICALL void GL_APIENTRY emscripten_glDetachShader (GLuint program, GLuint shader); WEBGL_APICALL void GL_APIENTRY emscripten_glDisable 
(GLenum cap); WEBGL_APICALL void GL_APIENTRY emscripten_glDisableVertexAttribArray (GLuint index); WEBGL_APICALL void GL_APIENTRY emscripten_glDrawArrays (GLenum mode, GLint first, GLsizei count); WEBGL_APICALL void GL_APIENTRY emscripten_glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); WEBGL_APICALL void GL_APIENTRY emscripten_glEnable (GLenum cap); WEBGL_APICALL void GL_APIENTRY emscripten_glEnableVertexAttribArray (GLuint index); WEBGL_APICALL void GL_APIENTRY emscripten_glFinish (void); WEBGL_APICALL void GL_APIENTRY emscripten_glFlush (void); WEBGL_APICALL void GL_APIENTRY emscripten_glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); WEBGL_APICALL void GL_APIENTRY emscripten_glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); WEBGL_APICALL void GL_APIENTRY emscripten_glFrontFace (GLenum mode); WEBGL_APICALL void GL_APIENTRY emscripten_glGenBuffers (GLsizei n, GLuint *buffers); WEBGL_APICALL void GL_APIENTRY emscripten_glGenerateMipmap (GLenum target); WEBGL_APICALL void GL_APIENTRY emscripten_glGenFramebuffers (GLsizei n, GLuint *framebuffers); WEBGL_APICALL void GL_APIENTRY emscripten_glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); WEBGL_APICALL void GL_APIENTRY emscripten_glGenTextures (GLsizei n, GLuint *textures); WEBGL_APICALL void GL_APIENTRY emscripten_glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); WEBGL_APICALL void GL_APIENTRY emscripten_glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); WEBGL_APICALL void GL_APIENTRY emscripten_glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); WEBGL_APICALL GLint GL_APIENTRY emscripten_glGetAttribLocation (GLuint program, const GLchar *name); WEBGL_APICALL void GL_APIENTRY 
emscripten_glGetBooleanv (GLenum pname, GLboolean *data); WEBGL_APICALL void GL_APIENTRY emscripten_glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); WEBGL_APICALL GLenum GL_APIENTRY emscripten_glGetError (void); WEBGL_APICALL void GL_APIENTRY emscripten_glGetFloatv (GLenum pname, GLfloat *data); WEBGL_APICALL void GL_APIENTRY emscripten_glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetIntegerv (GLenum pname, GLint *data); WEBGL_APICALL void GL_APIENTRY emscripten_glGetProgramiv (GLuint program, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); WEBGL_APICALL void GL_APIENTRY emscripten_glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetShaderiv (GLuint shader, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); WEBGL_APICALL void GL_APIENTRY emscripten_glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); WEBGL_APICALL void GL_APIENTRY emscripten_glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); WEBGL_APICALL const GLubyte *GL_APIENTRY emscripten_glGetString (GLenum name); WEBGL_APICALL void GL_APIENTRY emscripten_glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetUniformfv (GLuint program, GLint location, GLfloat *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetUniformiv (GLuint program, GLint location, GLint *params); WEBGL_APICALL GLint GL_APIENTRY emscripten_glGetUniformLocation (GLuint program, const 
GLchar *name); WEBGL_APICALL void GL_APIENTRY emscripten_glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); WEBGL_APICALL void GL_APIENTRY emscripten_glHint (GLenum target, GLenum mode); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsBuffer (GLuint buffer); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsEnabled (GLenum cap); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsFramebuffer (GLuint framebuffer); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsProgram (GLuint program); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsRenderbuffer (GLuint renderbuffer); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsShader (GLuint shader); WEBGL_APICALL GLboolean GL_APIENTRY emscripten_glIsTexture (GLuint texture); WEBGL_APICALL void GL_APIENTRY emscripten_glLineWidth (GLfloat width); WEBGL_APICALL void GL_APIENTRY emscripten_glLinkProgram (GLuint program); WEBGL_APICALL void GL_APIENTRY emscripten_glPixelStorei (GLenum pname, GLint param); WEBGL_APICALL void GL_APIENTRY emscripten_glPolygonOffset (GLfloat factor, GLfloat units); WEBGL_APICALL void GL_APIENTRY emscripten_glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); WEBGL_APICALL void GL_APIENTRY emscripten_glReleaseShaderCompiler (void); WEBGL_APICALL void GL_APIENTRY emscripten_glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); WEBGL_APICALL void GL_APIENTRY emscripten_glSampleCoverage (GLfloat value, GLboolean invert); WEBGL_APICALL void GL_APIENTRY emscripten_glScissor (GLint x, GLint y, GLsizei width, GLsizei height); WEBGL_APICALL void GL_APIENTRY emscripten_glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei 
length); WEBGL_APICALL void GL_APIENTRY emscripten_glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilFunc (GLenum func, GLint ref, GLuint mask); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilMask (GLuint mask); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilMaskSeparate (GLenum face, GLuint mask); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); WEBGL_APICALL void GL_APIENTRY emscripten_glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); WEBGL_APICALL void GL_APIENTRY emscripten_glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); WEBGL_APICALL void GL_APIENTRY emscripten_glTexParameterf (GLenum target, GLenum pname, GLfloat param); WEBGL_APICALL void GL_APIENTRY emscripten_glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); WEBGL_APICALL void GL_APIENTRY emscripten_glTexParameteri (GLenum target, GLenum pname, GLint param); WEBGL_APICALL void GL_APIENTRY emscripten_glTexParameteriv (GLenum target, GLenum pname, const GLint *params); WEBGL_APICALL void GL_APIENTRY emscripten_glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform1f (GLint location, GLfloat v0); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform1fv (GLint location, GLsizei count, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform1i (GLint location, GLint v0); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform1iv (GLint location, GLsizei count, const GLint *value); WEBGL_APICALL void GL_APIENTRY 
emscripten_glUniform2f (GLint location, GLfloat v0, GLfloat v1); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform2fv (GLint location, GLsizei count, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform2i (GLint location, GLint v0, GLint v1); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform2iv (GLint location, GLsizei count, const GLint *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform3fv (GLint location, GLsizei count, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform3iv (GLint location, GLsizei count, const GLint *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform4fv (GLint location, GLsizei count, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); WEBGL_APICALL void GL_APIENTRY emscripten_glUniform4iv (GLint location, GLsizei count, const GLint *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); WEBGL_APICALL void GL_APIENTRY emscripten_glUseProgram (GLuint program); WEBGL_APICALL void GL_APIENTRY emscripten_glValidateProgram (GLuint program); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib1f (GLuint index, GLfloat x); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib1fv (GLuint index, const GLfloat *v); 
WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib2fv (GLuint index, const GLfloat *v); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib3fv (GLuint index, const GLfloat *v); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttrib4fv (GLuint index, const GLfloat *v); WEBGL_APICALL void GL_APIENTRY emscripten_glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); WEBGL_APICALL void GL_APIENTRY emscripten_glViewport (GLint x, GLint y, GLsizei width, GLsizei height);
5,354
608
package com.google.android.stardroid.activities; import android.app.Activity; import android.content.Context; import android.os.Handler; import com.google.android.stardroid.inject.PerActivity; import dagger.Module; import dagger.Provides; /** * Created by johntaylor on 4/15/16. */ @Module public class DiagnosticActivityModule { private DiagnosticActivity activity; public DiagnosticActivityModule(DiagnosticActivity activity) { this.activity = activity; } @Provides @PerActivity Activity provideActivity() { return activity; } @Provides @PerActivity Context provideActivityContext() { return activity; } @Provides @PerActivity Handler provideHandler() { return new Handler(); } }
225
407
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.time.OffsetDateTime;
import java.time.ZonedDateTime;
import java.util.Date;
import java.util.TimeZone;

/**
 * Helpers converting epoch seconds to formatted date strings (pinned to the
 * Asia/Shanghai time zone) and {@link OffsetDateTime} values to epoch seconds.
 *
 * @author jinghua.yjh
 */
public class TimeUtil {

    /**
     * Formats an epoch-seconds timestamp with the given pattern.
     * A {@code null} timestamp is treated as 0 (the epoch).
     *
     * @param seconds epoch seconds, may be null
     * @param format  {@link SimpleDateFormat} pattern
     * @return formatted date string in the Asia/Shanghai zone
     */
    public static String timeStamp2Date(Long seconds, String format) {
        if (seconds == null) {
            seconds = 0L;
        }
        // SimpleDateFormat is not thread-safe, so a fresh instance per call.
        SimpleDateFormat sdf = new SimpleDateFormat(format);
        sdf.setTimeZone(TimeZone.getTimeZone("Asia/Shanghai"));
        return sdf.format(new Date(seconds * 1000));
    }

    /**
     * Integer overload. Bug fix: the previous unconditional unboxing
     * {@code (long)(int) seconds} threw {@link NullPointerException} for null
     * input; null is now treated as 0, consistent with the Long overload.
     */
    public static String timeStamp2Date(Integer seconds, String format) {
        Long asLong = (seconds == null) ? null : Long.valueOf(seconds.longValue());
        return timeStamp2Date(asLong, format);
    }

    /** Integer overload with the default {@code yyyy-MM-dd HH:mm:ss} pattern. */
    public static String timeStamp2Date(Integer seconds) {
        return timeStamp2Date(seconds, "yyyy-MM-dd HH:mm:ss");
    }

    /** Long overload with the default {@code yyyy-MM-dd HH:mm:ss} pattern. */
    public static String timeStamp2Date(Long seconds) {
        return timeStamp2Date(seconds, "yyyy-MM-dd HH:mm:ss");
    }

    /**
     * Converts an {@link OffsetDateTime} to epoch seconds (truncating
     * milliseconds toward zero, as the original implementation did).
     */
    public static Long offsetDateTime2Timestamp(OffsetDateTime odt) {
        ZonedDateTime zdt = odt.toZonedDateTime();
        return Timestamp.from(zdt.toInstant()).getTime() / 1000;
    }
}
505
1,007
##############################################################
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import collections

# Immutable record describing one convolutional stage. Field semantics follow
# their names (output blob names, their dims, and spatial scales); see callers
# for the exact contents placed in each slot.
ConvStageInfo = collections.namedtuple('ConvStageInfo', 'blobs dims spatial_scales')
145
2,413
# Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import scipy.special as spspecial from ..arithmetic.utils import arithmetic_operand from ..utils import infer_dtype, implement_scipy from .core import TensorSpecialBinOp, _register_special_op, TensorSpecialMultiOp @_register_special_op class TensorHYP2F1(TensorSpecialMultiOp): _ARG_COUNT = 4 _func_name = 'hyp2f1' @implement_scipy(spspecial.hyp2f1) @infer_dtype(spspecial.hyp2f1) def hyp2f1(a, b, c, z, **kwargs): op = TensorHYP2F1(**kwargs) return op(a, b, c, z) @_register_special_op class TensorHYP1F1(TensorSpecialMultiOp): _ARG_COUNT = 3 _func_name = 'hyp1f1' @implement_scipy(spspecial.hyp1f1) @infer_dtype(spspecial.hyp1f1) def hyp1f1(a, b, x, out=None, **kwargs): op = TensorHYP1F1(**kwargs) return op(a, b, x, out=out) @_register_special_op class TensorHYPERU(TensorSpecialMultiOp): _ARG_COUNT = 3 _func_name = 'hyperu' @implement_scipy(spspecial.hyperu) @infer_dtype(spspecial.hyperu) def hyperu(a, b, x, out=None, **kwargs): op = TensorHYPERU(**kwargs) return op(a, b, x, out=out) @_register_special_op @arithmetic_operand(sparse_mode='binary_and') class TensorHYP0F1(TensorSpecialBinOp): _func_name = 'hyp0f1' @implement_scipy(spspecial.hyp0f1) @infer_dtype(spspecial.hyp0f1) def hyp0f1(v, z, out=None, **kwargs): op = TensorHYP0F1(**kwargs) return op(v, z, out=out)
788
367
#ifndef CAMERA_H
#define CAMERA_H

#include "vector.h"

/*
    Our 3rd person camera has three main functions which setup what's it is looking at.
    The first rotateY(), rotates the look at vector (the camera's local +Z axis) around
    the world's Y axis, which always points in "kWorldUp" (0,1,0) direction.  The next
    function pitch() rotates the camera's view around the camera's local X axis.  Lastly
    the setTarget() function builds the camera's axes based on the camera's eye position
    and what we want the camera to look at (it's target).
*/

const float kCamDist = 2.5f; // Default amount for the eye to be away from the camera's target
const CVector kWorldUp(0,1,0); // Direction of up (+Y axis) in world space

// The camera: stores an orthonormal basis (right/up/forward) plus the eye position.
class CCamera
{
    public:

        CCamera();

        // NOTE(review): the angle unit (radians vs degrees) is not specified in this
        // header — confirm against the implementation before calling.
        void rotateY(float angle, const CPos &target); // Rotates around world Y-axis
        void pitch(float angle, const CPos &target); // Rotates around the camera's X-axis

        void setEye(const CPos &eyePos) { eye = eyePos; } // Sets the eye position of the camera
        void setTarget(const CPos &target); // Sets the camera to look at "target"

        // Returns what the camera's target is: the point kCamDist units in front of the eye.
        CPos getTarget() const { return eye + (forward * kCamDist); }

        // Data Access ***

        CPos getEye() const { return eye; }
        CVector getCamForward() const { return forward; } // Returns camera's forward vector
        CVector getCamUp() const { return up; } // Returns camera's up vector
        CVector getCamRight() const { return right; } // Returns camera's right vector

        // *** End Data Access

    private:

        /*  Initial camera axes (up)
                                  +Y   +Z (forward)
                                   |   /
                                   |  /
                                   | /
                                   + ------ +X (right)
        */

        CVector right; // The local normalized axis that points to the "right"
        CVector up; // The local normalized axis that points "up"
        CVector forward; // The local normalized axis that points "forward"

        CPos eye; // Where we are looking from
};

// Externed camera
extern CCamera *gCamera;

#endif
680
423
<gh_stars>100-1000
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <functorch/csrc/BatchRulesHelper.h>
#include <functorch/csrc/PlumbingHelper.h>
#include <ATen/Operators.h>

// NB: most activation functions fit pointwise unary or binary rules.
// These are only the ones that have special batch rules to help with organization
namespace at { namespace functorch {

// Batch rule for at::glu: moves the batch dim to the front and shifts `dim`
// by one so it indexes the same logical dimension after batching.
std::tuple<Tensor,optional<int64_t>> glu_batch_rule(const Tensor& self, optional<int64_t> self_bdim, int64_t dim) {
  // repeated error message from glu because 0D -> 1D when batched
  // this can't pass anyway because a 0-dimensional tensor has "size" 1, which
  // can't be evenly halved, but give a nicer error message here.
  TORCH_CHECK(self.dim() > 1, "glu does not support 0-dimensional tensors");

  const auto rank = rankWithoutBatchDim(self, self_bdim);
  const auto dim_ = maybe_wrap_dim(dim, rank) + 1;

  const auto self_ = moveBatchDimToFront(self, self_bdim);

  const auto res = at::glu(self_, dim_);
  return std::make_tuple(res, 0);
}

// Batch rule for at::glu_backward: gives both operands a leading batch dim of
// `batch_size` (materializing it on whichever operand lacked one) and dispatches.
std::tuple<Tensor,optional<int64_t>> glu_backward_batch_rule(
    const Tensor& grad_output, optional<int64_t> grad_output_bdim,
    const Tensor& self, optional<int64_t> self_bdim, int64_t dim) {
  if (self_bdim) {
    // repeated error message from glu because 0D -> 1D when batched
    // this can't pass anyway because a 0-dimensional tensor has "size" 1, which
    // can't be evenly halved, but give a nicer error message here.
    TORCH_CHECK(self.dim() > 1, "glu does not support 0-dimensional tensors");
  }

  const auto rank = rankWithoutBatchDim(self, self_bdim);
  const auto dim_ = maybe_wrap_dim(dim, rank) + 1;

  const auto batch_size = get_bdim_size2(grad_output, grad_output_bdim, self, self_bdim);
  const auto grad_output_ = ensure_has_bdim(moveBatchDimToFront(grad_output, grad_output_bdim), grad_output_bdim.has_value(), batch_size);
  const auto self_ = ensure_has_bdim(moveBatchDimToFront(self, self_bdim), self_bdim.has_value(), batch_size);

  const auto res = at::glu_backward(grad_output_, self_, dim_);
  return std::make_tuple(res, 0);
}

// Batch rule for at::prelu. An unbatched scalar weight falls through to
// at::prelu directly; otherwise prelu is decomposed as
//   max(0, x) + w * min(0, x)
// with the weight flattened and reshaped so it broadcasts against the
// input's channel dimension.
std::tuple<Tensor,optional<int64_t>> prelu_batch_rule(
    const Tensor& input, optional<int64_t> input_bdim,
    const Tensor& weight, optional<int64_t> weight_bdim) {
  if (!weight_bdim && weight.dim() == 0) {
    return std::make_tuple(at::prelu(input, weight), input_bdim);
  }

  const auto input_ = moveBatchDimToFront(input, input_bdim);
  auto weight_flatten = moveBatchDimToFront(weight, weight_bdim);

  if (weight_flatten.dim() > 1) {
    // for an input [N, C, ...]
    // weight can be a non-vector but the total number of elements must be the same as C
    weight_flatten = at::flatten(weight_flatten, weight_bdim.has_value() ? 1 : 0, -1);
  }

  const int64_t input_logical_rank = rankWithoutBatchDim(input, input_bdim);
  VmapDimVector new_shape(weight_flatten.sizes().begin(), weight_flatten.sizes().end());
  const int64_t final_size = weight_bdim ? (input_logical_rank + 1) : input_logical_rank;
  new_shape.reserve(final_size);

  if (weight_flatten.dim() == 2 || !weight_bdim) {
    // if weight (without batching) is not a scalar, its size must match the "channel dimension" of input. To do the
    // decomposition, we pad the weight to
    // copies checks from prelu if the weight (without vmap) is not a scalar
    TORCH_CHECK(input_logical_rank > 0, "Not allow zero-dim input tensor.");

    int64_t channel_size = 1; // channel_size default to 1
    if (input_logical_rank > 1) {
      // channel dim is at index 2 when a batch dim precedes it, else index 1
      const auto channel_dim = input_bdim ? 2 : 1;
      channel_size = input_.size(channel_dim);
    }
    const auto weight_num = weight_flatten.size(-1);
    TORCH_CHECK(channel_size == weight_num,
      "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
      " and channel size = ", channel_size, ".");

    // pads to the left so that the flattened shape matches up with the channel
    if (!weight_bdim) {
      new_shape.insert(new_shape.begin(), 1);
    } else {
      new_shape.insert(new_shape.begin() + 1, 1);
    }
  }

  // pad trailing singleton dims so the weight broadcasts over the remaining axes
  for (int64_t i = new_shape.size(); i < final_size; i ++) {
    new_shape.push_back(1);
  }
  TORCH_INTERNAL_ASSERT((int64_t)new_shape.size() == final_size);
  const auto weight_padded = weight_flatten.view(new_shape);
  auto zero_tensor = at::zeros(1, input.options());

  // decomposes function,
  auto res = at::maximum(zero_tensor, input_) + weight_padded * at::minimum(zero_tensor, input_);
  return std::make_tuple(res, 0);
}

VmapDimVector ensure_shape_with_bdim(const Tensor& input, const bool has_bdim, const int64_t batch_size) {
  // helper function that get the size of input, ensuring that there's batch dim, without expanding input
  if (has_bdim) {
    // sad to have to copy but got garbage if tried to return an IntArrayRef and just do input.sizes()
    VmapDimVector new_shape(input.sizes().begin(), input.sizes().end());
    return new_shape;
  }
  // no batch dim present: synthesize [batch_size, ...input sizes...]
  VmapDimVector new_shape(1, batch_size);
  new_shape.reserve(input.dim() + 1);
  new_shape.insert(new_shape.end(), input.sizes().begin(), input.sizes().end());
  return new_shape;
}

VmapDimVector shape_maybe_with_bdim(const Tensor& input, const bool need_bdim, const bool has_bdim, const int64_t batch_size) {
  // if need_bdim, will return the input with a guaranteed bdim. If not, will return the input logical size (no batch dim)
  if (need_bdim) {
    return ensure_shape_with_bdim(input, has_bdim, batch_size);
  } else if (has_bdim) { // !need_bdim && has_bdim
    VmapDimVector new_shape(input.sizes().begin() + 1, input.sizes().end());
    return new_shape;
  } else { // !need_bdim && !has_bdim
    VmapDimVector new_shape(input.sizes().begin(), input.sizes().end());
    return new_shape;
  }
}

std::tuple<Tensor, Tensor> prelu_backward_batched(
    const Tensor& grad_out, const Tensor& self, const Tensor& weight,
    const VmapDimVector& self_grad_shape, const VmapDimVector& weight_grad_padded_shape, const VmapDimVector& weight_grad_shape) {
  // helper function that produces a batched gradient for prelu using a decomposition inspired by the AOTAutograd ones
  const auto input_grad_collector = at::where(self > 0, grad_out, weight * grad_out);
  const auto input_grad = native::sum_to_size(input_grad_collector, self_grad_shape);
  const auto weight_grad_collector = at::where(self > 0, at::zeros(1, self.options()), self * grad_out);
  const auto weight_grad_collector_2 = native::sum_to_size(weight_grad_collector, weight_grad_padded_shape);
  const auto weight_grad = weight_grad_collector_2.view(weight_grad_shape);
  return std::make_tuple(input_grad, weight_grad);
}

// Batch rule for at::prelu_backward. Mirrors prelu_batch_rule's weight
// reshaping, then computes both gradients via the prelu_backward_batched
// decomposition and sums each down to its target shape.
std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>> prelu_backward_batch_rule(
    const Tensor& grad_out, optional<int64_t> grad_out_bdim,
    const Tensor& self, optional<int64_t> self_bdim,
    const Tensor& weight, optional<int64_t> weight_bdim) {
  const auto batch_size = get_bdim_size3(grad_out, grad_out_bdim, self, self_bdim, weight, weight_bdim);
  const auto grad_out_ = moveBatchDimToFront(grad_out, grad_out_bdim);
  const auto self_ = moveBatchDimToFront(self, self_bdim);
  const auto self_size_with_bdim = ensure_shape_with_bdim(self_, self_bdim.has_value(), batch_size);
  if (!weight_bdim && weight.dim() == 0) {
    // scalar unbatched weight: its gradient is one scalar per batch element
    VmapDimVector weight_grad_shape(1, batch_size);
    VmapDimVector weight_grad_shape_padded(self_bdim.has_value() ? self.dim() : self.dim() + 1, 1);
    weight_grad_shape_padded[0] = batch_size;
    const auto grads = prelu_backward_batched(grad_out_, self_, weight, self_size_with_bdim, weight_grad_shape_padded, weight_grad_shape);
    return std::make_tuple(std::get<0>(grads), 0, std::get<1>(grads), 0);
  }
  const auto weight_ = moveBatchDimToFront(weight, weight_bdim);
  auto weight_flatten = weight_;
  if (weight_flatten.dim() > 1) {
    // for an input [N, C, ...]
    // weight can be a non-vector but the total number of elements must be the same as C
    weight_flatten = at::flatten(weight_flatten, weight_bdim.has_value() ? 1 : 0, -1);
  }

  const int64_t self_logical_rank = rankWithoutBatchDim(self, self_bdim);
  VmapDimVector new_shape(weight_flatten.sizes().begin(), weight_flatten.sizes().end());
  const int64_t final_size = weight_bdim ? (self_logical_rank + 1) : self_logical_rank;
  new_shape.reserve(final_size);

  if (weight_flatten.dim() == 2 || !weight_bdim) {
    // if weight (without batching) is not a scalar, its size must match the "channel dimension" of input. To do the
    // decomposition, we pad the weight to
    // copies checks from prelu if the weight (without vmap) is not a scalar
    TORCH_CHECK(self_logical_rank > 0, "Not allow zero-dim input tensor.");

    int64_t channel_size = 1; // channel_size default to 1
    if (self_logical_rank > 1) {
      channel_size = self_.size(self_bdim.has_value() ? 2 : 1);
    }
    const auto weight_num = weight_flatten.size(-1);
    TORCH_CHECK(channel_size == weight_num,
      "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
      " and channel size = ", channel_size, ".");

    // pads to the left so that the flattened shape matches up with the channel
    if (!weight_bdim) {
      new_shape.insert(new_shape.begin(), 1);
    } else {
      new_shape.insert(new_shape.begin() + 1, 1);
    }
  }

  for (int64_t i = new_shape.size(); i < final_size; i ++) {
    new_shape.push_back(1);
  }
  // weight grad does not depend on weight values. It is batched iff grad_out or self are batched
  const auto weight_grad_is_batched = grad_out_bdim.has_value() || self_bdim.has_value();

  const auto weight_padded = weight_flatten.view(new_shape);
  const auto weight_grad_shape = shape_maybe_with_bdim(weight_, weight_grad_is_batched, weight_bdim.has_value(), batch_size);
  const auto weight_padded_grad_shape = shape_maybe_with_bdim(weight_padded, weight_grad_is_batched, weight_bdim.has_value(), batch_size);

  const auto grads = prelu_backward_batched(grad_out_, self_, weight_padded, self_size_with_bdim, weight_padded_grad_shape, weight_grad_shape);
  return std::make_tuple(std::get<0>(grads), 0, std::get<1>(grads), (weight_grad_is_batched ? optional<int64_t>(0) : nullopt));
}

// Register the rules above with functorch's batched dispatch key.
TORCH_LIBRARY_IMPL(aten, FT_BATCHED_KEY, m) {
  VMAP_SUPPORT(glu_backward, glu_backward_batch_rule);
  VMAP_SUPPORT(glu, glu_batch_rule);
  VMAP_SUPPORT(prelu, prelu_batch_rule)
  VMAP_SUPPORT(prelu_backward, prelu_backward_batch_rule)
}

}} // namespace at::functorch
3,850
427
//
//  MUAssetsViewController.h
//  MUKit_Example
//
//  Created by Jekity on 2017/11/7.
//  Copyright © 2017年 Jeykit. All rights reserved.
//

#import <UIKit/UIKit.h>
@class PHAssetCollection;
@class MUImagePickerManager;

// Collection-view controller presenting photo assets for the image picker.
@interface MUAssetsViewController : UICollectionViewController

// Asset collections (albums) to display.
@property (nonatomic, copy) NSArray *assetCollections;
// Owning image picker manager.
@property (nonatomic, strong) MUImagePickerManager *imagePickerController;
// Whether selecting multiple assets is allowed.
@property (nonatomic, assign) BOOL allowsMultipleSelection;
// Minimum number of assets that must be selected.
@property (nonatomic, assign) NSUInteger minimumNumberOfSelection;
// Maximum number of assets that may be selected.
@property (nonatomic, assign) NSUInteger maximumNumberOfSelection;
// Number of items per row in portrait orientation (default: 4).
@property (nonatomic, assign) NSUInteger numberOfColumnsInPortrait;
// Number of items per row in landscape orientation (default: 7).
@property (nonatomic, assign) NSUInteger numberOfColumnsInLandscape;

@end
372
3,102
// RUN: %clang_cc1 %s -fblocks -triple x86_64-apple-darwin -emit-llvm -o - | FileCheck %s typedef void (^dispatch_block_t)(void); void dispatch_once(dispatch_block_t); class Zone { public: Zone(); ~Zone(); }; Zone::Zone() { dispatch_once(^{}); dispatch_once(^{}); } Zone::~Zone() { dispatch_once(^{}); dispatch_once(^{}); } class X : public virtual Zone { X(); ~X(); }; X::X() { dispatch_once(^{}); dispatch_once(^{}); }; X::~X() { dispatch_once(^{}); dispatch_once(^{}); }; // CHECK-LABEL: define internal void @___ZN4ZoneC2Ev_block_invoke // CHECK-LABEL: define internal void @___ZN4ZoneC2Ev_block_invoke_ // CHECK-LABEL: define internal void @___ZN4ZoneD2Ev_block_invoke // CHECK-LABEL: define internal void @___ZN4ZoneD2Ev_block_invoke_ // CHECK-LABEL: define internal void @___ZN1XC2Ev_block_invoke // CHECK-LABEL: define internal void @___ZN1XC2Ev_block_invoke_ // CHECK-LABEL: define internal void @___ZN1XD2Ev_block_invoke // CHECK-LABEL: define internal void @___ZN1XD2Ev_block_invoke_
426
1,481
package apoc.monitor; import apoc.Extended; import apoc.result.IdsResult; import org.neo4j.graphdb.GraphDatabaseService; import org.neo4j.kernel.impl.store.stats.StoreEntityCounters; import org.neo4j.kernel.internal.GraphDatabaseAPI; import org.neo4j.procedure.Context; import org.neo4j.procedure.Description; import org.neo4j.procedure.Procedure; import java.util.stream.Stream; @Extended public class Ids { private static final String JMX_OBJECT_NAME = "Primitive count"; private static final String NODE_IDS_KEY = "NumberOfNodeIdsInUse"; private static final String REL_IDS_KEY = "NumberOfRelationshipIdsInUse"; private static final String PROP_IDS_KEY = "NumberOfPropertyIdsInUse"; private static final String REL_TYPE_IDS_KEY = "NumberOfRelationshipTypeIdsInUse"; @Context public GraphDatabaseService db; @Procedure @Description("apoc.monitor.ids() returns the object ids in use for this neo4j instance") public Stream<IdsResult> ids() { StoreEntityCounters storeEntityCounters = ((GraphDatabaseAPI) db).getDependencyResolver().resolveDependency(StoreEntityCounters.class); return Stream.of(new IdsResult( storeEntityCounters.nodes(), storeEntityCounters.relationships(), storeEntityCounters.properties(), storeEntityCounters.relationshipTypes() )); } }
512
845
<gh_stars>100-1000
#! /usr/bin/env python
# coding:utf-8
# Author: <NAME>
from icssploit.clients.base import Base
from icssploit.protocols.cotp import *
from icssploit.protocols.s7comm_plus import *
from scapy.supersocket import StreamSocket
from scapy.volatile import RandString
import socket


# Default ObjectQualifier items appended to most S7comm-plus requests
# (RID/AID/UDInt all zeroed).
OBJECT_QUALIFIER_ITEMS = [S7PlusItemValue(IDNumber=0x4e9, DataType=0x12, DataValue=S7PlusRIDValue(Value=0x0)),
                          S7PlusItemValue(IDNumber=0x4ea, DataType=0x13, DataValue=S7PlusAIDValue(Value=0x0)),
                          S7PlusItemValue(IDNumber=0x4eb, DataType=0x04, DataValue=S7PlusUDIntValue(Value=0x0)),
                          ]


class S7PlusClient(Base):
    # Client for the Siemens S7comm-plus protocol carried over COTP/TPKT
    # (TCP port 102). Handles session setup, variable get/set requests and
    # raw packet exchange.
    # NOTE(review): str-based packet rebuilding (TPKT(str(rsp))) suggests
    # Python 2 era code — confirm before running on Python 3.

    def __init__(self, name, ip, port=102, src_tsap='\x01\x00', timeout=2):
        '''
        :param name: Name of this target
        :param ip: S7 PLC ip
        :param port: S7 PLC port (default: 102)
        :param src_tsap: source TSAP bytes used in the COTP connect request
        :param timeout: timeout of socket (default: 2)
        '''
        super(S7PlusClient, self).__init__(name=name)
        self._ip = ip
        self._port = port
        self._src_tsap = src_tsap
        self._dst_tsap = "SIMATIC-ROOT-ES"
        self._seq = 1                   # S7comm-plus sequence number (wraps at 65535)
        self.session = 0x0120           # session id; replaced by the PLC's id after connect()
        self._connection = None         # StreamSocket once connect() succeeds
        self._connected = False
        self._timeout = timeout
        self._pdu_length = 480
        self._info = {}                 # target info harvested during connect()/get_target_info()
        self._server_session_version_data = None

    def connect(self):
        # Three-step session setup: COTP connection request, CreateObject
        # request (yields the session id and version info), then a
        # SetMultiVariables echoing the server's session version data back.
        sock = socket.socket()
        sock.settimeout(self._timeout)
        sock.connect((self._ip, self._port))
        self._connection = StreamSocket(sock, Raw)
        # Step 1: COTP connection request with tpdu-size / src-tsap / dst-tsap options.
        packet1 = TPKT() / COTPCR()
        packet1.Parameters = [COTPOption() for i in range(3)]
        packet1.PDUType = "CR"
        packet1.Parameters[0].ParameterCode = "tpdu-size"
        packet1.Parameters[0].Parameter = "\x0a"
        packet1.Parameters[1].ParameterCode = "src-tsap"
        packet1.Parameters[2].ParameterCode = "dst-tsap"
        packet1.Parameters[1].Parameter = self._src_tsap
        packet1.Parameters[2].Parameter = self._dst_tsap
        self.send_receive_packet(packet1)
        # Step 2: CreateObject request with randomized client identity attributes.
        packet2 = TPKT() / COTPDT(EOT=1) / S7PlusHeader(Data=S7PlusData(OPCode=0x31, Function=0x04ca))
        packet2[S7PlusData].DataSet = S7PlusCrateObjectRequest(IDNumber=0x0000011d,
                                                               DataType=0x04,
                                                               DataValue=S7PlusUDIntValue(Value=0)
                                                               )
        packet2[S7PlusData].DataSet.Elements = [S7PlusObjectField(RelationID=0xd3, ClassID=0x821f)]
        packet2[S7PlusData].DataSet.Elements[0].Elements = [S7PlusAttributeField(IDNumber=0x00e9, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value=RandString(8))),
                                                            S7PlusAttributeField(IDNumber=0x0121, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value=RandString(8))),
                                                            S7PlusAttributeField(IDNumber=0x0128, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value="")),
                                                            S7PlusAttributeField(IDNumber=0x0129, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value="")),
                                                            S7PlusAttributeField(IDNumber=0x012a, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value=RandString(8))),
                                                            S7PlusAttributeField(IDNumber=0x012b, DataType=0x04,
                                                                                 DataValue=S7PlusUDIntValue(Value=0)),
                                                            S7PlusAttributeField(IDNumber=0x012c, DataType=0x12,
                                                                                 DataValue=S7PlusRIDValue(
                                                                                     Value=RandInt())),
                                                            S7PlusAttributeField(IDNumber=0x012d, DataType=0x15,
                                                                                 DataValue=S7PlusWStringValue(
                                                                                     Value="")),
                                                            S7PlusSubObjectField(RelationID=0xd3, ClassID=0x817f,
                                                                                 Elements=[S7PlusAttributeField(
                                                                                     IDNumber=0x00e9, DataType=0x15,
                                                                                     DataValue=S7PlusWStringValue(
                                                                                         Value="SubscriptionContainer"))
                                                                                 ],
                                                                                 )
                                                            ]
        rsp2 = self.send_receive_s7plus_packet(packet2)
        try:
            if rsp2.haslayer(S7PlusCrateObjectResponse):
                # The PLC assigns our real session id in its CreateObject response.
                self.session = rsp2[S7PlusCrateObjectResponse].ObjectIDs[0].Value
                # Todo: remove this when find out how get these value from get_target_info
                for elment in rsp2[S7PlusCrateObjectResponse].Elements:
                    if isinstance(elment, S7PlusObjectField):
                        for sub_elment in elment.Elements:
                            if isinstance(sub_elment, S7PlusAttributeField):
                                if sub_elment.IDNumber == 0x0132:
                                    self._server_session_version_data = sub_elment
                                    for item in sub_elment.DataValue.Items:
                                        if item.IDNumber == 0x013f:
                                            # Value is "HW;OrderCode;FW" — split into the info dict.
                                            data = item.DataValue.Value
                                            self._info['HW_Version'], self._info['Order_Code'], self._info['FW_Version'] = data.split(';')
        except Exception as err:
            self.logger.error("Can't get order code and version from target")
        if self._server_session_version_data:
            # Step 3: echo the server's session version attribute back via SetMultiVariables.
            packet3 = TPKT() / COTPDT(EOT=1) / S7PlusHeader(Data=S7PlusData(OPCode=0x31, Function=0x0542))
            packet3[S7PlusData].DataSet = S7PlusSetMultiVariablesRequest(ObjectID=self.session,
                                                                         AddressList=S7PlusAddressListPacket(
                                                                             Elements=[S7PlusUDIntValue(Value=0x0132)]
                                                                         ),
                                                                         ValueList=[S7PlusItemValue(
                                                                             IDNumber=0x01,
                                                                             DataType=0x17,
                                                                             DataValue=self._server_session_version_data.DataValue
                                                                         ),
                                                                         ],
                                                                         ObjectQualifier=S7PlusObjectQualifierPacket()
                                                                         )
            packet3[S7PlusData].DataSet.ObjectQualifier.Items = OBJECT_QUALIFIER_ITEMS
            rsp3 = self.send_receive_s7plus_packet(packet3)

    def set_var(self, id_number, item_list):
        # Fire-and-forget SetVariable request for object `id_number`.
        packet = TPKT() / COTPDT(EOT=1) / S7PlusHeader(Data=S7PlusData(OPCode=0x31, Function=0x04f2, Unknown1=0x34))
        packet[S7PlusData].DataSet = S7PlusSetVariableRequest(ObjectID=id_number, ValueList=item_list)
        packet[S7PlusData].DataSet.ObjectQualifier.Items = OBJECT_QUALIFIER_ITEMS
        packet.show2()
        self.send_s7plus_packet(packet)
        # rsp = self.send_receive_s7plus_packet(packet)

    def get_var_sub_streamed(self, id_number, data_type_flags, data_type, data_value):
        # GetVarSubStreamed request; returns the response's DataValue, or None
        # on error/unexpected response.
        packet = TPKT() / COTPDT(EOT=1) / S7PlusHeader(Data=S7PlusData(OPCode=0x31, Function=0x0586))
        packet[S7PlusData].DataSet = S7PlusGetVarSubStreamedRequest(IDNumber=id_number,
                                                                    DATATypeFlags=data_type_flags,
                                                                    DataType=data_type,
                                                                    DataValue=data_value,
                                                                    ObjectQualifier=S7PlusObjectQualifierPacket()
                                                                    )
        packet[S7PlusData].DataSet.ObjectQualifier.Items = OBJECT_QUALIFIER_ITEMS
        rsp = self.send_receive_s7plus_packet(packet)
        try:
            if rsp.haslayer(S7PlusGetVarSubStreamedResponse):
                return rsp[S7PlusGetVarSubStreamedResponse].DataValue
        except Exception as err:
            self.logger.error("Response is not correct format")
            return None

    def get_target_info(self):
        # Returns (Order_Code, Serial_Number, HW_Version, FW_Version).
        # NOTE(review): Order_Code/HW_Version/FW_Version are populated during
        # connect(); calling this without a successful connect() raises KeyError.
        request_items = S7PlusUDIntValueArray(UDIntItems=S7PlusUDIntValue(Value=0xea9))
        data = self.get_var_sub_streamed(0x31, 0x02, 0x04, request_items)
        try:
            info_data = data[0].Value
            self._info['Serial_Number'] = info_data.split(' ')[3]
        except Exception as err:
            self._info['Serial_Number'] = ''
            self.logger.error("Can't get serial numbertarget")
        return self._info['Order_Code'], self._info['Serial_Number'], self._info['HW_Version'], self._info['FW_Version']

    def delete_object(self, object_id):
        # Fire-and-forget DeleteObject request.
        packet = TPKT() / COTPDT(EOT=1) / S7PlusHeader(Data=S7PlusData(OPCode=0x31, Function=0x04d4))
        packet[S7PlusData].DataSet = S7PlusDeleteObjectRequest(IDNumber=object_id,
                                                               ObjectQualifier=S7PlusObjectQualifierPacket()
                                                               )
        packet[S7PlusData].DataSet.ObjectQualifier.Items = OBJECT_QUALIFIER_ITEMS
        # packet.show2()
        self.send_s7plus_packet(packet)
        # rsp = self.send_receive_s7plus_packet(packet)

    def _fix_session(self, packet):
        # Stamps the current sequence number and session id onto outgoing
        # request packets (OPCode 0x31), wrapping the sequence at 65535.
        if self._seq > 65535:
            self._seq = 1
        try:
            if packet.haslayer(S7PlusData):
                if packet[S7PlusData].OPCode == 0x31:
                    packet[S7PlusData].Seq = self._seq
                    packet[S7PlusData].Session = self.session
                    self._seq += 1
            return packet
        except Exception as err:
            self.logger.error(err)
        return packet

    def send_packet(self, packet):
        # Raw send without sequence/session stamping.
        if self._connection:
            try:
                self._connection.send(packet)
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before send packet!")

    def send_receive_packet(self, packet):
        # Raw send + single-response receive without stamping.
        if self._connection:
            try:
                rsp = self._connection.sr1(packet, timeout=self._timeout)
                return rsp
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before send packet!")

    def receive_packet(self):
        # Raw receive of the next packet on the wire.
        if self._connection:
            try:
                rsp = self._connection.recv()
                return rsp
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before receive packet!")

    def send_s7plus_packet(self, packet):
        # Send with sequence/session stamping applied first.
        if self._connection:
            try:
                packet = self._fix_session(packet)
                self._connection.send(packet)
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before send packet!")

    def send_receive_s7plus_packet(self, packet):
        # Stamped send + receive; the raw response is re-parsed as TPKT.
        if self._connection:
            try:
                packet = self._fix_session(packet)
                rsp = self._connection.sr1(packet, timeout=self._timeout)
                if rsp:
                    rsp = TPKT(str(rsp))
                return rsp
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before send packet!")

    def receive_s7plus_packet(self):
        # Receive the next packet and re-parse it as TPKT.
        if self._connection:
            try:
                rsp = self._connection.recv()
                if rsp:
                    rsp = TPKT(str(rsp))
                return rsp
            except Exception as err:
                self.logger.error(err)
                return None
        else:
            self.logger.error("Please create connect before receive packet!")
9,668
1,388
// // Copyright 2011-2015 <NAME> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include <stdio.h> #include <bare-metal/sdmmc.h> #define TRANSFER_LENGTH 8 // // Read the first few sectors of the SD card and dump them out the // serial port. This uses the SPI mode driver in libos. // int main() { int result; result = init_sdmmc_device(); if (result < 0) { printf("error %d initializing card\n", result); return 0; } for (int block_num = 0; block_num < TRANSFER_LENGTH; block_num++) { unsigned char buf[512]; result = read_sdmmc_device(block_num, buf); if (result < 0) { printf("error %d reading from device\n", result); break; } for (int address = 0; address < SDMMC_BLOCK_SIZE; address += 16) { printf("%08x ", address + block_num * SDMMC_BLOCK_SIZE); for (int offset = 0; offset < 16; offset++) printf("%02x ", buf[address + offset]); printf(" "); for (int offset = 0; offset < 16; offset++) { unsigned char c = buf[address + offset]; if (c >= 32 && c <= 128) printf("%c", c); else printf("."); } printf("\n"); } printf("\n"); } return 0; }
812
339
<filename>integration/mediation-tests/tests-mediator-1/src/test/java/org/wso2/carbon/esb/mediator/test/enrich/EnrichIntegrationAddSiblingInOutMessageTestCase.java /* *Copyright (c) 2005-2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * *WSO2 Inc. licenses this file to you under the Apache License, *Version 2.0 (the "License"); you may not use this file except *in compliance with the License. *You may obtain a copy of the License at * *http://www.apache.org/licenses/LICENSE-2.0 * *Unless required by applicable law or agreed to in writing, *software distributed under the License is distributed on an *"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *KIND, either express or implied. See the License for the *specific language governing permissions and limitations *under the License. */ package org.wso2.carbon.esb.mediator.test.enrich; import org.apache.axiom.om.OMElement; import org.apache.axiom.om.util.AXIOMUtil; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import org.wso2.carbon.automation.engine.annotations.ExecutionEnvironment; import org.wso2.carbon.automation.engine.annotations.SetEnvironment; import org.wso2.esb.integration.common.utils.ESBIntegrationTest; import org.wso2.esb.integration.common.utils.servers.WireMonitorServer; import static org.testng.Assert.assertTrue; /*Test for enrich mediator to add sibling to body of out message*/ public class EnrichIntegrationAddSiblingInOutMessageTestCase extends ESBIntegrationTest { public WireMonitorServer wireMonitorServer; @BeforeClass(alwaysRun = true) public void uploadSynapseConfig() throws Exception { super.init(); verifyProxyServiceExistence("enrichAddSiblingInOutMessageTestProxy"); wireMonitorServer = new WireMonitorServer(8991); } @SetEnvironment(executionEnvironments = {ExecutionEnvironment.STANDALONE}) @Test(groups = {"wso2.esb"}, description = "Enrich mediator:Add as a sibling to message body") public void 
addAsSiblingToMessageBody() throws Exception { wireMonitorServer.start(); String payload = "<m:getQuote xmlns:m=\"http://services.samples\">" + "<m:request>" + "</m:request>" + "</m:getQuote>"; OMElement payloadOM = AXIOMUtil.stringToOM(payload); try { OMElement response = axis2Client.sendSimpleStockQuoteRequest(getProxyServiceURLHttp("enrichAddSiblingInOutMessageTestProxy") , null, payloadOM); } catch (Exception e) { } String wireResponse = wireMonitorServer.getCapturedMessage(); String expectedSoapBody = "<soapenv:Body>"+ "<m:symbol1 xmlns:m=\"http://services.samples\">IBM</m:symbol1>"+ "<m:symbol2 xmlns:m=\"http://services.samples\">WSO2</m:symbol2>"+ "</soapenv:Body>" ; assertTrue(wireResponse.contains(expectedSoapBody),"Invalid soap body"); } @AfterClass private void destroy() throws Exception { super.cleanup(); } }
1,215
1,652
<reponame>minluzhou/test1
package com.ctrip.xpipe.metric;

import com.ctrip.xpipe.exception.XpipeException;

/**
 * Exception raised on failures of the metric proxy layer.
 *
 * @author wenchao.meng
 *         <p>
 *         Jul 27, 2017
 */
public class MetricProxyException extends XpipeException {

    /** Creates an exception with a descriptive message only. */
    public MetricProxyException(String message) {
        super(message);
    }

    /** Creates an exception with a message and the underlying cause. */
    public MetricProxyException(String message, Throwable th) {
        super(message, th);
    }
}
167
2,496
<filename>libs/qwt/src/qwt_vectorfield_symbol.cpp
/******************************************************************************
 * Qwt Widget Library
 * Copyright (C) 1997 <NAME>
 * Copyright (C) 2002 <NAME>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the Qwt License, Version 1.0
 *****************************************************************************/

#include "qwt_vectorfield_symbol.h"
#include <qpainter.h>
#include <qpainterpath.h>

//! Constructor
QwtVectorFieldSymbol::QwtVectorFieldSymbol()
{
}

//! Destructor
QwtVectorFieldSymbol::~QwtVectorFieldSymbol()
{
}

// Geometry of the filled arrow. The outline is built once as a QPainterPath;
// setLength() only moves the two tail-end vertices, which live at fixed
// element indices 3 and 4 of the path.
class QwtVectorFieldArrow::PrivateData
{
  public:
    PrivateData( qreal headW, qreal tailW )
        : headWidth( headW )
        , tailWidth( tailW )
        , length( headW + 4.0 )
    {
        /*
            Arrow is drawn horizontally, pointing into positive x direction
            with tip at 0,0.
         */
        path.lineTo( -headWidth, headWidth );
        path.lineTo( -headWidth, tailWidth );
        path.lineTo( -length, tailWidth );      // tail corner, path element 3
        path.lineTo( -length, -tailWidth );     // tail corner, path element 4
        path.lineTo( -headWidth, -tailWidth );
        path.lineTo( -headWidth, -headWidth );
        path.closeSubpath();
    }

    // Stretch the tail to the requested length, never shorter than the head.
    void setLength( qreal l )
    {
        length = qMax( l, headWidth );
        // Move only the two tail-end vertices; the head stays untouched.
        path.setElementPositionAt( 3, -length, tailWidth );
        path.setElementPositionAt( 4, -length, -tailWidth );
    }

    const qreal headWidth;
    const qreal tailWidth;
    qreal length;
    QPainterPath path;
};

/*!
    \brief Constructor

    The length is initialized by headWidth + 4

    \param headWidth Width of the triangular head
    \param tailWidth Width of the arrow tail

    \sa setLength()
 */
QwtVectorFieldArrow::QwtVectorFieldArrow( qreal headWidth, qreal tailWidth )
{
    m_data = new PrivateData( headWidth, tailWidth );
}

//! Destructor
QwtVectorFieldArrow::~QwtVectorFieldArrow()
{
    delete m_data;
}

//! Set the total length of the arrow (clamped to at least the head width).
void QwtVectorFieldArrow::setLength( qreal length )
{
    m_data->setLength( length );
}

//! \return Current total length of the arrow.
qreal QwtVectorFieldArrow::length() const
{
    return m_data->length;
}

//! Draw the arrow outline with the given painter.
void QwtVectorFieldArrow::paint( QPainter* painter ) const
{
    painter->drawPath( m_data->path );
}

// Geometry of the thin (stroke-only) arrow: two head strokes plus a tail
// line, stored as a QPainterPath with fixed element indices 1, 3 and 5 that
// setLength() repositions.
class QwtVectorFieldThinArrow::PrivateData
{
  public:
    PrivateData( qreal headW )
        : headWidth( headW )
        , length( headW + 4.0 )
    {
        path.lineTo( -headWidth, headWidth * 0.6 );   // head stroke, element 1
        path.moveTo( 0, 0 );
        path.lineTo( -headWidth, -headWidth * 0.6 );  // head stroke, element 3
        path.moveTo( 0, 0 );
        path.lineTo( -length, 0 );                    // tail line, element 5
    }

    const qreal headWidth;
    qreal length;
    QPainterPath path;
};

/*!
    \brief Constructor

    The length is initialized by headWidth + 4

    \param headWidth Width of the triangular head

    \sa setLength()
 */
QwtVectorFieldThinArrow::QwtVectorFieldThinArrow( qreal headWidth )
{
    m_data = new PrivateData( headWidth );
}

//! \brief Destructor
QwtVectorFieldThinArrow::~QwtVectorFieldThinArrow()
{
    delete m_data;
}

// Set the arrow length. The effective head width used for the strokes is
// clamped to length / 3 so the head never exceeds a third of the arrow.
void QwtVectorFieldThinArrow::setLength( qreal length )
{
    m_data->length = length;

    const qreal headWidth = qMin( m_data->headWidth, length / 3.0 );

    QPainterPath& path = m_data->path;
    // Reposition the two head strokes and the tail endpoint in place.
    path.setElementPositionAt( 1, -headWidth, headWidth * 0.6 );
    path.setElementPositionAt( 3, -headWidth, -headWidth * 0.6 );
    path.setElementPositionAt( 5, -length, 0 );
}

//! \return Current total length of the arrow.
qreal QwtVectorFieldThinArrow::length() const
{
    return m_data->length;
}

//! Draw the thin arrow strokes with the given painter.
void QwtVectorFieldThinArrow::paint( QPainter* p ) const
{
    p->drawPath( m_data->path );
}
1,412
1,041
package io.ebean.config;

/**
 * Defines the AutoTune behaviour for a Database.
 */
public class AutoTuneConfig {

  // Tuning mode applied to queries that have no explicit autoTune setting.
  private AutoTuneMode mode = AutoTuneMode.DEFAULT_OFF;

  // File the query tuning information is read from / written to.
  private String queryTuningFile = "ebean-autotune.xml";

  // When true, queries are tuned using the autoTune information.
  private boolean queryTuning;

  // When true, tuned queries always fetch the version property.
  private boolean queryTuningAddVersion;

  // When true, profiling information is collected.
  private boolean profiling;

  // Name of the file profiling information is written to.
  private String profilingFile = "ebean-profiling";

  // Number of queries fully profiled before dropping to profilingRate sampling.
  private int profilingBase = 5;

  // Fraction of queries profiled after the base amount.
  private double profilingRate = 0.01;

  // Frequency in seconds that profiling is collected and applied to tuning.
  private int profilingUpdateFrequency;

  // Millis to wait after System.gc() to collect profiling on shutdown.
  private int garbageCollectionWait = 100;

  // When true, System.gc() is not triggered on shutdown.
  private boolean skipGarbageCollectionOnShutdown;

  // When true, the profiling report is not produced on shutdown.
  private boolean skipProfileReportingOnShutdown;

  public AutoTuneConfig() {
  }

  /**
   * Return true if we are profiling or query tuning.
   *
   * If we are not doing either then we don't need a CallStack.
   */
  public boolean isActive() {
    return profiling || queryTuning;
  }

  /**
   * Return the name of the file that holds the query tuning information.
   */
  public String getQueryTuningFile() {
    return queryTuningFile;
  }

  /**
   * Set the name of the file that holds the query tuning information.
   */
  public void setQueryTuningFile(String queryTuningFile) {
    this.queryTuningFile = queryTuningFile;
  }

  /**
   * Return the name of the file that profiling information is written to.
   */
  public String getProfilingFile() {
    return profilingFile;
  }

  /**
   * Set the name of the file that profiling information is written to.
   */
  public void setProfilingFile(String profilingFile) {
    this.profilingFile = profilingFile;
  }

  /**
   * Return the frequency in seconds the profiling should be collected and automatically applied to the tuning.
   */
  public int getProfilingUpdateFrequency() {
    return profilingUpdateFrequency;
  }

  /**
   * Set the frequency in seconds the profiling should be collected and automatically applied to the tuning.
   */
  public void setProfilingUpdateFrequency(int profilingUpdateFrequency) {
    this.profilingUpdateFrequency = profilingUpdateFrequency;
  }

  /**
   * Return the mode used when autoTune has not been explicitly defined on a
   * query.
   */
  public AutoTuneMode getMode() {
    return mode;
  }

  /**
   * Set the mode used when autoTune has not been explicitly defined on a query.
   */
  public void setMode(AutoTuneMode mode) {
    this.mode = mode;
  }

  /**
   * Return true if the queries are being tuned.
   */
  public boolean isQueryTuning() {
    return queryTuning;
  }

  /**
   * Set to true if the queries should be tuned by autoTune.
   */
  public void setQueryTuning(boolean queryTuning) {
    this.queryTuning = queryTuning;
  }

  /**
   * Return true if the version property should be added when the query is
   * tuned.
   * <p>
   * If this is false then the version property will be added when profiling
   * detects that the bean is possibly going to be modified.
   * </p>
   */
  public boolean isQueryTuningAddVersion() {
    return queryTuningAddVersion;
  }

  /**
   * Set to true to force the version property to be always added by the query
   * tuning.
   * <p>
   * If this is false then the version property will be added when profiling
   * detects that the bean is possibly going to be modified.
   * </p>
   * <p>
   * Generally this is not expected to be turned on.
   * </p>
   */
  public void setQueryTuningAddVersion(boolean queryTuningAddVersion) {
    this.queryTuningAddVersion = queryTuningAddVersion;
  }

  /**
   * Return true if profiling information should be collected.
   */
  public boolean isProfiling() {
    return profiling;
  }

  /**
   * Set to true if profiling information should be collected.
   * <p>
   * The profiling information is collected and then used to generate the tuned
   * queries for autoTune.
   * </p>
   */
  public void setProfiling(boolean profiling) {
    this.profiling = profiling;
  }

  /**
   * Return the base number of queries to profile before changing to profile
   * only a percentage of following queries (profileRate).
   */
  public int getProfilingBase() {
    return profilingBase;
  }

  /**
   * Set the based number of queries to profile.
   */
  public void setProfilingBase(int profilingBase) {
    this.profilingBase = profilingBase;
  }

  /**
   * Return the rate (%) of queries to be profiled after the 'base' amount of
   * profiling.
   */
  public double getProfilingRate() {
    return profilingRate;
  }

  /**
   * Set the rate (%) of queries to be profiled after the 'base' amount of
   * profiling.
   */
  public void setProfilingRate(double profilingRate) {
    this.profilingRate = profilingRate;
  }

  /**
   * Return the time in millis to wait after a system gc to collect profiling
   * information.
   * <p>
   * The profiling information is collected on object finalise. As such we
   * generally don't want to trigger GC (let the JVM do its thing) but on
   * shutdown the autoTune manager will trigger System.gc() and then wait
   * (default 100 millis) to hopefully collect profiling information -
   * especially for short run unit tests.
   * </p>
   */
  public int getGarbageCollectionWait() {
    return garbageCollectionWait;
  }

  /**
   * Set the time in millis to wait after a System.gc() to collect profiling information.
   */
  public void setGarbageCollectionWait(int garbageCollectionWait) {
    this.garbageCollectionWait = garbageCollectionWait;
  }

  /**
   * Return true if triggering garbage collection should be skipped on shutdown.
   * You might set this when System.GC() slows a application shutdown too much.
   */
  public boolean isSkipGarbageCollectionOnShutdown() {
    return skipGarbageCollectionOnShutdown;
  }

  /**
   * Set to true if triggering garbage collection should be skipped on shutdown.
   * You might set this when System.GC() slows a application shutdown too much.
   */
  public void setSkipGarbageCollectionOnShutdown(boolean skipGarbageCollectionOnShutdown) {
    this.skipGarbageCollectionOnShutdown = skipGarbageCollectionOnShutdown;
  }

  /**
   * Return true if profile reporting should be skipped on shutdown.
   */
  public boolean isSkipProfileReportingOnShutdown() {
    return skipProfileReportingOnShutdown;
  }

  /**
   * Set to true if profile reporting should be skipped on shutdown.
   */
  public void setSkipProfileReportingOnShutdown(boolean skipProfileReportingOnShutdown) {
    this.skipProfileReportingOnShutdown = skipProfileReportingOnShutdown;
  }

  /**
   * Load the settings from the properties file.
   */
  public void loadSettings(PropertiesWrapper p) {
    queryTuning = p.getBoolean("autoTune.queryTuning", queryTuning);
    queryTuningAddVersion = p.getBoolean("autoTune.queryTuningAddVersion", queryTuningAddVersion);
    queryTuningFile = p.get("autoTune.queryTuningFile", queryTuningFile);
    skipGarbageCollectionOnShutdown = p.getBoolean("autoTune.skipGarbageCollectionOnShutdown", skipGarbageCollectionOnShutdown);
    skipProfileReportingOnShutdown = p.getBoolean("autoTune.skipProfileReportingOnShutdown", skipProfileReportingOnShutdown);
    mode = p.getEnum(AutoTuneMode.class, "autoTune.mode", mode);
    profiling = p.getBoolean("autoTune.profiling", profiling);
    profilingBase = p.getInt("autoTune.profilingBase", profilingBase);
    profilingRate = p.getDouble("autoTune.profilingRate", profilingRate);
    profilingFile = p.get("autoTune.profilingFile", profilingFile);
    profilingUpdateFrequency = p.getInt("autoTune.profilingUpdateFrequency", profilingUpdateFrequency);
  }
}
2,233
1,909
package org.knowm.xchange.coindirect.dto.marketdata;

import static org.assertj.core.api.Assertions.assertThat;

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.InputStream;
import org.junit.Test;

/** Verifies JSON unmarshalling of {@link CoindirectTrades} from example data. */
public class CoindirectTradesTest {

  @Test
  public void testUnmarshal() throws IOException {
    // Read in the JSON from the example resources.
    // Fix: the stream was never closed; try-with-resources releases it even if
    // readValue throws.
    try (InputStream is =
        CoindirectTradesTest.class.getResourceAsStream(
            "/org/knowm/xchange/coindirect/dto/marketdata/example-trade-history.json")) {

      ObjectMapper mapper = new ObjectMapper();
      CoindirectTrades coindirectTrades = mapper.readValue(is, CoindirectTrades.class);

      // Verify that the example data was unmarshalled correctly
      assertThat(coindirectTrades.data.size()).isEqualTo(2);
      assertThat(coindirectTrades.metaData.market).isEqualTo("ETH-BTC");
    }
  }
}
324
60,067
#include "caffe2/utils/math/broadcast.h" #include "caffe2/core/context.h" #include "caffe2/utils/eigen_utils.h" namespace caffe2 { namespace math { bool can_use_broadcast_fastpath(int ndim, const int* dims) { int index_of_last_singleton = -1; int index_of_first_non_singleton = ndim; for (int i = 0; i < ndim; i++) { if (dims[i] == 1) { index_of_last_singleton = i; } else if (index_of_first_non_singleton == ndim) { index_of_first_non_singleton = i; } } return index_of_last_singleton < index_of_first_non_singleton; } #define CAFFE2_SPECIALIZED_AFFINE_CHANNEL(T) \ template <> \ C10_EXPORT void AffineChannel<T, CPUContext, StorageOrder::NCHW>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ const T* scale, \ const T* bias, \ T* Y, \ CPUContext* /* context */) { \ ConstEigenVectorArrayMap<T> scale_arr(scale, C); \ ConstEigenVectorArrayMap<T> bias_arr(bias, C); \ const int stride = C * HxW; \ const T* X_ptr = X; \ T* Y_ptr = Y; \ for (int i = 0; i < N; ++i) { \ EigenArrayMap<T>(Y_ptr, HxW, C) = \ (ConstEigenArrayMap<T>(X_ptr, HxW, C).rowwise() * \ scale_arr.transpose()) \ .rowwise() + \ bias_arr.transpose(); \ X_ptr += stride; \ Y_ptr += stride; \ } \ } \ template <> \ C10_EXPORT void AffineChannel<T, CPUContext, StorageOrder::NHWC>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ const T* scale, \ const T* bias, \ T* Y, \ CPUContext* /* context */) { \ EigenArrayMap<T>(Y, C, N * HxW) = \ (ConstEigenArrayMap<T>(X, C, N * HxW).colwise() * \ ConstEigenVectorArrayMap<T>(scale, C)) \ .colwise() + \ ConstEigenVectorArrayMap<T>(bias, C); \ } CAFFE2_SPECIALIZED_AFFINE_CHANNEL(float) #undef CAFFE2_SPECIALIZED_AFFINE_CHANNEL } // namespace math } // namespace caffe2
2,525
1,043
package org.simplejavamail.api.internal.outlooksupport.model;

import org.simplejavamail.api.email.EmailPopulatingBuilder;

/**
 * Immutable pair bundling the {@link EmailPopulatingBuilder} produced from a
 * conversion together with the source {@link OutlookMessage}.
 * <br>
 * Useful when data is needed which didn't convert directly into the Email (builder) instance.
 */
public class EmailFromOutlookMessage {

	private final OutlookMessage outlookMessage;
	private final EmailPopulatingBuilder emailBuilder;

	public EmailFromOutlookMessage(final EmailPopulatingBuilder emailBuilder, final OutlookMessage outlookMessage) {
		this.outlookMessage = outlookMessage;
		this.emailBuilder = emailBuilder;
	}

	/** @return the builder holding the data that converted into the Email. */
	public EmailPopulatingBuilder getEmailBuilder() {
		return emailBuilder;
	}

	/** @return the Outlook message this result was derived from. */
	public OutlookMessage getOutlookMessage() {
		return outlookMessage;
	}
}
215
360
<reponame>Yanci0/openGauss-server /* * Copyright (c) 2020 Huawei Technologies Co.,Ltd. * * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * * http://license.coscl.org.cn/MulanPSL2 * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * ------------------------------------------------------------------------- * * mm_session_large_buffer_list.cpp * A list of large buffers used for session allocations. * A single list may be used by all sessions running on the same NUMA node. * * IDENTIFICATION * src/gausskernel/storage/mot/core/memory/mm_session_large_buffer_list.cpp * * ------------------------------------------------------------------------- */ #include "mm_session_large_buffer_list.h" #include "utilities.h" #include "session_context.h" #include "mm_api.h" namespace MOT { DECLARE_LOGGER(SessionLargeBufferList, Memory) extern int MemSessionLargeBufferListInit( MemSessionLargeBufferList* sessionBufferList, void* bufferList, uint64_t bufferSize, uint64_t bufferCount) { int result = MemLockInitialize(&sessionBufferList->m_lock); if (result != 0) { MOT_REPORT_ERROR(MOT_ERROR_INTERNAL, "Session Large Buffer List Initialization", "Failed to initialize large buffer list lock"); } else { sessionBufferList->m_bufferList = bufferList; sessionBufferList->m_bufferSize = bufferSize; sessionBufferList->m_maxBufferCount = bufferCount; sessionBufferList->m_allocatedCount = 0; sessionBufferList->m_freeBitsetCount = (bufferCount + sizeof(uint64_t) - 1) / sizeof(uint64_t); for (uint32_t i = 0; i < sessionBufferList->m_freeBitsetCount; ++i) { sessionBufferList->m_freeBitset[i] = 0xFFFFFFFFFFFFFFFF; } sessionBufferList->m_bufferHeaderList = 
(MemSessionLargeBufferHeader*)(sessionBufferList->m_freeBitset + sessionBufferList->m_freeBitsetCount); for (uint32_t bufferIndex = 0; bufferIndex < sessionBufferList->m_maxBufferCount; ++bufferIndex) { MemSessionLargeBufferHeader* bufferHeader = (MemSessionLargeBufferHeader*)(sessionBufferList->m_bufferHeaderList + bufferIndex); bufferHeader->m_buffer = (void*)(((uint8_t*)sessionBufferList->m_bufferList) + bufferIndex * (sessionBufferList->m_bufferSize)); bufferHeader->m_next = nullptr; bufferHeader->m_realObjectSize = 0; } } return result; } extern void MemSessionLargeBufferListOnDoubleFree( MemSessionLargeBufferList* sessionBufferList, void* buffer, uint32_t bufferIndex) { MOT_LOG_PANIC("Double free of session large buffer %p [@%u] in buffer list %p (node: %d, buffer-size=%u KB, " "allocated=%u/%u)", buffer, bufferIndex, sessionBufferList, MOTCurrentNumaNodeId, sessionBufferList->m_bufferSize, sessionBufferList->m_allocatedCount, sessionBufferList->m_maxBufferCount); // this is a very extreme case, we force abort in the hope of a better root cause analysis MOTAbort(buffer); } extern void MemSessionLargeBufferListPrint(const char* name, LogLevel logLevel, MemSessionLargeBufferList* sessionBufferList, MemReportMode reportMode /* = MEM_REPORT_SUMMARY */) { if (MOT_CHECK_LOG_LEVEL(logLevel)) { StringBufferApply([name, logLevel, sessionBufferList, reportMode](StringBuffer* stringBuffer) { MemSessionLargeBufferListToString(0, name, sessionBufferList, stringBuffer, reportMode); MOT_LOG(logLevel, "%s", stringBuffer->m_buffer); }); } } extern void MemSessionLargeBufferListToString(int indent, const char* name, MemSessionLargeBufferList* sessionBufferList, StringBuffer* stringBuffer, MemReportMode reportMode /* = MEM_REPORT_SUMMARY */) { MemSessionLargeBufferStats stats = {}; MemSessionLargeBufferListGetStats(sessionBufferList, &stats); if (stats.m_allocatedBytes > 0) { if (reportMode == MEM_REPORT_SUMMARY) { StringBufferAppend(stringBuffer, "%*sSession Large Buffer List 
%s [buffer-size=%u MB, max-buffers=%u]: %u MB allocated, %u MB " "requested\n", indent, "", name, sessionBufferList->m_bufferSize / MEGA_BYTE, sessionBufferList->m_maxBufferCount, stats.m_allocatedBytes / MEGA_BYTE, stats.m_requestedBytes / MEGA_BYTE); } else { StringBufferAppend(stringBuffer, "%*sSession Large Buffer List %s: [buffer-size=%u MB, max-buffers=%u]\n", indent, "", name, sessionBufferList->m_bufferSize, sessionBufferList->m_allocatedCount, sessionBufferList->m_maxBufferCount); StringBufferAppend(stringBuffer, "%*sBit-set: { ", indent + PRINT_REPORT_INDENT, ""); for (uint32_t i = 0; i < sessionBufferList->m_freeBitsetCount; ++i) { StringBufferAppend(stringBuffer, "%p", (void*)sessionBufferList->m_freeBitset[i]); if (i + 1 < sessionBufferList->m_freeBitsetCount) { StringBufferAppend(stringBuffer, ", "); } } StringBufferAppend(stringBuffer, " }\n"); StringBufferAppend(stringBuffer, "%*sBuffers: { ", indent + PRINT_REPORT_INDENT, ""); uint32_t bufferCount = 0; for (uint32_t bufferIndex = 0; bufferIndex < sessionBufferList->m_maxBufferCount; ++bufferIndex) { uint32_t slot = bufferIndex / 64; uint32_t index = bufferIndex % 64; if ((sessionBufferList->m_freeBitset[slot] & (((uint64_t)1) << (63 - index))) == 0) { // buffer allocated to application void* buffer = ((uint8_t*)sessionBufferList->m_bufferList) + // beginning offset sessionBufferList->m_bufferSize * bufferIndex; // size * index MemSessionLargeBufferHeader* bufferHeader = (MemSessionLargeBufferHeader*)(sessionBufferList->m_bufferHeaderList + // beginning offset bufferIndex); // buffer header index StringBufferAppend(stringBuffer, "%p (%" PRIu64 " bytes)", buffer, bufferHeader->m_realObjectSize); ++bufferCount; if (bufferCount + 1 < sessionBufferList->m_allocatedCount) { StringBufferAppend(stringBuffer, ", "); } } } StringBufferAppend(stringBuffer, " }\n"); } } } } // namespace MOT extern "C" void MemSessionLargeBufferListDump(void* arg) { MOT::MemSessionLargeBufferList* list = 
(MOT::MemSessionLargeBufferList*)arg; MOT::StringBufferApply([list](MOT::StringBuffer* stringBuffer) { MOT::MemSessionLargeBufferListToString(0, "Debug Dump", list, stringBuffer, MOT::MEM_REPORT_DETAILED); fprintf(stderr, "%s", stringBuffer->m_buffer); fflush(stderr); }); } extern "C" int MemSessionLargeBufferListAnalyze(void* list, void* buffer) { int result = 0; MOT::MemSessionLargeBufferList* sessionBufferList = (MOT::MemSessionLargeBufferList*)list; if (((uint8_t*)buffer) >= ((uint8_t*)sessionBufferList->m_bufferList)) { uint64_t bufferOffset = (uint64_t)(((uint8_t*)buffer) - ((uint8_t*)sessionBufferList->m_bufferList)); uint32_t bufferIndex = bufferOffset / sessionBufferList->m_bufferSize; if (bufferIndex < sessionBufferList->m_maxBufferCount) { MOT::MemSessionLargeBufferHeader* bufferHeader = (MOT::MemSessionLargeBufferHeader*)(sessionBufferList->m_bufferHeaderList + bufferIndex); fprintf(stderr, "Object %p found in session buffer list of %u bytes buffers at index %u, with real size %" PRIu64 "\n", buffer, sessionBufferList->m_bufferSize, bufferIndex, bufferHeader->m_realObjectSize); result = 1; } } return result; }
3,709
348
<gh_stars>100-1000 {"nom":"Quincy","circ":"2ème circonscription","dpt":"Cher","inscrits":627,"abs":352,"votants":275,"blancs":28,"nuls":10,"exp":237,"res":[{"nuance":"MDM","nom":"<NAME>","voix":139},{"nuance":"COM","nom":"<NAME>","voix":98}]}
98
777
<gh_stars>100-1000
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NET_QUIC_CORE_QUIC_SIMPLE_BUFFER_ALLOCATOR_H_
#define NET_QUIC_CORE_QUIC_SIMPLE_BUFFER_ALLOCATOR_H_

#include "net/quic/core/quic_buffer_allocator.h"
#include "net/quic/platform/api/quic_export.h"

namespace net {

// Minimal QuicBufferAllocator implementation.
// NOTE(review): definitions live in the .cc file; the exact allocation
// strategy (presumably plain new/delete, given the name) should be confirmed
// there before relying on it.
class QUIC_EXPORT_PRIVATE SimpleBufferAllocator : public QuicBufferAllocator {
 public:
  // Allocates a buffer of |size| bytes.
  char* New(size_t size) override;
  // Overload taking an additional flag; semantics defined by the base class.
  char* New(size_t size, bool flag_enable) override;
  // Releases a buffer previously obtained from New().
  void Delete(char* buffer) override;
};

}  // namespace net

#endif  // NET_QUIC_CORE_QUIC_SIMPLE_BUFFER_ALLOCATOR_H_
265
408
from setuptools import setup

# XXX: also update version in hamms/__init__.py
__version__ = '1.3'

setup(
    name='hamms',
    version=__version__,
    packages=['hamms'],
    description='Malformed servers to test your HTTP client',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/kevinburke/hamms',
    keywords=['testing', 'server', 'http'],
    # XXX, pin these down
    install_requires=['flask', 'httpbin', 'twisted'],
)
175
670
package com.uddernetworks.mspaint.gui.elements; import com.jfoenix.controls.JFXTextField; import com.uddernetworks.mspaint.settings.Setting; import com.uddernetworks.mspaint.settings.SettingsManager; import javafx.beans.property.ObjectProperty; import javafx.beans.property.SimpleObjectProperty; import javafx.beans.property.SimpleStringProperty; import javafx.beans.property.StringProperty; import javafx.scene.Node; import javafx.scene.control.Label; import javafx.scene.layout.HBox; import javafx.scene.layout.Priority; import javafx.scene.layout.Region; public class SettingNumberField extends HBox { private ObjectProperty<Setting> settingProperty = new SimpleObjectProperty<>(null); private StringProperty labelProperty = new SimpleStringProperty(); private Label label = new Label("Default"); private NumberField numberField = new NumberField(); public SettingNumberField() { getStyleClass().add("theme-text"); setStyle("-fx-padding: 10px 0"); label.getStyleClass().add("theme-text"); numberField.getStyleClass().add("theme-text"); Node spacer = getHSpacer(10); HBox.setHgrow(label, Priority.NEVER); HBox.setHgrow(numberField, Priority.NEVER); HBox.setHgrow(spacer, Priority.NEVER); label.setPrefHeight(25); getChildren().add(label); getChildren().add(spacer); getChildren().add(numberField); numberField.textProperty().addListener(((observable, oldValue, newValue) -> SettingsManager.getInstance().setSetting(settingProperty.get(), newValue.isEmpty() ? 
0 : Integer.valueOf(newValue)))); } public ObjectProperty<Setting> settingProperty() { return this.settingProperty; } public Setting getSetting() { return settingProperty().get(); } public void setSetting(Setting setting) { settingProperty().set(setting); numberField.setText(SettingsManager.getInstance().getSetting(setting, 0).toString()); } public StringProperty labelProperty() { return this.labelProperty; } public String getLabel() { return labelProperty().get(); } public void setLabel(String label) { labelProperty().set(label); this.label.setText(label); } private Node getHSpacer(double width) { Region spacer = new Region(); spacer.setPrefWidth(width); HBox.setHgrow(spacer, Priority.NEVER); return spacer; } public class NumberField extends JFXTextField { @Override public void replaceText(int start, int end, String text) { if (text.matches("[0-9]*")) { super.replaceText(start, end, text); } } @Override public void replaceSelection(String text) { if (text.matches("[0-9]*")) { super.replaceSelection(text); } } } }
1,122
2,959
<filename>samcli/lib/config/exceptions.py
"""
Exceptions to be used by samconfig.py
"""


class SamConfigVersionException(Exception):
    # Raised by samconfig.py for version-related problems; the exact trigger
    # condition lives in that module (presumably an unsupported or malformed
    # samconfig version value -- confirm against samconfig.py).
    pass
45
432
<gh_stars>100-1000 /* * Copyright (c) 2008 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by <NAME> <<EMAIL>> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */
/*
 * HAMMER blockmap
 */
#include <vm/vm_page2.h>

#include "hammer.h"

static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    hammer_blockmap_layer2_t layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int hammer_check_volume(hammer_mount_t, hammer_off_t*);
static void hammer_skip_volume(hammer_off_t *offsetp);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

/*
 * RB-tree comparator: order reservations by their zone offset.
 */
static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;

	hmp = trans->hmp;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(hammer_is_index_record(zone));

	/*
	 * Setup
	 */
	root_volume = trans->rootvol;
	*errorp = 0;
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = HAMMER_DATA_DOALIGN_WITH(hammer_off_t, hint);
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				  "allocation\n", zone);
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);

	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Skip the whole volume if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system. This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if (HAMMER_VOL_DECODE(layer1->phys_offset) == hmp->volume_to_remove) {
		hammer_skip_volume(&next_offset);
		goto again;
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

#if 0
	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}
#endif

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = HAMMER_ZONE_LAYER2_NEXT_OFFSET(next_offset);
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}

	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(hmp->version, layer1);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	hammer_crc_set_layer2(hmp->version, layer2);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}

	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
		if (*errorp) {
			result_offset = 0;
			goto failed;
		}
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * need to save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);
failed:

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx = NULL;
	int loops = 0;
	int offset;

	/*
	 * Setup
	 */
	KKASSERT(hammer_is_index_record(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				  "reservation\n", zone);
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = HAMMER_ZONE_LAYER2_NEXT_OFFSET(next_offset);
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;

	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE &&
	    (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_paging_min_dnc(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
			if (*errorp)
				goto failed;
		}
	}
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;
	return(resv);
}

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT(hammer_is_zone_raw_buffer(resv->zone_offset));

	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE, 1);
		if (hammer_debug_general & 0x20000) {
			hkprintf("delbgblk %016jx error %d\n",
				 (intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			hkprintf("delresvr %016jx zone %02x\n",
				 (intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, hammer_blockmap_layer2_t layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			/* lost an insertion race, retry the lookup */
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		/* already queued: re-stamp the flush group and move to tail */
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}

/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the meantime, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			hammer_crc_set_layer1(hmp->version, layer1);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
			   root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	hammer_crc_set_layer2(hmp->version, layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - subtract (offset, bytes) worth of de-duplicated
 * data from the big-block's free count without changing zone ownership.
 *
 * Returns 0 on success or ERANGE if the adjustment would cause
 * layer2->bytes_free to wrap (underflow).
 */
int
hammer_blockmap_dedup(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;

	if (bytes == 0)
		return (0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		error = ERANGE;
		goto underflow;
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	hammer_crc_set_layer2(hmp->version, layer2);
underflow:
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return (error);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(hmp->version, layer1);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		hdkprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;

	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;

	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	hammer_crc_set_layer2(hmp->version, layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_index_record(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		*curp = 0;
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		*curp = 0;
		bytes = 0;
		goto failed;
	}
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;

	/*
	 * *curp becomes 1 only when there is no error and
	 * next_offset and zone_offset are in the same big-block.
	 */
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;	/* not same */
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x4000) {
		hdkprintf("%016jx -> %d\n", (intmax_t)zone_offset, bytes);
	}
	return(bytes);
}

/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;

	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(hmp->version, layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(hmp->version, layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;

	if (layer2->zone == 0) {
		/* unassigned big-block must be covered by a reservation */
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		hpanic("bad zone %d/%d", layer2->zone, zone);
	}
	if (!hammer_crc_test_layer2(hmp->version, layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(hmp->version, layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		hdkprintf("%016jx -> %016jx\n",
			(intmax_t)zone_offset, (intmax_t)result_offset);
	}
	return(result_offset);
}

/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
		    ((int64_t)hammer_limit_dirtybufspace) +
		    (slop << HAMMER_BIGBLOCK_BITS);

	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
		return(0);
	}

	return (ENOSPC);
}

/*
 * If the layer1 entry covering *offsetp is marked unavailable on this
 * volume, advance *offsetp to the next volume via hammer_skip_volume().
 * Returns 0 on success or the hammer_bread() error code.
 */
static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset;
	int error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(*offsetp);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more physically available space in layer1s
	 * of the current volume, go to the next volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
		hammer_skip_volume(offsetp);
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	return(error);
}

/*
 * Advance *offsetp to offset 0 of the next volume, wrapping to volume 0
 * of the next zone when the maximum volume number is reached.
 */
static void
hammer_skip_volume(hammer_off_t *offsetp)
{
	hammer_off_t offset;
	int zone, vol_no;

	offset = *offsetp;
	zone = HAMMER_ZONE_DECODE(offset);
	vol_no = HAMMER_VOL_DECODE(offset) + 1;
	KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);
	if (vol_no == HAMMER_MAX_VOLUMES) {	/* wrap */
		vol_no = 0;
		++zone;
	}
	*offsetp = HAMMER_ENCODE(zone, vol_no, 0);
}
15,554
2,151
#------------------------------------------------------------------------------
# elftools tests
#
# <NAME> (<EMAIL>)
# This code is in the public domain
#------------------------------------------------------------------------------
try:
    import unittest2 as unittest
except ImportError:
    import unittest
import os

from utils import setup_syspath
setup_syspath()
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import VER_FLAGS
from elftools.elf.gnuversions import (
    GNUVerNeedSection, GNUVerDefSection, GNUVerSymSection)


class TestSymbolVersioning(unittest.TestCase):
    """Checks parsing of the GNU symbol versioning sections
    (.gnu.version, .gnu.version_r, .gnu.version_d) against a known
    reference shared object.
    """

    # Expected (symbol name, version index) pairs from the .gnu.version
    # section of the test binary, in section order.  An 'ndx' value
    # OR-ed with 0x8000 carries the "hidden" flag.
    versym_reference_data = [
        {'name': b'', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'_ITM_deregisterTMCloneTable', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'puts', 'ndx': 5},
        {'name': b'strlcat', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'__stack_chk_fail', 'ndx': 6},
        {'name': b'__gmon_start__', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'gzoffset', 'ndx': 7},
        {'name': b'_Jv_RegisterClasses', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'_ITM_registerTMCloneTable', 'ndx': 'VER_NDX_LOCAL'},
        {'name': b'__cxa_finalize', 'ndx': 5},
        {'name': b'_edata', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'VER_1.0', 'ndx': 2},
        {'name': b'function1_ver1_1', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'_end', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'function1', 'ndx': 4 | 0x8000},
        {'name': b'__bss_start', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'function1', 'ndx': 2},
        {'name': b'VER_1.1', 'ndx': 3},
        {'name': b'_init', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'function1_ver1_0', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'_fini', 'ndx': 'VER_NDX_GLOBAL'},
        {'name': b'VER_1.2', 'ndx': 4},
        {'name': b'function2', 'ndx': 3},
    ]

    def test_versym_section(self):
        """Every .gnu.version entry must match the reference data."""
        reference_data = TestSymbolVersioning.versym_reference_data

        with open(os.path.join('test', 'testfiles_for_unittests',
                               'lib_versioned64.so.1.elf'), 'rb') as f:
            elf = ELFFile(f)
            versym_section = None
            for section in elf.iter_sections():
                if isinstance(section, GNUVerSymSection):
                    versym_section = section
                    break

            self.assertIsNotNone(versym_section)

            # Iterate over the section that was found, not the leaked
            # loop variable, so the intent is explicit.
            for versym, ref_versym in zip(versym_section.iter_symbols(),
                                          reference_data):
                self.assertEqual(versym.name, ref_versym['name'])
                self.assertEqual(versym['ndx'], ref_versym['ndx'])

    # Expected .gnu.version_r (required versions) content: one entry
    # per needed library, each with its vernaux version records.
    verneed_reference_data = [
        {'name': b'libz.so.1', 'vn_version': 1, 'vn_cnt': 1,
         'vernaux': [
             {'name': b'ZLIB_1.2.3.5', 'vna_flags': 0, 'vna_other': 7}]},
        {'name': b'libc.so.6', 'vn_version': 1, 'vn_cnt': 2,
         'vernaux': [
             {'name': b'GLIBC_2.4', 'vna_flags': 0, 'vna_other': 6},
             {'name': b'GLIBC_2.2.5', 'vna_flags': 0, 'vna_other': 5}]},
    ]

    def test_verneed_section(self):
        """Every .gnu.version_r entry must match the reference data."""
        reference_data = TestSymbolVersioning.verneed_reference_data

        with open(os.path.join('test', 'testfiles_for_unittests',
                               'lib_versioned64.so.1.elf'), 'rb') as f:
            elf = ELFFile(f)
            verneed_section = None
            for section in elf.iter_sections():
                if isinstance(section, GNUVerNeedSection):
                    verneed_section = section
                    break

            self.assertIsNotNone(verneed_section)

            for (verneed, vernaux_iter), ref_verneed in zip(
                    verneed_section.iter_versions(), reference_data):
                self.assertEqual(verneed.name, ref_verneed['name'])
                self.assertEqual(verneed['vn_cnt'], ref_verneed['vn_cnt'])
                self.assertEqual(verneed['vn_version'],
                                 ref_verneed['vn_version'])

                for vernaux, ref_vernaux in zip(
                        vernaux_iter, ref_verneed['vernaux']):
                    self.assertEqual(vernaux.name, ref_vernaux['name'])
                    self.assertEqual(vernaux['vna_flags'],
                                     ref_vernaux['vna_flags'])
                    self.assertEqual(vernaux['vna_other'],
                                     ref_vernaux['vna_other'])

    # Expected .gnu.version_d (defined versions) content, including the
    # base definition and each version's verdaux name chain.
    verdef_reference_data = [
        {'vd_ndx': 1, 'vd_version': 1, 'vd_flags': VER_FLAGS.VER_FLG_BASE,
         'vd_cnt': 1,
         'verdaux': [
             {'name': b'lib_versioned.so.1'}]},
        {'vd_ndx': 2, 'vd_version': 1, 'vd_flags': 0, 'vd_cnt': 1,
         'verdaux': [
             {'name': b'VER_1.0'}]},
        {'vd_ndx': 3, 'vd_version': 1, 'vd_flags': 0, 'vd_cnt': 2,
         'verdaux': [
             {'name': b'VER_1.1'},
             {'name': b'VER_1.0'}]},
        {'vd_ndx': 4, 'vd_version': 1, 'vd_flags': 0, 'vd_cnt': 2,
         'verdaux': [
             {'name': b'VER_1.2'},
             {'name': b'VER_1.1'}]},
    ]

    def test_verdef_section(self):
        """Every .gnu.version_d entry must match the reference data."""
        reference_data = TestSymbolVersioning.verdef_reference_data

        with open(os.path.join('test', 'testfiles_for_unittests',
                               'lib_versioned64.so.1.elf'), 'rb') as f:
            elf = ELFFile(f)
            # Bug fix: the original initialized ``verneed_section = None``
            # (copy-paste from the test above) but assigned and asserted
            # ``verdef_section``, so a missing section raised NameError
            # instead of a clean assertion failure.
            verdef_section = None
            for section in elf.iter_sections():
                if isinstance(section, GNUVerDefSection):
                    verdef_section = section
                    break

            self.assertIsNotNone(verdef_section)

            for (verdef, verdaux_iter), ref_verdef in zip(
                    verdef_section.iter_versions(), reference_data):
                self.assertEqual(verdef['vd_ndx'], ref_verdef['vd_ndx'])
                self.assertEqual(verdef['vd_version'],
                                 ref_verdef['vd_version'])
                self.assertEqual(verdef['vd_flags'], ref_verdef['vd_flags'])
                self.assertEqual(verdef['vd_cnt'], ref_verdef['vd_cnt'])

                for verdaux, ref_verdaux in zip(
                        verdaux_iter, ref_verdef['verdaux']):
                    self.assertEqual(verdaux.name, ref_verdaux['name'])


if __name__ == '__main__':
    unittest.main()
3,452
818
<gh_stars>100-1000
/*
 * Copyright 2021 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jbpm.process.instance.impl;

import java.util.ArrayList;
import java.util.List;

import org.kie.dmn.api.feel.runtime.events.FEELEvent;
import org.kie.dmn.api.feel.runtime.events.FEELEventListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * FEEL event listener that logs every event at a level matching its
 * severity and collects ERROR events for later retrieval.
 */
public class FeelErrorEvaluatorListener implements FEELEventListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(FeelErrorEvaluatorListener.class);

    /** ERROR-severity events observed so far, in arrival order. */
    private final List<FEELEvent> collectedErrors = new ArrayList<>();

    /**
     * Records ERROR events and logs the event at the level implied by its
     * severity (ERROR->error, TRACE->debug, WARN->warn, anything else->info).
     */
    @Override
    public void onEvent(FEELEvent event) {
        FEELEvent.Severity severity = event.getSeverity();
        if (severity == FEELEvent.Severity.ERROR) {
            collectedErrors.add(event);
            LOGGER.error("{}", event);
        } else if (severity == FEELEvent.Severity.TRACE) {
            LOGGER.debug("{}", event);
        } else if (severity == FEELEvent.Severity.WARN) {
            LOGGER.warn("{}", event);
        } else {
            // INFO and any other severity fall through to info logging.
            LOGGER.info("{}", event);
        }
    }

    /**
     * @return the live list of collected ERROR events (not a copy).
     */
    public List<FEELEvent> getErrorEvents() {
        return collectedErrors;
    }
}
704
957
 #pragma once #include "Culling3D.h" #include <vector> namespace Culling3D { class Grid { private: std::vector<Object*> objects; public: Grid(); void AddObject(Object* o); void RemoveObject(Object* o); std::vector<Object*>& GetObjects() { return objects; } bool IsScanned; }; } // namespace Culling3D
129
2,048
<reponame>maniacs-m/connfa-android<filename>libraries/drupalSDK/src/main/java/com/ls/drupal/login/ILoginManager.java
/*
 * The MIT License (MIT)
 *  Copyright (c) 2014 Lemberg Solutions Limited
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 *  of this software and associated documentation files (the "Software"), to deal
 *  in the Software without restriction, including without limitation the rights
 *  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *  copies of the Software, and to permit persons to whom the Software is
 *  furnished to do so, subject to the following conditions:
 *
 *  The above copyright notice and this permission notice shall be included in all
 *  copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *  SOFTWARE.
 */

package com.ls.drupal.login;

import com.android.volley.RequestQueue;
import com.ls.http.base.BaseRequest;

/**
 * Strategy interface for authenticating requests against a Drupal backend:
 * performing login/logout, attaching credentials to outgoing requests, and
 * restoring an expired session after a 401 response.
 */
public interface ILoginManager
{
    /**
     * Login request, responsible for login data fetch.
     * @param userName user name to authenticate with
     * @param password password to authenticate with
     * @param queue operation queue to perform login within
     * @return login result object
     */
    Object login(String userName, String password, RequestQueue queue);

    /**
     * @return true if this manager should attempt to restore the login in case of a 401 error
     */
    boolean shouldRestoreLogin();

    /**
     * @return true if login can be restored (there are credentials or an access token cached)
     */
    boolean canRestoreLogin();

    /**
     * Add the necessary authentication data to request headers or post/get parameters.
     * @param request request to decorate with authentication data
     */
    void applyLoginDataToRequest(BaseRequest request);

    /**
     * Restore login data, if possible.
     * Note: this call should be performed synchronously.
     * @param queue operation queue the restore may be performed within (but it isn't mandatory)
     * @return true if the restore succeeded (or no result can be determined), false in case of failure
     */
    boolean restoreLoginData(RequestQueue queue);

    /**
     * Called when {@link #restoreLoginData} returned false or a 401 error occurred after the login was restored.
     */
    void onLoginRestoreFailed();

    /**
     * Perform the logout operation.
     * @param queue operation queue to perform logout within
     * @return logout request result
     */
    Object logout(RequestQueue queue);
}
800
664
package tellh.com.recyclertreeview.bean;

import tellh.com.recyclertreeview.R;
import tellh.com.recyclertreeview_lib.LayoutItemType;

/**
 * Tree-view node payload representing a directory; supplies the layout
 * resource used to render a directory row.
 *
 * Created by tlh on 2016/10/1 :)
 */
public class Dir implements LayoutItemType {
    // Display name of the directory shown in the row.
    public String dirName;

    public Dir(String dirName) {
        this.dirName = dirName;
    }

    /** @return the row layout inflated for directory items. */
    @Override
    public int getLayoutId() {
        return R.layout.item_dir;
    }
}
162
4,342
import unittest

import rx
from rx import operators as ops


class TestGroupByReduce(unittest.TestCase):
    """Exercises per-group and per-window reductions on RxPY pipelines."""

    def test_groupby_count(self):
        """Counting items per parity group yields 5 evens and 5 odds."""
        results = []

        def parity(value):
            return 'even' if value % 2 == 0 else 'odd'

        counts = rx.from_(range(10)).pipe(
            ops.group_by(parity),
            ops.flat_map(lambda grp: grp.pipe(
                ops.count(),
                ops.map(lambda total: (grp.key, total)),
            ))
        )

        counts.subscribe(on_next=results.append)
        assert results == [('even', 5), ('odd', 5)]

    def test_window_sum(self):
        """Summing sliding windows (size 3, step 1) over range(6)."""
        results = []

        rx.from_(range(6)).pipe(
            ops.window_with_count(count=3, skip=1),
            ops.flat_map(lambda window: window.pipe(
                ops.sum(),
            )),
        ).subscribe(on_next=results.append)

        # Trailing partial windows shrink to size 2, 1 and finally 0.
        assert results == [3, 6, 9, 12, 9, 5, 0]
430
778
<gh_stars>100-1000 /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pulsar.broker.service; import static com.google.common.base.Preconditions.checkNotNull; import static com.scurrilous.circe.checksum.Crc32cIntChecksum.computeChecksum; import static org.apache.pulsar.broker.service.AbstractReplicator.REPL_PRODUCER_NAME_DELIMITER; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; import static org.apache.pulsar.common.protocol.Commands.readChecksum; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; import io.netty.util.Recycler.Handle; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.BrokerServiceException.TopicClosedException; import org.apache.pulsar.broker.service.BrokerServiceException.TopicTerminatedException; import 
org.apache.pulsar.broker.service.Topic.PublishContext; import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.ProducerAccessMode; import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.stats.NonPersistentPublisherStatsImpl; import org.apache.pulsar.common.policies.data.stats.PublisherStatsImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.stats.Rate; import org.apache.pulsar.common.util.DateFormatter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Represents a currently connected producer. */ public class Producer { private final Topic topic; private final TransportCnx cnx; private final String producerName; private final long epoch; private final boolean userProvidedProducerName; private final long producerId; private final String appId; private Rate msgIn; private Rate chunkedMessageRate; // it records msg-drop rate only for non-persistent topic private final Rate msgDrop; private volatile long pendingPublishAcks = 0; private static final AtomicLongFieldUpdater<Producer> pendingPublishAcksUpdater = AtomicLongFieldUpdater .newUpdater(Producer.class, "pendingPublishAcks"); private boolean isClosed = false; private final CompletableFuture<Void> closeFuture; private final PublisherStatsImpl stats; private final boolean isRemote; private final String remoteCluster; private final boolean isNonPersistentTopic; private final boolean isEncrypted; private final ProducerAccessMode accessMode; private Optional<Long> topicEpoch; private final Map<String, String> metadata; private final SchemaVersion schemaVersion; private final 
String clientAddress; // IP address only, no port number included private final AtomicBoolean isDisconnecting = new AtomicBoolean(false); public Producer(Topic topic, TransportCnx cnx, long producerId, String producerName, String appId, boolean isEncrypted, Map<String, String> metadata, SchemaVersion schemaVersion, long epoch, boolean userProvidedProducerName, ProducerAccessMode accessMode, Optional<Long> topicEpoch, boolean supportsPartialProducer) { final ServiceConfiguration serviceConf = cnx.getBrokerService().pulsar().getConfiguration(); this.topic = topic; this.cnx = cnx; this.producerId = producerId; this.producerName = checkNotNull(producerName); this.userProvidedProducerName = userProvidedProducerName; this.epoch = epoch; this.closeFuture = new CompletableFuture<>(); this.appId = appId; this.msgIn = new Rate(); this.chunkedMessageRate = new Rate(); this.isNonPersistentTopic = topic instanceof NonPersistentTopic; this.msgDrop = this.isNonPersistentTopic ? new Rate() : null; this.metadata = metadata != null ? metadata : Collections.emptyMap(); this.stats = isNonPersistentTopic ? new NonPersistentPublisherStatsImpl() : new PublisherStatsImpl(); if (cnx.hasHAProxyMessage()) { stats.setAddress(cnx.getHAProxyMessage().sourceAddress() + ":" + cnx.getHAProxyMessage().sourcePort()); } else { stats.setAddress(cnx.clientAddress().toString()); } stats.setConnectedSince(DateFormatter.now()); stats.setClientVersion(cnx.getClientVersion()); stats.setProducerName(producerName); stats.producerId = producerId; if (serviceConf.isAggregatePublisherStatsByProducerName() && stats.getProducerName() != null) { // If true and the client supports partial producer, // aggregate publisher stats of PartitionedTopicStats by producerName. // Otherwise, aggregate it by list index. stats.setSupportsPartialProducer(supportsPartialProducer); } else { // aggregate publisher stats of PartitionedTopicStats by list index. 
stats.setSupportsPartialProducer(false); } stats.metadata = this.metadata; stats.accessMode = Commands.convertProducerAccessMode(accessMode); String replicatorPrefix = serviceConf.getReplicatorPrefix() + "."; this.isRemote = producerName.startsWith(replicatorPrefix); this.remoteCluster = parseRemoteClusterName(producerName, isRemote, replicatorPrefix); this.isEncrypted = isEncrypted; this.schemaVersion = schemaVersion; this.accessMode = accessMode; this.topicEpoch = topicEpoch; this.clientAddress = cnx.clientSourceAddress(); } /** * Producer name for replicator is in format. * "replicatorPrefix.localCluster" (old) * "replicatorPrefix.localCluster-->remoteCluster" (new) */ private String parseRemoteClusterName(String producerName, boolean isRemote, String replicatorPrefix) { if (isRemote) { String clusterName = producerName.substring(replicatorPrefix.length()); return clusterName.contains(REPL_PRODUCER_NAME_DELIMITER) ? clusterName.split(REPL_PRODUCER_NAME_DELIMITER)[0] : clusterName; } return null; } /** * Method to determine if this producer can replace another producer. * @param other - producer to compare to this one * @return true if this producer is a subsequent instantiation of the same logical producer. Otherwise, false. 
*/ public boolean isSuccessorTo(Producer other) { return Objects.equals(producerName, other.producerName) && Objects.equals(topic, other.topic) && producerId == other.producerId && Objects.equals(cnx, other.cnx) && other.getEpoch() < epoch; } public void publishMessage(long producerId, long sequenceId, ByteBuf headersAndPayload, long batchSize, boolean isChunked, boolean isMarker) { if (checkAndStartPublish(producerId, sequenceId, headersAndPayload, batchSize)) { publishMessageToTopic(headersAndPayload, sequenceId, batchSize, isChunked, isMarker); } } public void publishMessage(long producerId, long lowestSequenceId, long highestSequenceId, ByteBuf headersAndPayload, long batchSize, boolean isChunked, boolean isMarker) { if (lowestSequenceId > highestSequenceId) { cnx.execute(() -> { cnx.getCommandSender().sendSendError(producerId, highestSequenceId, ServerError.MetadataError, "Invalid lowest or highest sequence id"); cnx.completedSendOperation(isNonPersistentTopic, headersAndPayload.readableBytes()); }); return; } if (checkAndStartPublish(producerId, highestSequenceId, headersAndPayload, batchSize)) { publishMessageToTopic(headersAndPayload, lowestSequenceId, highestSequenceId, batchSize, isChunked, isMarker); } } public boolean checkAndStartPublish(long producerId, long sequenceId, ByteBuf headersAndPayload, long batchSize) { if (isClosed) { cnx.execute(() -> { cnx.getCommandSender().sendSendError(producerId, sequenceId, ServerError.PersistenceError, "Producer is closed"); cnx.completedSendOperation(isNonPersistentTopic, headersAndPayload.readableBytes()); }); return false; } if (!verifyChecksum(headersAndPayload)) { cnx.execute(() -> { cnx.getCommandSender().sendSendError(producerId, sequenceId, ServerError.ChecksumError, "Checksum failed on the broker"); cnx.completedSendOperation(isNonPersistentTopic, headersAndPayload.readableBytes()); }); return false; } if (topic.isEncryptionRequired()) { headersAndPayload.markReaderIndex(); MessageMetadata msgMetadata = 
Commands.parseMessageMetadata(headersAndPayload); headersAndPayload.resetReaderIndex(); int encryptionKeysCount = msgMetadata.getEncryptionKeysCount(); // Check whether the message is encrypted or not if (encryptionKeysCount < 1) { log.warn("[{}] Messages must be encrypted", getTopic().getName()); cnx.execute(() -> { cnx.getCommandSender().sendSendError(producerId, sequenceId, ServerError.MetadataError, "Messages must be encrypted"); cnx.completedSendOperation(isNonPersistentTopic, headersAndPayload.readableBytes()); }); return false; } } startPublishOperation((int) batchSize, headersAndPayload.readableBytes()); return true; } private void publishMessageToTopic(ByteBuf headersAndPayload, long sequenceId, long batchSize, boolean isChunked, boolean isMarker) { topic.publishMessage(headersAndPayload, MessagePublishContext.get(this, sequenceId, msgIn, headersAndPayload.readableBytes(), batchSize, isChunked, System.nanoTime(), isMarker)); } private void publishMessageToTopic(ByteBuf headersAndPayload, long lowestSequenceId, long highestSequenceId, long batchSize, boolean isChunked, boolean isMarker) { topic.publishMessage(headersAndPayload, MessagePublishContext.get(this, lowestSequenceId, highestSequenceId, msgIn, headersAndPayload.readableBytes(), batchSize, isChunked, System.nanoTime(), isMarker)); } private boolean verifyChecksum(ByteBuf headersAndPayload) { if (hasChecksum(headersAndPayload)) { int readerIndex = headersAndPayload.readerIndex(); try { int checksum = readChecksum(headersAndPayload); long computedChecksum = computeChecksum(headersAndPayload); if (checksum == computedChecksum) { return true; } else { log.error("[{}] [{}] Failed to verify checksum", topic, producerName); return false; } } finally { headersAndPayload.readerIndex(readerIndex); } } else { // ignore if checksum is not available if (log.isDebugEnabled()) { log.debug("[{}] [{}] Payload does not have checksum to verify", topic, producerName); } return true; } } private void 
startPublishOperation(int batchSize, long msgSize) { // A single thread is incrementing/decrementing this counter, so we can use lazySet which doesn't involve a mem // barrier pendingPublishAcksUpdater.lazySet(this, pendingPublishAcks + 1); // increment publish-count this.getTopic().incrementPublishCount(batchSize, msgSize); } private void publishOperationCompleted() { long newPendingPublishAcks = this.pendingPublishAcks - 1; pendingPublishAcksUpdater.lazySet(this, newPendingPublishAcks); // Check the close future to avoid grabbing the mutex every time the pending acks goes down to 0 if (newPendingPublishAcks == 0 && !closeFuture.isDone()) { synchronized (this) { if (isClosed && !closeFuture.isDone()) { closeNow(true); } } } } public void recordMessageDrop(int batchSize) { if (this.isNonPersistentTopic) { msgDrop.recordEvent(batchSize); } } /** * Return the sequence id of. * * @return the sequence id */ public long getLastSequenceId() { if (isNonPersistentTopic) { return -1; } else { return ((PersistentTopic) topic).getLastPublishedSequenceId(producerName); } } public TransportCnx getCnx() { return this.cnx; } private static final class MessagePublishContext implements PublishContext, Runnable { /* * To store context information built by message payload * processors (time duration, size etc), if any configured */ Map<String, Object> propertyMap; private Producer producer; private long sequenceId; private long ledgerId; private long entryId; private Rate rateIn; private int msgSize; private long batchSize; private boolean chunked; private boolean isMarker; private long startTimeNs; private String originalProducerName; private long originalSequenceId; private long highestSequenceId; private long originalHighestSequenceId; public String getProducerName() { return producer.getProducerName(); } public long getSequenceId() { return sequenceId; } @Override public boolean isChunked() { return chunked; } @Override public void setProperty(String propertyName, Object value){ 
if (this.propertyMap == null) { this.propertyMap = new HashMap<>(); } this.propertyMap.put(propertyName, value); } @Override public Object getProperty(String propertyName){ if (this.propertyMap != null) { return this.propertyMap.get(propertyName); } else { return null; } } @Override public long getHighestSequenceId() { return highestSequenceId; } @Override public void setOriginalProducerName(String originalProducerName) { this.originalProducerName = originalProducerName; } @Override public void setOriginalSequenceId(long originalSequenceId) { this.originalSequenceId = originalSequenceId; } @Override public String getOriginalProducerName() { return originalProducerName; } @Override public long getOriginalSequenceId() { return originalSequenceId; } @Override public void setOriginalHighestSequenceId(long originalHighestSequenceId) { this.originalHighestSequenceId = originalHighestSequenceId; } @Override public long getOriginalHighestSequenceId() { return originalHighestSequenceId; } /** * Executed from managed ledger thread when the message is persisted. */ @Override public void completed(Exception exception, long ledgerId, long entryId) { if (exception != null) { final ServerError serverError = getServerError(exception); producer.cnx.execute(() -> { if (!(exception instanceof TopicClosedException)) { // For TopicClosed exception there's no need to send explicit error, since the client was // already notified long callBackSequenceId = Math.max(highestSequenceId, sequenceId); producer.cnx.getCommandSender().sendSendError(producer.producerId, callBackSequenceId, serverError, exception.getMessage()); } producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize); producer.publishOperationCompleted(); recycle(); }); } else { if (log.isDebugEnabled()) { log.debug("[{}] [{}] [{}] triggered send callback. 
cnx {}, sequenceId {}", producer.topic, producer.producerName, producer.producerId, producer.cnx.clientAddress(), sequenceId); } this.ledgerId = ledgerId; this.entryId = entryId; producer.cnx.execute(this); } } private ServerError getServerError(Exception exception) { ServerError serverError; if (exception instanceof TopicTerminatedException) { serverError = ServerError.TopicTerminatedError; } else if (exception instanceof BrokerServiceException.NotAllowedException) { serverError = ServerError.NotAllowedError; } else { serverError = ServerError.PersistenceError; } return serverError; } /** * Executed from I/O thread when sending receipt back to client. */ @Override public void run() { if (log.isDebugEnabled()) { log.debug("[{}] [{}] [{}] Persisted message. cnx {}, sequenceId {}", producer.topic, producer.producerName, producer.producerId, producer.cnx, sequenceId); } // stats rateIn.recordMultipleEvents(batchSize, msgSize); producer.topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.NANOSECONDS); producer.cnx.getCommandSender().sendSendReceiptResponse(producer.producerId, sequenceId, highestSequenceId, ledgerId, entryId); producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize); if (this.chunked) { producer.chunkedMessageRate.recordEvent(); } producer.publishOperationCompleted(); if (producer.cnx.getBrokerService().getInterceptor() != null){ producer.cnx.getBrokerService().getInterceptor().messageProduced( (ServerCnx) producer.cnx, producer, startTimeNs, ledgerId, entryId, this); } recycle(); } static MessagePublishContext get(Producer producer, long sequenceId, Rate rateIn, int msgSize, long batchSize, boolean chunked, long startTimeNs, boolean isMarker) { MessagePublishContext callback = RECYCLER.get(); callback.producer = producer; callback.sequenceId = sequenceId; callback.rateIn = rateIn; callback.msgSize = msgSize; callback.batchSize = batchSize; callback.chunked = chunked; callback.originalProducerName = null; 
callback.originalSequenceId = -1L; callback.startTimeNs = startTimeNs; callback.isMarker = isMarker; if (callback.propertyMap != null) { callback.propertyMap.clear(); } return callback; } static MessagePublishContext get(Producer producer, long lowestSequenceId, long highestSequenceId, Rate rateIn, int msgSize, long batchSize, boolean chunked, long startTimeNs, boolean isMarker) { MessagePublishContext callback = RECYCLER.get(); callback.producer = producer; callback.sequenceId = lowestSequenceId; callback.highestSequenceId = highestSequenceId; callback.rateIn = rateIn; callback.msgSize = msgSize; callback.batchSize = batchSize; callback.originalProducerName = null; callback.originalSequenceId = -1L; callback.startTimeNs = startTimeNs; callback.chunked = chunked; callback.isMarker = isMarker; if (callback.propertyMap != null) { callback.propertyMap.clear(); } return callback; } @Override public long getNumberOfMessages() { return batchSize; } @Override public boolean isMarkerMessage() { return isMarker; } private final Handle<MessagePublishContext> recyclerHandle; private MessagePublishContext(Handle<MessagePublishContext> recyclerHandle) { this.recyclerHandle = recyclerHandle; } private static final Recycler<MessagePublishContext> RECYCLER = new Recycler<MessagePublishContext>() { protected MessagePublishContext newObject(Handle<MessagePublishContext> handle) { return new MessagePublishContext(handle); } }; public void recycle() { producer = null; sequenceId = -1L; highestSequenceId = -1L; originalSequenceId = -1L; originalHighestSequenceId = -1L; rateIn = null; msgSize = 0; ledgerId = -1L; entryId = -1L; batchSize = 0L; startTimeNs = -1L; chunked = false; isMarker = false; if (propertyMap != null) { propertyMap.clear(); } recyclerHandle.recycle(this); } } public Topic getTopic() { return topic; } public String getProducerName() { return producerName; } public long getProducerId() { return producerId; } public Map<String, String> getMetadata() { return metadata; } 
@Override public String toString() { return MoreObjects.toStringHelper(this).add("topic", topic).add("client", cnx.clientAddress()) .add("producerName", producerName).add("producerId", producerId).toString(); } /** * Close the producer immediately if: a. the connection is dropped b. it's a graceful close and no pending publish * acks are left else wait for pending publish acks * * @return completable future indicate completion of close */ public synchronized CompletableFuture<Void> close(boolean removeFromTopic) { if (log.isDebugEnabled()) { log.debug("Closing producer {} -- isClosed={}", this, isClosed); } if (!isClosed) { isClosed = true; if (log.isDebugEnabled()) { log.debug("Trying to close producer {} -- cnxIsActive: {} -- pendingPublishAcks: {}", this, cnx.isActive(), pendingPublishAcks); } if (!cnx.isActive() || pendingPublishAcks == 0) { closeNow(removeFromTopic); } } return closeFuture; } public void closeNow(boolean removeFromTopic) { if (removeFromTopic) { topic.removeProducer(this); } cnx.removedProducer(this); if (log.isDebugEnabled()) { log.debug("Removed producer: {}", this); } closeFuture.complete(null); isDisconnecting.set(false); } /** * It closes the producer from server-side and sends command to client to disconnect producer from existing * connection without closing that connection. 
* * @return Completable future indicating completion of producer close */ public CompletableFuture<Void> disconnect() { if (!closeFuture.isDone() && isDisconnecting.compareAndSet(false, true)) { log.info("Disconnecting producer: {}", this); cnx.execute(() -> { cnx.closeProducer(this); closeNow(true); }); } return closeFuture; } public void updateRates() { msgIn.calculateRate(); chunkedMessageRate.calculateRate(); stats.msgRateIn = msgIn.getRate(); stats.msgThroughputIn = msgIn.getValueRate(); stats.averageMsgSize = msgIn.getAverageValue(); stats.chunkedMessageRate = chunkedMessageRate.getRate(); if (chunkedMessageRate.getCount() > 0 && this.topic instanceof PersistentTopic) { ((PersistentTopic) this.topic).msgChunkPublished = true; } if (this.isNonPersistentTopic) { msgDrop.calculateRate(); ((NonPersistentPublisherStatsImpl) stats).msgDropRate = msgDrop.getRate(); } } public void updateRates(int numOfMessages, long msgSizeInBytes) { msgIn.recordMultipleEvents(numOfMessages, msgSizeInBytes); } public boolean isRemote() { return isRemote; } public String getRemoteCluster() { return remoteCluster; } public PublisherStatsImpl getStats() { return stats; } public boolean isNonPersistentTopic() { return isNonPersistentTopic; } public long getEpoch() { return epoch; } public boolean isUserProvidedProducerName() { return userProvidedProducerName; } @VisibleForTesting long getPendingPublishAcks() { return pendingPublishAcks; } public CompletableFuture<Void> checkPermissionsAsync() { TopicName topicName = TopicName.get(topic.getName()); if (cnx.getBrokerService().getAuthorizationService() != null) { return cnx.getBrokerService().getAuthorizationService() .canProduceAsync(topicName, appId, cnx.getAuthenticationData()) .handle((ok, ex) -> { if (ex != null) { log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, topic.getName(), ex.getMessage(), ex); } if (ok == null || !ok) { log.info("[{}] is not allowed to produce on topic [{}] anymore", appId, 
topic.getName()); disconnect(); } return null; }); } return CompletableFuture.completedFuture(null); } public void checkEncryption() { if (topic.isEncryptionRequired() && !isEncrypted) { log.info("[{}] [{}] Unencrypted producer is not allowed to produce on topic [{}] anymore", producerId, producerName, topic.getName()); disconnect(); } } public void publishTxnMessage(TxnID txnID, long producerId, long sequenceId, long highSequenceId, ByteBuf headersAndPayload, long batchSize, boolean isChunked, boolean isMarker) { checkAndStartPublish(producerId, sequenceId, headersAndPayload, batchSize); topic.publishTxnMessage(txnID, headersAndPayload, MessagePublishContext.get(this, sequenceId, highSequenceId, msgIn, headersAndPayload.readableBytes(), batchSize, isChunked, System.nanoTime(), isMarker)); } public SchemaVersion getSchemaVersion() { return schemaVersion; } public ProducerAccessMode getAccessMode() { return accessMode; } public Optional<Long> getTopicEpoch() { return topicEpoch; } public String getClientAddress() { return clientAddress; } public boolean isDisconnecting() { return isDisconnecting.get(); } private static final Logger log = LoggerFactory.getLogger(Producer.class); }
12,608
739
#include "Symbol.h"

#include <windows.h>
#include <dbghelp.h>

using namespace std;

// A linker symbol: a name bound to a value/flags pair inside a hunk.
// `miscString` is optional auxiliary data; it is copied only when non-null.
Symbol::Symbol(const char* name, int value, unsigned int flags, Hunk* hunk, const char* miscString) :
	name(name), value(value), flags(flags), hunk(hunk), fromLibrary(false), hunk_offset(0)
{
	if(miscString)
		this->miscString = miscString;
}

// Returns the human-readable form of this (possibly MSVC-decorated) symbol name.
// Falls back to the raw decorated name when demangling fails.
std::string Symbol::GetUndecoratedName() const {
	string str = name;
	char buff[1024];
	// BUGFIX: UnDecorateSymbolName returns 0 on failure and leaves `buff`
	// unspecified; the previous code returned uninitialized stack bytes in
	// that case. NOTE(review): DbgHelp functions are documented as
	// single-threaded -- callers must not invoke this concurrently.
	DWORD len = UnDecorateSymbolName(str.c_str(), buff, sizeof(buff),
	                                 UNDNAME_COMPLETE | UNDNAME_32_BIT_DECODE);
	if (len == 0)
		return str;  // demangling failed; keep the decorated name
	return string(buff);
}
215
335
<reponame>Safal08/Hacktoberfest-1 { "word": "Excited", "definitions": [ "Very enthusiastic and eager.", "Sexually aroused.", "Of or in an energy state higher than the normal or ground state." ], "parts-of-speech": "Adjective" }
111
322
/*
 * Copyright (c) <NAME>, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include <stdio.h>     // printf
#include <stdlib.h>    // free
#include <zstd.h>      // presumes zstd library is installed
#include "common.h"    // Helper functions, CHECK(), and CHECK_ZSTD()

/* createDict() :
   `dictFileName` is supposed to have been created using `zstd --train` */
static ZSTD_DDict* createDict_orDie(const char* dictFileName)
{
    size_t dictSize;
    printf("loading dictionary %s \n", dictFileName);
    void* const dictBuffer = mallocAndLoadFile_orDie(dictFileName, &dictSize);
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictSize);
    CHECK(ddict != NULL, "ZSTD_createDDict() failed!");
    /* ZSTD_createDDict keeps its own copy of the dictionary, so the raw
       file buffer can be released immediately. */
    free(dictBuffer);
    return ddict;
}

/* Decompresses the zstd file `fname` entirely in memory and verifies that it
   was produced with the dictionary wrapped by `ddict`.  Exits on any error
   (via the CHECK macros from common.h). */
static void decompress(const char* fname, const ZSTD_DDict* ddict)
{
    size_t cSize;
    void* const cBuff = mallocAndLoadFile_orDie(fname, &cSize);
    /* Read the content size from the frame header. For simplicity we require
     * that it is always present. By default, zstd will write the content size
     * in the header when it is known. If you can't guarantee that the frame
     * content size is always written into the header, either use streaming
     * decompression, or ZSTD_decompressBound().
     */
    unsigned long long const rSize = ZSTD_getFrameContentSize(cBuff, cSize);
    CHECK(rSize != ZSTD_CONTENTSIZE_ERROR, "%s: not compressed by zstd!", fname);
    CHECK(rSize != ZSTD_CONTENTSIZE_UNKNOWN, "%s: original size unknown!", fname);
    void* const rBuff = malloc_orDie((size_t)rSize);

    /* Check that the dictionary ID matches.
     * If a non-zstd dictionary is used, then both will be zero.
     * By default zstd always writes the dictionary ID into the frame.
     * Zstd will check if there is a dictionary ID mismatch as well.
     */
    unsigned const expectedDictID = ZSTD_getDictID_fromDDict(ddict);
    unsigned const actualDictID = ZSTD_getDictID_fromFrame(cBuff, cSize);
    CHECK(actualDictID == expectedDictID,
          "DictID mismatch: expected %u got %u",
          expectedDictID,
          actualDictID);

    /* Decompress using the dictionary.
     * If you need to control the decompression parameters, then use the
     * advanced API: ZSTD_DCtx_setParameter(), ZSTD_DCtx_refDDict(), and
     * ZSTD_decompressDCtx().
     */
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    CHECK(dctx != NULL, "ZSTD_createDCtx() failed!");
    size_t const dSize = ZSTD_decompress_usingDDict(dctx, rBuff, rSize, cBuff, cSize, ddict);
    CHECK_ZSTD(dSize);
    /* When zstd knows the content size, it will error if it doesn't match. */
    CHECK(dSize == rSize, "Impossible because zstd will check this condition!");

    /* success */
    printf("%25s : %6u -> %7u \n", fname, (unsigned)cSize, (unsigned)rSize);

    ZSTD_freeDCtx(dctx);
    free(rBuff);
    free(cBuff);
}

/* usage: dictionary_decompression FILE... DICTIONARY
   The dictionary (last argument) is loaded once and reused for every file. */
int main(int argc, const char** argv)
{
    const char* const exeName = argv[0];

    if (argc<3) {
        printf("wrong arguments\n");
        printf("usage:\n");
        printf("%s [FILES] dictionary\n", exeName);
        return 1;
    }

    /* load dictionary only once */
    const char* const dictName = argv[argc-1];
    ZSTD_DDict* const dictPtr = createDict_orDie(dictName);

    int u;
    for (u=1; u<argc-1; u++) decompress(argv[u], dictPtr);

    ZSTD_freeDDict(dictPtr);
    printf("All %u files correctly decoded (in memory) \n", argc-2);
    return 0;
}
1,386
14,668
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_CHROME_BROWSER_PASSWORDS_IOS_CHROME_PASSWORD_INFOBAR_METRICS_RECORDER_H_
#define IOS_CHROME_BROWSER_PASSWORDS_IOS_CHROME_PASSWORD_INFOBAR_METRICS_RECORDER_H_

#import <Foundation/Foundation.h>

// Password Infobars types. Since these are used for metrics, entries should not
// be renumbered and numeric values should never be reused.
enum class PasswordInfobarType {
  // Message Infobar for Saving a password.
  kPasswordInfobarTypeSave = 0,
  // Message Infobar for Updating a password.
  kPasswordInfobarTypeUpdate = 1,
};

// Values for the UMA Mobile.Messages.Passwords.Modal.Event histogram. These
// values are persisted to logs. Entries should not be renumbered and numeric
// values should never be reused.
enum class MobileMessagesPasswordsModalEvent {
  // PasswordInfobar username was edited.
  EditedUserName = 0,
  // PasswordInfobar password was edited.
  EditedPassword = 1,
  // PasswordInfobar password was unmasked.
  UnmaskedPassword = 2,
  // PasswordInfobar password was masked.
  MaskedPassword = 3,
  // Highest enumerator. Recommended by Histogram metrics best practices.
  kMaxValue = MaskedPassword,
};

// Values for the UMA Mobile.Messages.Passwords.Modal.Dismiss histogram. These
// values are persisted to logs. Entries should not be renumbered and numeric
// values should never be reused.
enum class MobileMessagesPasswordsModalDismiss {
  // PasswordInfobar was tapped on Never For This Site.
  TappedNeverForThisSite = 0,
  // PasswordInfobar credentials were saved.
  SavedCredentials = 1,
  // PasswordInfobar credentials were updated.
  UpdatedCredentials = 2,
  // Highest enumerator. Recommended by Histogram metrics best practices.
  kMaxValue = UpdatedCredentials,
};

// Values for the UMA Mobile.Messages.Passwords.Modal.Present histogram. These
// values are persisted to logs. Entries should not be renumbered and numeric
// values should never be reused.
enum class MobileMessagesPasswordsModalPresent {
  // PasswordInfobar was presented after a Save Password banner was
  // presented.
  PresentedAfterSaveBanner = 0,
  // PasswordInfobar was presented after an Update Password banner was
  // presented.
  PresentedAfterUpdateBanner = 1,
  // Highest enumerator. Recommended by Histogram metrics best practices.
  kMaxValue = PresentedAfterUpdateBanner,
};

// Used to record metrics related to Password Infobar events.
@interface IOSChromePasswordInfobarMetricsRecorder : NSObject

// Designated initializer. IOSChromePasswordInfobarMetricsRecorder will record
// metrics for |passwordInfobarType|.
- (instancetype)initWithType:(PasswordInfobarType)passwordInfobarType
    NS_DESIGNATED_INITIALIZER;

- (instancetype)init NS_UNAVAILABLE;

// Records histogram for Modal |event|.
- (void)recordModalEvent:(MobileMessagesPasswordsModalEvent)event;

// Records histogram for Modal |dismissType|.
- (void)recordModalDismiss:(MobileMessagesPasswordsModalDismiss)dismissType;

// Records histogram for Modal |presentContext|.
- (void)recordModalPresent:(MobileMessagesPasswordsModalPresent)presentContext;

@end

#endif  // IOS_CHROME_BROWSER_PASSWORDS_IOS_CHROME_PASSWORD_INFOBAR_METRICS_RECORDER_H_
1,004
2,268
//========= Copyright Valve Corporation, All rights reserved. ============// // // Purpose: // // $NoKeywords: $ //=============================================================================// #ifndef CHOREOSCENE_H #define CHOREOSCENE_H #ifdef _WIN32 #pragma once #endif class CChoreoEvent; class CChoreoChannel; class CChoreoActor; class IChoreoEventCallback; class CEventRelativeTag; class CUtlBuffer; class CFlexAnimationTrack; class ISceneTokenProcessor; class IChoreoStringPool; #include "tier1/utlvector.h" #include "tier1/utldict.h" #include "bitvec.h" #include "expressionsample.h" #include "choreoevent.h" #define DEFAULT_SCENE_FPS 60 #define MIN_SCENE_FPS 10 #define MAX_SCENE_FPS 240 #define SCENE_BINARY_TAG MAKEID( 'b', 'v', 'c', 'd' ) #define SCENE_BINARY_VERSION 0x04 //----------------------------------------------------------------------------- // Purpose: Container for choreographed scene of events for actors //----------------------------------------------------------------------------- class CChoreoScene : public ICurveDataAccessor { typedef enum { PROCESSING_TYPE_IGNORE = 0, PROCESSING_TYPE_START, PROCESSING_TYPE_START_RESUMECONDITION, PROCESSING_TYPE_CONTINUE, PROCESSING_TYPE_STOP, } PROCESSING_TYPE; struct ActiveList { PROCESSING_TYPE pt; CChoreoEvent *e; }; public: // Construction CChoreoScene( IChoreoEventCallback *callback ); ~CChoreoScene( void ); // Assignment CChoreoScene& operator=(const CChoreoScene& src ); // ICurveDataAccessor methods virtual float GetDuration() { return FindStopTime(); }; virtual bool CurveHasEndTime(); virtual int GetDefaultCurveType(); // Binary serialization bool SaveBinary( char const *pszBinaryFileName, char const *pPathID, unsigned int nTextVersionCRC, IChoreoStringPool *pStringPool ); void SaveToBinaryBuffer( CUtlBuffer& buf, unsigned int nTextVersionCRC, IChoreoStringPool *pStringPool ); bool RestoreFromBinaryBuffer( CUtlBuffer& buf, char const *filename, IChoreoStringPool *pStringPool ); static bool 
GetCRCFromBinaryBuffer( CUtlBuffer& buf, unsigned int& crc ); // We do some things differently while restoring from a save. inline void SetRestoring( bool bRestoring ); inline bool IsRestoring(); enum { MAX_SCENE_FILENAME = 128, }; // Event callback handler void SetEventCallbackInterface( IChoreoEventCallback *callback ); // Loading bool ParseFromBuffer( char const *pFilename, ISceneTokenProcessor *tokenizer ); void SetPrintFunc( void ( *pfn )( PRINTF_FORMAT_STRING const char *fmt, ... ) ); // Saving bool SaveToFile( const char *filename ); bool ExportMarkedToFile( const char *filename ); void MarkForSaveAll( bool mark ); // Merges two .vcd's together, returns true if any data was merged bool Merge( CChoreoScene *other ); static void FileSaveFlexAnimationTrack( CUtlBuffer& buf, int level, CFlexAnimationTrack *track, int nDefaultCurveType ); static void FileSaveFlexAnimations( CUtlBuffer& buf, int level, CChoreoEvent *e ); static void FileSaveRamp( CUtlBuffer& buf, int level, CChoreoEvent *e ); void FileSaveSceneRamp( CUtlBuffer& buf, int level ); static void FileSaveScaleSettings( CUtlBuffer& buf, int level, CChoreoScene *scene ); static void ParseFlexAnimations( ISceneTokenProcessor *tokenizer, CChoreoEvent *e, bool removeold = true ); static void ParseRamp( ISceneTokenProcessor *tokenizer, CChoreoEvent *e ); static void ParseSceneRamp( ISceneTokenProcessor *tokenizer, CChoreoScene *scene ); static void ParseScaleSettings( ISceneTokenProcessor *tokenizer, CChoreoScene *scene ); static void ParseEdgeInfo( ISceneTokenProcessor *tokenizer, EdgeInfo_t *edgeinfo ); // Debugging void SceneMsg( PRINTF_FORMAT_STRING const char *pFormat, ... 
); void Print( void ); // Sound system needs to have sounds pre-queued by this much time void SetSoundFileStartupLatency( float time ); // Simulation void Think( float curtime ); float LoopThink( float curtime ); void ProcessActiveListEntry( ActiveList *entry ); // Retrieves time in simulation float GetTime( void ); // Retrieves start/stop time for looped/debug scene void GetSceneTimes( float& start, float& end ); void SetTime( float t ); void LoopToTime( float t ); // Has simulation finished bool SimulationFinished( void ); // Reset simulation void ResetSimulation( bool forward = true, float starttime = 0.0f, float endtime = 0.0f ); // Find time at which last simulation event is triggered float FindStopTime( void ); void ResumeSimulation( void ); // Have all the pause events happened bool CheckEventCompletion( void ); // Find named actor in scene data CChoreoActor *FindActor( const char *name ); // Remove actor from scene void RemoveActor( CChoreoActor *actor ); // Find index for actor int FindActorIndex( CChoreoActor *actor ); // Swap actors in the data void SwapActors( int a1, int a2 ); // General data access int GetNumEvents( void ); CChoreoEvent *GetEvent( int event ); int GetNumActors( void ); CChoreoActor *GetActor( int actor ); int GetNumChannels( void ); CChoreoChannel *GetChannel( int channel ); // Object allocation/destruction void DeleteReferencedObjects( CChoreoActor *actor ); void DeleteReferencedObjects( CChoreoChannel *channel ); void DeleteReferencedObjects( CChoreoEvent *event ); CChoreoActor *AllocActor( void ); CChoreoChannel *AllocChannel( void ); CChoreoEvent *AllocEvent( void ); void AddEventToScene( CChoreoEvent *event ); void AddActorToScene( CChoreoActor *actor ); void AddChannelToScene( CChoreoChannel *channel ); // Fixup simulation times for channel gestures void ReconcileGestureTimes( void ); // Go through all elements and update relative tags, removing any orphaned // tags and updating the timestamp of normal tags void ReconcileTags( 
void ); CEventRelativeTag *FindTagByName( const char *wavname, const char *name ); CChoreoEvent *FindTargetingEvent( const char *wavname, const char *name ); // Used by UI to provide target actor names char const *GetMapname( void ); void SetMapname( const char *name ); void ExportEvents( const char *filename, CUtlVector< CChoreoEvent * >& events ); void ImportEvents( ISceneTokenProcessor *tokenizer, CChoreoActor *actor, CChoreoChannel *channel ); // Subscene support void SetSubScene( bool sub ); bool IsSubScene( void ) const; int GetSceneFPS( void ) const; void SetSceneFPS( int fps ); bool IsUsingFrameSnap( void ) const; void SetUsingFrameSnap( bool snap ); float SnapTime( float t ); int GetSceneRampCount( void ) { return m_SceneRamp.GetCount(); }; CExpressionSample *GetSceneRamp( int index ) { return m_SceneRamp.Get( index ); }; CExpressionSample *AddSceneRamp( float time, float value, bool selected ) { return m_SceneRamp.Add( time, value, selected ); }; void DeleteSceneRamp( int index ) { m_SceneRamp.Delete( index ); }; void ClearSceneRamp( void ) { m_SceneRamp.Clear(); }; void ResortSceneRamp( void ) { m_SceneRamp.Resort( this ); }; CCurveData *GetSceneRamp( void ) { return &m_SceneRamp; }; // Global intensity for scene float GetSceneRampIntensity( float time ) { return m_SceneRamp.GetIntensity( this, time ); } int GetTimeZoom( char const *tool ); void SetTimeZoom( char const *tool, int tz ); int TimeZoomFirst(); int TimeZoomNext( int i ); int TimeZoomInvalid() const; char const *TimeZoomName( int i ); void ReconcileCloseCaption(); char const *GetFilename() const; void SetFileName( char const *fn ); bool GetPlayingSoundName( char *pchBuff, int iBuffLength ); bool HasUnplayedSpeech(); bool HasFlexAnimation(); void SetBackground( bool bIsBackground ); bool IsBackground( void ); void ClearPauseEventDependencies(); bool HasEventsOfType( CChoreoEvent::EVENTTYPE type ) const; void RemoveEventsExceptTypes( int* typeList, int count ); void IgnorePhonemes( bool bIgnore 
); bool ShouldIgnorePhonemes() const; // This is set by the engine to signify that we're not modifying the data and // therefore we can precompute the end time static bool s_bEditingDisabled; private: // Simulation stuff enum { IN_RANGE = 0, BEFORE_RANGE, AFTER_RANGE }; int IsTimeInRange( float t, float starttime, float endtime ); static bool EventLess( const CChoreoScene::ActiveList &al0, const CChoreoScene::ActiveList &al1 ); int EventThink( CChoreoEvent *e, float frame_start_time, float frame_end_time, bool playing_forward, PROCESSING_TYPE& disposition ); // Prints to debug console, etc void choreoprintf( int level, PRINTF_FORMAT_STRING const char *fmt, ... ); // Initialize scene void Init( IChoreoEventCallback *callback ); float FindAdjustedStartTime( void ); float FindAdjustedEndTime( void ); CChoreoEvent *FindPauseBetweenTimes( float starttime, float endtime ); // Parse scenes from token buffer CChoreoEvent *ParseEvent( CChoreoActor *actor, CChoreoChannel *channel ); CChoreoChannel *ParseChannel( CChoreoActor *actor ); CChoreoActor *ParseActor( void ); void ParseFPS( void ); void ParseSnap( void ); void ParseIgnorePhonemes( void ); // Map file for retrieving named objects void ParseMapname( void ); // When previewing actor in hlfaceposer, this is the model to associate void ParseFacePoserModel( CChoreoActor *actor ); // Print to printfunc void PrintEvent( int level, CChoreoEvent *e ); void PrintChannel( int level, CChoreoChannel *c ); void PrintActor( int level, CChoreoActor *a ); // File I/O public: static void FilePrintf( CUtlBuffer& buf, int level, PRINTF_FORMAT_STRING const char *fmt, ... 
); private: void FileSaveEvent( CUtlBuffer& buf, int level, CChoreoEvent *e ); void FileSaveChannel( CUtlBuffer& buf, int level, CChoreoChannel *c ); void FileSaveActor( CUtlBuffer& buf, int level, CChoreoActor *a ); void FileSaveHeader( CUtlBuffer& buf ); // Object destruction void DestroyActor( CChoreoActor *actor ); void DestroyChannel( CChoreoChannel *channel ); void DestroyEvent( CChoreoEvent *event ); void AddPauseEventDependency( CChoreoEvent *pauseEvent, CChoreoEvent *suppressed ); void InternalDetermineEventTypes(); // Global object storage CUtlVector < CChoreoEvent * > m_Events; CUtlVector < CChoreoActor * > m_Actors; CUtlVector < CChoreoChannel * > m_Channels; // These are just pointers, the actual objects are in m_Events CUtlVector < CChoreoEvent * > m_ResumeConditions; // These are just pointers, the actual objects are in m_Events CUtlVector < CChoreoEvent * > m_ActiveResumeConditions; // These are just pointers, the actual objects are in m_Events CUtlVector < CChoreoEvent * > m_PauseEvents; // Current simulation time float m_flCurrentTime; float m_flStartLoopTime; float m_flStartTime; float m_flEndTime; float m_flEarliestTime; float m_flLatestTime; int m_nActiveEvents; // Wave file playback needs to issue play commands a bit ahead of time // in order to hit exact marks float m_flSoundSystemLatency; // Scene's linger a bit after finishing to let blends reset themselves float m_flLastActiveTime; // Print callback function void ( *m_pfnPrint )( PRINTF_FORMAT_STRING const char *fmt, ... 
); IChoreoEventCallback *m_pIChoreoEventCallback; ISceneTokenProcessor *m_pTokenizer; enum { MAX_MAPNAME = 128 }; char m_szMapname[ MAX_MAPNAME ]; int m_nSceneFPS; CCurveData m_SceneRamp; CUtlDict< int, int > m_TimeZoomLookup; char m_szFileName[ MAX_SCENE_FILENAME ]; CBitVec< CChoreoEvent::NUM_TYPES > m_bitvecHasEventOfType; // tag to suppress vcd when others are playing bool m_bIsBackground : 1; bool m_bIgnorePhonemes : 1; bool m_bSubScene : 1; bool m_bUseFrameSnap : 1; bool m_bRestoring : 1; int m_nLastPauseEvent; // This only gets updated if it's loaded from a buffer which means we're not in an editor float m_flPrecomputedStopTime; }; bool CChoreoScene::IsRestoring() { return m_bRestoring; } void CChoreoScene::SetRestoring( bool bRestoring ) { m_bRestoring = bRestoring; } abstract_class IChoreoStringPool { public: virtual short FindOrAddString( const char *pString ) = 0; virtual bool GetString( short stringId, char *buff, int buffSize ) = 0; }; CChoreoScene *ChoreoLoadScene( char const *filename, IChoreoEventCallback *callback, ISceneTokenProcessor *tokenizer, void ( *pfn ) ( PRINTF_FORMAT_STRING const char *fmt, ... ) ); bool IsBufferBinaryVCD( char *pBuffer, int bufferSize ); #endif // CHOREOSCENE_H
4,562
6,989
#pragma once

namespace NNeh {
    class IProtocol;

    // Factory accessor for the TCP transport protocol implementation.
    // Declaration only -- the definition (and the returned object's
    // ownership/lifetime) lives elsewhere; TODO confirm against the .cpp.
    IProtocol* TcpProtocol();
}
50
507
# tests/test_provider_hashicorp_opc.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:23:41 UTC)
#
# Import smoke-tests: each test only verifies that the generated terrascript
# classes for the Oracle Public Cloud (opc) Terraform provider exist and are
# importable; no provider behavior is exercised.


def test_provider_import():
    """The opc provider module itself is importable."""
    import terrascript.provider.hashicorp.opc


def test_resource_import():
    """Every generated opc resource class is importable."""
    from terrascript.resource.hashicorp.opc import opc_compute_acl
    from terrascript.resource.hashicorp.opc import opc_compute_image_list
    from terrascript.resource.hashicorp.opc import opc_compute_image_list_entry
    from terrascript.resource.hashicorp.opc import opc_compute_instance
    from terrascript.resource.hashicorp.opc import opc_compute_ip_address_association
    from terrascript.resource.hashicorp.opc import opc_compute_ip_address_prefix_set
    from terrascript.resource.hashicorp.opc import opc_compute_ip_address_reservation
    from terrascript.resource.hashicorp.opc import opc_compute_ip_association
    from terrascript.resource.hashicorp.opc import opc_compute_ip_network
    from terrascript.resource.hashicorp.opc import opc_compute_ip_network_exchange
    from terrascript.resource.hashicorp.opc import opc_compute_ip_reservation
    from terrascript.resource.hashicorp.opc import opc_compute_machine_image
    from terrascript.resource.hashicorp.opc import opc_compute_orchestrated_instance
    from terrascript.resource.hashicorp.opc import opc_compute_route
    from terrascript.resource.hashicorp.opc import opc_compute_sec_rule
    from terrascript.resource.hashicorp.opc import opc_compute_security_application
    from terrascript.resource.hashicorp.opc import opc_compute_security_association
    from terrascript.resource.hashicorp.opc import opc_compute_security_ip_list
    from terrascript.resource.hashicorp.opc import opc_compute_security_list
    from terrascript.resource.hashicorp.opc import opc_compute_security_protocol
    from terrascript.resource.hashicorp.opc import opc_compute_security_rule
    from terrascript.resource.hashicorp.opc import opc_compute_snapshot
    from terrascript.resource.hashicorp.opc import opc_compute_ssh_key
    from terrascript.resource.hashicorp.opc import opc_compute_storage_attachment
    from terrascript.resource.hashicorp.opc import opc_compute_storage_volume
    from terrascript.resource.hashicorp.opc import opc_compute_storage_volume_snapshot
    from terrascript.resource.hashicorp.opc import opc_compute_vnic_set
    from terrascript.resource.hashicorp.opc import opc_compute_vpn_endpoint_v2
    from terrascript.resource.hashicorp.opc import opc_lbaas_certificate
    from terrascript.resource.hashicorp.opc import opc_lbaas_listener
    from terrascript.resource.hashicorp.opc import opc_lbaas_load_balancer
    from terrascript.resource.hashicorp.opc import opc_lbaas_policy
    from terrascript.resource.hashicorp.opc import opc_lbaas_server_pool
    from terrascript.resource.hashicorp.opc import opc_storage_container
    from terrascript.resource.hashicorp.opc import opc_storage_object


def test_datasource_import():
    """Every generated opc data-source class is importable."""
    from terrascript.data.hashicorp.opc import opc_compute_image_list_entry
    from terrascript.data.hashicorp.opc import opc_compute_ip_address_reservation
    from terrascript.data.hashicorp.opc import opc_compute_ip_reservation
    from terrascript.data.hashicorp.opc import opc_compute_machine_image
    from terrascript.data.hashicorp.opc import opc_compute_network_interface
    from terrascript.data.hashicorp.opc import opc_compute_ssh_key
    from terrascript.data.hashicorp.opc import opc_compute_storage_volume_snapshot
    from terrascript.data.hashicorp.opc import opc_compute_vnic


# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
#    import terrascript.provider.hashicorp.opc
#
#    t = terrascript.provider.hashicorp.opc.opc()
#    s = str(t)
#
#    assert 'https://github.com/terraform-providers/terraform-provider-opc' in s
#    assert '1.4.1' in s
1,408
763
package org.batfish.representation.aws;

import static com.google.common.base.Preconditions.checkArgument;
import static org.batfish.representation.aws.AwsVpcEntity.JSON_KEY_FROM;
import static org.batfish.representation.aws.AwsVpcEntity.JSON_KEY_TO;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import java.util.Objects;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;

/** Represents a port range for a network acl entry */
@ParametersAreNonnullByDefault
final class PortRange implements Serializable {

  /** Start of the range, as read from the ACL entry's "from" field. */
  private final int _from;

  /** End of the range, as read from the ACL entry's "to" field. */
  private final int _to;

  PortRange(int from, int to) {
    _from = from;
    _to = to;
  }

  /** Jackson factory: rejects ACL entries that omit either endpoint. */
  @JsonCreator
  private static PortRange create(
      @Nullable @JsonProperty(JSON_KEY_FROM) Integer fromPort,
      @Nullable @JsonProperty(JSON_KEY_TO) Integer toPort) {
    checkArgument(fromPort != null, "From port cannot be null in port range");
    checkArgument(toPort != null, "To port cannot be null in port range");
    return new PortRange(fromPort, toPort);
  }

  int getFrom() {
    return _from;
  }

  int getTo() {
    return _to;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof PortRange)) {
      return false;
    }
    PortRange that = (PortRange) o;
    return _from == that._from && _to == that._to;
  }

  @Override
  public int hashCode() {
    return Objects.hash(_from, _to);
  }
}
550
2,023
"""Group an iterable of personal names by last-name initial.

Each name is a string of the form ``first [middle] last`` (middle optional).
"""
import itertools


def groupnames(name_iterable):
    """Return a dict keyed by last-name initial.

    Each value is a tuple of the names sharing that initial, ordered by
    last, then first, then middle name.
    """
    by_last = sorted(name_iterable, key=sortkeyfunc)
    return {
        initial: tuple(names)
        for initial, names in itertools.groupby(by_last, groupkeyfunc)
    }


def sortkeyfunc(name):
    """Return *name* rearranged into last-first-middle order."""
    parts = name.split()
    ordered = [parts[-1], parts[0]]
    if len(parts) == 3:
        ordered.append(parts[1])
    return ' '.join(ordered)


def groupkeyfunc(name):
    """Return the initial letter of the last name."""
    return name.split()[-1][0]


if __name__ == '__main__':
    import doctest
    doctest.testmod()
486
631
package app.controllers;

import org.javalite.activeweb.AppController;
import org.javalite.activeweb.annotations.DELETE;
import org.javalite.common.Convert;

/**
 * Test fixture controller whose single action is restricted to HTTP DELETE.
 *
 * Created by igor on 4/22/14.
 */
public class AcceptsDeleteController extends AppController {

    /** Responds with the string form of {@code isDelete()} ("true"/"false"). */
    @DELETE
    public void delete(){
        respond(Convert.toString(isDelete()));
    }
}
153
601
{ "$schema": "../../../node_modules/@microsoft/sp-module-interfaces/lib/manifestSchemas/jsonSchemas/client-Side-Component-Manifest.Schema.json", "id": "c2a397d3-8c8f-47ab-b731-897178313c15", "alias": "ModernCalendarWebPart", "componentType": "WebPart", "version": "*", "manifestVersion": 2, "supportedHosts": ["SharePointWebPart"], "preconfiguredEntries": [{ "groupId": "c2a397d3-8c8f-47ab-b731-897178313c15", "group": { "default": "Modern Web Parts" }, "title": { "default": "ModernCalendar" }, "description": { "default": "SFPx Modern Calendar" }, "officeFabricIconFontName": "Calendar", "properties": { "description": "SPFx Modern Calendar", "other": false } }] }
319
542
package com.test.onesignal; import androidx.annotation.Nullable; import java.util.UUID; class TypeAsserts { static void assertIsUUID(@Nullable String value) { UUID.fromString(value); } }
78
315
/** * even_odd.c * * Creation Date: 03/04/2019 * * Authors: * <NAME> (https://github.com/LeoVen) * */ #include "cmc/deque.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> CMC_GENERATE_DEQUE(d, deque, size_t); typedef struct deque deque; typedef struct deque_iter deque_iter; int main(int argc, char const *argv[]) { // 1- Add odd numbers to the start of the deque and even numbers to the end. // 2- Iterate over the deque, starting from the end, until index is half // the deque count and stop. // 3- Sum of all even numbers deque *numbers = d_new(1000); for (size_t i = 1; i <= 100000; i++) { i % 2 == 0 ? d_push_back(numbers, i) : d_push_front(numbers, i); } deque_iter iter; d_iter_init(&iter, numbers); size_t result, index, sum = 0; for (d_iter_to_end(&iter); !d_iter_start(&iter); d_iter_prev(&iter)) { result = d_iter_value(&iter); index = d_iter_index(&iter); if ((double)index < (double)d_count(numbers) / 2.0) break; sum += result; } printf("Total sum: %lu\n", sum); assert(sum == 2500050000); return 0; }
517
903
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.replicator.nrt; import java.io.BufferedReader; import java.io.IOException; import java.io.PrintStream; import java.io.Writer; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; /** A pipe thread. It'd be nice to reuse guava's implementation for this... 
*/ class ThreadPumper { public static Thread start( final Runnable onExit, final BufferedReader from, final PrintStream to, final Writer toFile, final AtomicBoolean nodeClosing) { Thread t = new Thread() { @Override public void run() { try { long startTimeNS = System.nanoTime(); Pattern logTimeStart = Pattern.compile("^[0-9\\.]+s .*"); String line; while ((line = from.readLine()) != null) { if (toFile != null) { toFile.write(line); toFile.write("\n"); toFile.flush(); } else if (logTimeStart.matcher(line).matches()) { // Already a well-formed log output: System.out.println(line); } else { TestStressNRTReplication.message(line, startTimeNS); } if (line.contains("now force close server socket after")) { nodeClosing.set(true); } } // Sub-process finished } catch (IOException e) { System.err.println("ignore IOExc reading from forked process pipe: " + e); } finally { onExit.run(); } } }; t.start(); return t; } }
1,037
2,921
<filename>blockchains/smartchain/assets/0x092BBec1342affFd16Cfb41B56343D5A299CDf0D/info.json { "name": "ShibaCorgi", "website": "https://shibacorgi.dog", "description": "ShiCo is a MEME token inspired by Shiba & DOGE. Best pals ever!", "explorer": "https://bscscan.com/token/0x092BBec1342affFd16Cfb41B56343D5A299CDf0D", "symbol": "ShiCo", "type": "BEP20", "decimals": 9, "status": "active", "id": "0x092BBec1342affFd16Cfb41B56343D5A299CDf0D" }
234
370
"""Helpers for complex types used throughout MWS package.""" from typing import List, Union import mws MarketplaceEnumOrStr = Union[mws.Marketplaces, str] StrOrListStr = Union[List[str], str]
60
678
<gh_stars>100-1000 /** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/AirTraffic.framework/AirTraffic */ /* iOSOpenDev: commented-out (since file not found) #import <StoreServices/_SSDownloadPropertyStoreItemIdentifier.h> */ #import <AirTraffic/AirTraffic-Structs.h> @class NSDictionary; /* iOSOpenDev: replaced with next line (since file not found) @interface ATClientController : _SSDownloadPropertyStoreItemIdentifier { */ @interface ATClientController : NSObject { NSDictionary *_clientMap; // 4 = 0x4 CFDictionaryRef _queueMap; // 8 = 0x8 } + (id)sharedInstance; // 0x5ebd + (id)controllerForDataclasses:(id)dataclasses; // 0x5e45 - (void)dealloc; // 0x5f05 - (BOOL)_loadClientsForDataclasses:(id)dataclasses; // 0x598d - (dispatch_queue_s *)queueForClient:(id)client; // 0x5805 - (void)waitToDrain; // 0x571d - (void)resetQueues; // 0x5419 - (id)allClients; // 0x53f9 - (id)clientForDataclass:(id)dataclass; // 0x53d9 @end
373
560
<gh_stars>100-1000 /* * Copyright (c) 2018 <NAME> <<EMAIL>> * All Rights Reserved. */ package me.zhanghai.android.douya.eventbus; import me.zhanghai.android.douya.network.api.info.frodo.CollectableItem; public class ItemCollectErrorEvent extends Event { public CollectableItem.Type itemType; public long itemId; public ItemCollectErrorEvent(CollectableItem.Type itemType, long itemId, Object source) { super(source); this.itemType = itemType; this.itemId = itemId; } }
190
501
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sshd.client.config.keys; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyPair; import java.util.Collection; import java.util.Objects; import org.apache.sshd.common.NamedResource; import org.apache.sshd.common.config.keys.FilePasswordProvider; import org.apache.sshd.common.keyprovider.KeyIdentityProvider; import org.apache.sshd.common.session.SessionContext; import org.apache.sshd.common.util.GenericUtils; import org.apache.sshd.common.util.ValidateUtils; import org.apache.sshd.common.util.io.IoUtils; import org.apache.sshd.common.util.io.resource.PathResource; import org.apache.sshd.common.util.security.SecurityUtils; /** * @author <a href="mailto:<EMAIL>">Apache MINA SSHD Project</a> */ public interface ClientIdentityLoader { /** * <P> * A default implementation that assumes a file location that <U>must</U> exist. 
* </P> * * <P> * <B>Note:</B> It calls * {@link SecurityUtils#loadKeyPairIdentities(SessionContext, NamedResource, InputStream, FilePasswordProvider)} * </P> */ ClientIdentityLoader DEFAULT = new ClientIdentityLoader() { @Override public boolean isValidLocation(NamedResource location) throws IOException { Path path = toPath(location); return Files.exists(path, IoUtils.EMPTY_LINK_OPTIONS); } @Override public Iterable<KeyPair> loadClientIdentities( SessionContext session, NamedResource location, FilePasswordProvider provider) throws IOException, GeneralSecurityException { Path path = toPath(location); PathResource resource = new PathResource(path); try (InputStream inputStream = resource.openInputStream()) { return SecurityUtils.loadKeyPairIdentities(session, resource, inputStream, provider); } } @Override public String toString() { return "DEFAULT"; } private Path toPath(NamedResource location) { Objects.requireNonNull(location, "No location provided"); Path path = Paths .get(ValidateUtils.checkNotNullAndNotEmpty(location.getName(), "No location value for %s", location)); path = path.toAbsolutePath(); path = path.normalize(); return path; } }; /** * @param location The identity key-pair location - the actual meaning (file, URL, etc.) depends on the * implementation. * @return {@code true} if it represents a valid location - the actual meaning of the validity depends * on the implementation * @throws IOException If failed to validate the location */ boolean isValidLocation(NamedResource location) throws IOException; /** * @param session The {@link SessionContext} for invoking this load command - may be {@code null} * if not invoked within a session context (e.g., offline tool). * @param location The identity key-pair location - the actual meaning (file, URL, etc.) depends on * the implementation. 
* @param provider The {@link FilePasswordProvider} to consult if the location contains an * encrypted identity * @return The loaded {@link KeyPair} - {@code null} if location is empty and it is OK that * it does not exist * @throws IOException If failed to access / process the remote location * @throws GeneralSecurityException If failed to convert the contents into a valid identity */ Iterable<KeyPair> loadClientIdentities( SessionContext session, NamedResource location, FilePasswordProvider provider) throws IOException, GeneralSecurityException; /** * Uses the provided {@link ClientIdentityLoader} to <U>lazy</U> load the keys locations * * @param loader The loader instance to use * @param locations The locations to load - ignored if {@code null}/empty * @param passwordProvider The {@link FilePasswordProvider} to use if any encrypted keys found * @param ignoreNonExisting Whether to ignore non existing locations as indicated by * {@link #isValidLocation(NamedResource)} * @return The {@link KeyIdentityProvider} wrapper */ static KeyIdentityProvider asKeyIdentityProvider( ClientIdentityLoader loader, Collection<? extends NamedResource> locations, FilePasswordProvider passwordProvider, boolean ignoreNonExisting) { return GenericUtils.isEmpty(locations) ? KeyIdentityProvider.EMPTY_KEYS_PROVIDER : new LazyClientKeyIdentityProvider(loader, locations, passwordProvider, ignoreNonExisting); } }
2,247
17,703
<reponame>dcillera/envoy<gh_stars>1000+ #pragma once #include <chrono> #include <cstdint> #include <functional> #include <memory> #include <string> #include "envoy/common/pure.h" namespace Envoy { namespace Extensions { namespace Common { namespace Redis { using RefreshCB = std::function<void()>; /** * A manager for tracking events that would trigger a cluster refresh, and calling registered * callbacks when the error rate exceeds a configurable threshold (while ensuring that a minimum * time passes between calling the callback). */ class ClusterRefreshManager { public: class Handle { public: virtual ~Handle() = default; }; using HandlePtr = std::unique_ptr<Handle>; virtual ~ClusterRefreshManager() = default; /** * Notifies the manager that a redirection error has been received for a given cluster. * @param cluster_name is the name of the cluster. * @return bool true if a cluster's registered callback is scheduled on the main thread, false * otherwise. */ virtual bool onRedirection(const std::string& cluster_name) PURE; /** * Notifies the manager that a failure has been received for a given cluster. * @param cluster_name is the name of the cluster. * @return bool true if a cluster's registered callback is scheduled on the main thread, false * otherwise. */ virtual bool onFailure(const std::string& cluster_name) PURE; /** * Notifies the manager that a degraded host has been used for a given cluster. * @param cluster_name is the name of the cluster. * @return bool true if a cluster's registered callback is scheduled on the main thread, false * otherwise. */ virtual bool onHostDegraded(const std::string& cluster_name) PURE; /** * Register a cluster to be tracked by the manager (called by main thread only). * @param cluster_name is the name of the cluster. * @param min_time_between_triggering is the minimum amount of time that must pass between * callback invocations (redirects ignored and not counted during this time). 
* @param redirects_threshold is the number of redirects that must be reached to consider * calling the callback. * @param cb is the cluster callback function. * @return HandlePtr is a smart pointer to an opaque Handle that will unregister the cluster upon * destruction. */ virtual HandlePtr registerCluster(const std::string& cluster_name, std::chrono::milliseconds min_time_between_triggering, const uint32_t redirects_threshold, const uint32_t failure_threshold, const uint32_t host_degraded_threshold, const RefreshCB& cb) PURE; }; using ClusterRefreshManagerSharedPtr = std::shared_ptr<ClusterRefreshManager>; } // namespace Redis } // namespace Common } // namespace Extensions } // namespace Envoy
965
3,897
<gh_stars>1000+ /** \addtogroup hal */ /** @{*/ /* mbed Microcontroller Library * Copyright (c) 2020 ARM Limited * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MBED_PIN_NAME_ALIASES_H #define MBED_PIN_NAME_ALIASES_H /* Aliases for legacy reasons. To be removed in the next Mbed OS version */ #define USBTX CONSOLE_TX #define USBRX CONSOLE_RX #if defined (TARGET_FF_ARDUINO) || (TARGET_FF_ARDUINO_UNO) #ifdef TARGET_FF_ARDUINO_UNO #ifndef A0 #define A0 ARDUINO_UNO_A0 #endif #ifndef A1 #define A1 ARDUINO_UNO_A1 #endif #ifndef A2 #define A2 ARDUINO_UNO_A2 #endif #ifndef A3 #define A3 ARDUINO_UNO_A3 #endif #ifndef A4 #define A4 ARDUINO_UNO_A4 #endif #ifndef A5 #define A5 ARDUINO_UNO_A5 #endif #ifndef D0 #define D0 ARDUINO_UNO_D0 #endif #ifndef D1 #define D1 ARDUINO_UNO_D1 #endif #ifndef D2 #define D2 ARDUINO_UNO_D2 #endif #ifndef D3 #define D3 ARDUINO_UNO_D3 #endif #ifndef D4 #define D4 ARDUINO_UNO_D4 #endif #ifndef D5 #define D5 ARDUINO_UNO_D5 #endif #ifndef D6 #define D6 ARDUINO_UNO_D6 #endif #ifndef D7 #define D7 ARDUINO_UNO_D7 #endif #ifndef D8 #define D8 ARDUINO_UNO_D8 #endif #ifndef D9 #define D9 ARDUINO_UNO_D9 #endif #ifndef D10 #define D10 ARDUINO_UNO_D10 #endif #ifndef D11 #define D11 ARDUINO_UNO_D11 #endif #ifndef D12 #define D12 ARDUINO_UNO_D12 #endif #ifndef D13 #define D13 ARDUINO_UNO_D13 #endif #ifndef D14 #define D14 ARDUINO_UNO_D14 #endif #ifndef D15 #define D15 ARDUINO_UNO_D15 #endif #endif // TARGET_FF_ARDUINO_UNO #ifdef 
TARGET_FF_ARDUINO #warning ARDUINO form factor should not be used any more => use ARDUINO_UNO #define ARDUINO_UNO_A0 A0 #define ARDUINO_UNO_A1 A1 #define ARDUINO_UNO_A2 A2 #define ARDUINO_UNO_A3 A3 #define ARDUINO_UNO_A4 A4 #define ARDUINO_UNO_A5 A5 #define ARDUINO_UNO_D0 D0 #define ARDUINO_UNO_D1 D1 #define ARDUINO_UNO_D2 D2 #define ARDUINO_UNO_D3 D3 #define ARDUINO_UNO_D4 D4 #define ARDUINO_UNO_D5 D5 #define ARDUINO_UNO_D6 D6 #define ARDUINO_UNO_D7 D7 #define ARDUINO_UNO_D8 D8 #define ARDUINO_UNO_D9 D9 #define ARDUINO_UNO_D10 D10 #define ARDUINO_UNO_D11 D11 #define ARDUINO_UNO_D12 D12 #define ARDUINO_UNO_D13 D13 #define ARDUINO_UNO_D14 D14 #define ARDUINO_UNO_D15 D15 #endif // TARGET_FF_ARDUINO #ifdef TARGET_FF_ARDUINO_UNO // Arduino Uno I2C signals aliases #define ARDUINO_UNO_I2C_SDA ARDUINO_UNO_D14 #define ARDUINO_UNO_I2C_SCL ARDUINO_UNO_D15 // Legacy I2C aliases #ifndef I2C_SDA #define I2C_SDA ARDUINO_UNO_I2C_SDA #endif #ifndef I2C_SCL #define I2C_SCL ARDUINO_UNO_I2C_SCL #endif // Arduino Uno SPI signals aliases #define ARDUINO_UNO_SPI_CS ARDUINO_UNO_D10 #define ARDUINO_UNO_SPI_MOSI ARDUINO_UNO_D11 #define ARDUINO_UNO_SPI_MISO ARDUINO_UNO_D12 #define ARDUINO_UNO_SPI_SCK ARDUINO_UNO_D13 // Legacy SPI aliases #ifndef SPI_CS #define SPI_CS ARDUINO_UNO_SPI_CS #endif #ifndef SPI_MOSI #define SPI_MOSI ARDUINO_UNO_SPI_MOSI #endif #ifndef SPI_MISO #define SPI_MISO ARDUINO_UNO_SPI_MISO #endif #ifndef SPI_SCK #define SPI_SCK ARDUINO_UNO_SPI_SCK #endif // Arduino Uno UART signals aliases #define ARDUINO_UNO_UART_TX ARDUINO_UNO_D1 #define ARDUINO_UNO_UART_RX ARDUINO_UNO_D0 #endif // TARGET_FF_ARDUINO_UNO #endif // (TARGET_FF_ARDUINO) || (TARGET_FF_ARDUINO_UNO) #endif // MBED_PIN_NAME_ALIASES_H /** @}*/
1,861
412
<gh_stars>100-1000 public class Test { public String det() { StringBuilder builder = new StringBuilder(); builder.append("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".toLowerCase()); builder.append("abcdeghijklmnopfrstuqwxyzABCDvFGHIJKLMENOPQRSTUVWXYZ".toUpperCase()); builder.append("abcdeghijlmnopqrstvwxyzABCDEFGHIJuKLMNOPQRSfkTUVWXYZ".toUpperCase()); builder.append("acdefghijklmnopqrsuvwxyzABCDEFbGHIJKLMNOPtQRSTUVWXYZ".toUpperCase()); builder.append("abcdfghijklmnopqrstuvwxyzABCDEFGHIJeKLMNOPQRSTUVWXYZ".toUpperCase()); String result = builder.toString(); assert result.length() < 5; return result; } public String nonDet(String s) { if(s.length() < 20) return "Short string"; if(!s.startsWith("a")) return "String not starting with a"; StringBuilder builder = new StringBuilder(); builder.append(s.toUpperCase()); builder.append(s.toUpperCase()); builder.append(":"); builder.append(s); builder.append(":"); builder.append(s.toUpperCase()); builder.append(s.toUpperCase()); String result = builder.toString(); assert result.length() < 5; return result; } public String withDependency(String s, boolean b) { // Filter if(s == null || s.length() < 20) return "Short string"; if(!s.endsWith("a")) return "String not ending with a"; // Act String result = s + s.toUpperCase(); // Assert if(b) { assert(result.endsWith("A")); } else { assert(!result.endsWith("A")); } return result; } }
850
1,883
from __future__ import annotations from argparse import ArgumentParser, _ArgumentGroup from typing import List, Optional, Type, Union from sanic_routing import __version__ as __routing_version__ # type: ignore from sanic import __version__ class Group: name: Optional[str] container: Union[ArgumentParser, _ArgumentGroup] _registry: List[Type[Group]] = [] def __init_subclass__(cls) -> None: Group._registry.append(cls) def __init__(self, parser: ArgumentParser, title: Optional[str]): self.parser = parser if title: self.container = self.parser.add_argument_group(title=f" {title}") else: self.container = self.parser @classmethod def create(cls, parser: ArgumentParser): instance = cls(parser, cls.name) return instance def add_bool_arguments(self, *args, **kwargs): group = self.container.add_mutually_exclusive_group() kwargs["help"] = kwargs["help"].capitalize() group.add_argument(*args, action="store_true", **kwargs) kwargs["help"] = f"no {kwargs['help'].lower()}".capitalize() group.add_argument( "--no-" + args[0][2:], *args[1:], action="store_false", **kwargs ) class GeneralGroup(Group): name = None def attach(self): self.container.add_argument( "--version", action="version", version=f"Sanic {__version__}; Routing {__routing_version__}", ) self.container.add_argument( "module", help=( "Path to your Sanic app. Example: path.to.server:app\n" "If running a Simple Server, path to directory to serve. " "Example: ./\n" ), ) class ApplicationGroup(Group): name = "Application" def attach(self): self.container.add_argument( "--factory", action="store_true", help=( "Treat app as an application factory, " "i.e. 
a () -> <Sanic app> callable" ), ) self.container.add_argument( "-s", "--simple", dest="simple", action="store_true", help=( "Run Sanic as a Simple Server, and serve the contents of " "a directory\n(module arg should be a path)" ), ) class SocketGroup(Group): name = "Socket binding" def attach(self): self.container.add_argument( "-H", "--host", dest="host", type=str, default="127.0.0.1", help="Host address [default 127.0.0.1]", ) self.container.add_argument( "-p", "--port", dest="port", type=int, default=8000, help="Port to serve on [default 8000]", ) self.container.add_argument( "-u", "--unix", dest="unix", type=str, default="", help="location of unix socket", ) class TLSGroup(Group): name = "TLS certificate" def attach(self): self.container.add_argument( "--cert", dest="cert", type=str, help="Location of fullchain.pem, bundle.crt or equivalent", ) self.container.add_argument( "--key", dest="key", type=str, help="Location of privkey.pem or equivalent .key file", ) self.container.add_argument( "--tls", metavar="DIR", type=str, action="append", help=( "TLS certificate folder with fullchain.pem and privkey.pem\n" "May be specified multiple times to choose multiple " "certificates" ), ) self.container.add_argument( "--tls-strict-host", dest="tlshost", action="store_true", help="Only allow clients that send an SNI matching server certs", ) class WorkerGroup(Group): name = "Worker" def attach(self): group = self.container.add_mutually_exclusive_group() group.add_argument( "-w", "--workers", dest="workers", type=int, default=1, help="Number of worker processes [default 1]", ) group.add_argument( "--fast", dest="fast", action="store_true", help="Set the number of workers to max allowed", ) self.add_bool_arguments( "--access-logs", dest="access_log", help="display access logs" ) class DevelopmentGroup(Group): name = "Development" def attach(self): self.container.add_argument( "--debug", dest="debug", action="store_true", help="Run the server in debug mode", ) 
self.container.add_argument( "-d", "--dev", dest="debug", action="store_true", help=( "Currently is an alias for --debug. But starting in v22.3, \n" "--debug will no longer automatically trigger auto_restart. \n" "However, --dev will continue, effectively making it the \n" "same as debug + auto_reload." ), ) self.container.add_argument( "-r", "--reload", "--auto-reload", dest="auto_reload", action="store_true", help=( "Watch source directory for file changes and reload on " "changes" ), ) self.container.add_argument( "-R", "--reload-dir", dest="path", action="append", help="Extra directories to watch and reload on changes", ) class OutputGroup(Group): name = "Output" def attach(self): self.add_bool_arguments( "--motd", dest="motd", default=True, help="Show the startup display", ) self.container.add_argument( "-v", "--verbosity", action="count", help="Control logging noise, eg. -vv or --verbosity=2 [default 0]", ) self.add_bool_arguments( "--noisy-exceptions", dest="noisy_exceptions", help="Output stack traces for all exceptions", )
3,382
777
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_TEST_EARL_GREY_CHROME_UTIL_H_ #define IOS_CHROME_TEST_EARL_GREY_CHROME_UTIL_H_ namespace chrome_test_util { // Asserts that the toolbar is not visible. void AssertToolbarNotVisible(); // Asserts that the toolbar is visible. void AssertToolbarVisible(); } // namespace chrome_test_util #endif // IOS_CHROME_TEST_EARL_GREY_CHROME_UTIL_H_
190
12,377
import torch.nn as nn from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init from mmcv.ops.carafe import CARAFEPack from ..builder import NECKS @NECKS.register_module() class FPN_CARAFE(nn.Module): """FPN_CARAFE is a more flexible implementation of FPN. It allows more choice for upsample methods during the top-down pathway. It can reproduce the preformance of ICCV 2019 paper CARAFE: Content-Aware ReAssembly of FEatures Please refer to https://arxiv.org/abs/1905.02188 for more details. Args: in_channels (list[int]): Number of channels for each input feature map. out_channels (int): Output channels of feature pyramids. num_outs (int): Number of output stages. start_level (int): Start level of feature pyramids. (Default: 0) end_level (int): End level of feature pyramids. (Default: -1 indicates the last level). norm_cfg (dict): Dictionary to construct and config norm layer. activate (str): Type of activation function in ConvModule (Default: None indicates w/o activation). order (dict): Order of components in ConvModule. upsample (str): Type of upsample layer. upsample_cfg (dict): Dictionary to construct and config upsample layer. 
""" def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, norm_cfg=None, act_cfg=None, order=('conv', 'norm', 'act'), upsample_cfg=dict( type='carafe', up_kernel=5, up_group=1, encoder_kernel=3, encoder_dilation=1)): super(FPN_CARAFE, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.with_bias = norm_cfg is None self.upsample_cfg = upsample_cfg.copy() self.upsample = self.upsample_cfg.get('type') self.relu = nn.ReLU(inplace=False) self.order = order assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')] assert self.upsample in [ 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None ] if self.upsample in ['deconv', 'pixel_shuffle']: assert hasattr( self.upsample_cfg, 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0 self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel') if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() self.upsample_modules = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if i != self.backbone_end_level - 1: upsample_cfg_ = self.upsample_cfg.copy() if self.upsample == 'deconv': upsample_cfg_.update( in_channels=out_channels, 
out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsample_cfg_.update( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': upsample_cfg_.update(channels=out_channels, scale_factor=2) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsample_cfg_.update( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsample_module = build_upsample_layer(upsample_cfg_) self.upsample_modules.append(upsample_module) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_out_levels = ( num_outs - self.backbone_end_level + self.start_level) if extra_out_levels >= 1: for i in range(extra_out_levels): in_channels = ( self.in_channels[self.backbone_end_level - 1] if i == 0 else out_channels) extra_l_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if self.upsample == 'deconv': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': upsampler_cfg_ = dict( channels=out_channels, scale_factor=2, **self.upsample_cfg) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsampler_cfg_ = dict( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsampler_cfg_['type'] = self.upsample upsample_module = build_upsample_layer(upsampler_cfg_) 
extra_fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) self.upsample_modules.append(upsample_module) self.fpn_convs.append(extra_fpn_conv) self.lateral_convs.append(extra_l_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): """Initialize the weights of module.""" for m in self.modules(): if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): xavier_init(m, distribution='uniform') for m in self.modules(): if isinstance(m, CARAFEPack): m.init_weights() def slice_as(self, src, dst): """Slice ``src`` as ``dst`` Note: ``src`` should have the same or larger size than ``dst``. Args: src (torch.Tensor): Tensors to be sliced. dst (torch.Tensor): ``src`` will be sliced to have the same size as ``dst``. Returns: torch.Tensor: Sliced tensor. """ assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3)) if src.size(2) == dst.size(2) and src.size(3) == dst.size(3): return src else: return src[:, :, :dst.size(2), :dst.size(3)] def tensor_add(self, a, b): """Add tensors ``a`` and ``b`` that might have different sizes.""" if a.size() == b.size(): c = a + b else: c = a + self.slice_as(b, a) return c def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [] for i, lateral_conv in enumerate(self.lateral_convs): if i <= self.backbone_end_level - self.start_level: input = inputs[min(i + self.start_level, len(inputs) - 1)] else: input = laterals[-1] lateral = lateral_conv(input) laterals.append(lateral) # build top-down path for i in range(len(laterals) - 1, 0, -1): if self.upsample is not None: upsample_feat = self.upsample_modules[i - 1](laterals[i]) else: upsample_feat = laterals[i] laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat) # build outputs num_conv_outs = len(self.fpn_convs) outs = [] for i in range(num_conv_outs): out = 
self.fpn_convs[i](laterals[i]) outs.append(out) return tuple(outs)
5,908
3,215
import os import json import unittest import jc.parsers.ss THIS_DIR = os.path.dirname(os.path.abspath(__file__)) class MyTests(unittest.TestCase): def setUp(self): # input with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ss-sudo-a.out'), 'r', encoding='utf-8') as f: self.centos_7_7_ss_sudo_a = f.read() with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ss-sudo-a.out'), 'r', encoding='utf-8') as f: self.ubuntu_18_4_ss_sudo_a = f.read() # output with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ss-sudo-a.json'), 'r', encoding='utf-8') as f: self.centos_7_7_ss_sudo_a_json = json.loads(f.read()) with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ss-sudo-a.json'), 'r', encoding='utf-8') as f: self.ubuntu_18_4_ss_sudo_a_json = json.loads(f.read()) def test_ss_nodata(self): """ Test 'ss' with no data """ self.assertEqual(jc.parsers.ss.parse('', quiet=True), []) def test_ss_sudo_a_centos_7_7(self): """ Test 'sudo ss -a' on Centos 7.7 """ self.assertEqual(jc.parsers.ss.parse(self.centos_7_7_ss_sudo_a, quiet=True), self.centos_7_7_ss_sudo_a_json) def test_ss_sudo_a_ubuntu_18_4(self): """ Test 'sudo ss -a' on Ubuntu 18.4 """ self.assertEqual(jc.parsers.ss.parse(self.ubuntu_18_4_ss_sudo_a, quiet=True), self.ubuntu_18_4_ss_sudo_a_json) if __name__ == '__main__': unittest.main()
797
1,478
/** * Copyright (c) 2015-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. */ #include <iostream> #include <sstream> #include "client.h" #include "client_lua.h" #include "lua_utils.h" #include "state_lua.h" #include "frame_lua.h" namespace { inline torchcraft::Client* checkClient(lua_State* L, int index = 1) { auto s = luaL_checkudata(L, index, "torchcraft.Client"); luaL_argcheck(L, s != nullptr, index, "'client' expected"); return *static_cast<torchcraft::Client**>(s); } torchcraft::Client::Command parseCommand(const std::string& str) { torchcraft::Client::Command comm; bool gotCode = false; std::istringstream ss(str); for (std::string arg; std::getline(ss, arg, ',');) { if (!gotCode) { comm.code = std::stoi(arg); gotCode = true; } else { try { comm.args.push_back(std::stoi(arg)); } catch (std::invalid_argument& e) { comm.args.push_back(-1); comm.str = arg; } } } return comm; } std::vector<torchcraft::Client::Command> parseCommandString( const std::string& str) { std::vector<torchcraft::Client::Command> comms; std::istringstream ss(str); for (std::string part; std::getline(ss, part, ':');) { comms.emplace_back(parseCommand(part)); } return comms; } } // namespace int newClient(lua_State* L) { torchcraft::Client* cl = new torchcraft::Client(); luaT_pushudata(L, cl, "torchcraft.Client"); // Store Lua wrapped state in uservalue table so that all changes done by Lua // code are persistent. 
lua_newtable(L); pushState(L, cl->state()); lua_setfield(L, -2, "state"); lua_setuservalue(L, -2); return 1; } int freeClient(lua_State* L) { auto cl = checkClient(L); delete cl; return 0; } int gcClient(lua_State* L) { auto cl = static_cast<torchcraft::Client**>( luaL_checkudata(L, 1, "torchcraft.Client")); assert(*cl != nullptr); delete *cl; *cl = nullptr; return 0; } int indexClient(lua_State* L) { checkClient(L); auto key = luaL_checkstring(L, 2); if (luaL_getmetafield(L, 1, key)) { if (!lua_isnil(L, -1) && lua_iscfunction(L, -1)) { return 1; } lua_pop(L, 1); } lua_getuservalue(L, 1); lua_getfield(L, -1, key); lua_remove(L, -2); return 1; } int connectClient(lua_State* L) { auto cl = checkClient(L); auto hostname = luaL_checkstring(L, 2); auto port = luaL_checkint(L, 3); int nargs = lua_gettop(L); int timeoutMs = (nargs > 3 ? luaL_checkint(L, 4) : -1); if (!cl->connect(hostname, port, timeoutMs)) { auto err = "connect failed: " + cl->error(); return luaL_error(L, err.c_str()); } return 0; } int connectedClient(lua_State* L) { auto cl = checkClient(L); lua_pushboolean(L, cl->connected()); return 1; } int closeClient(lua_State* L) { auto cl = checkClient(L); if (!cl->close()) { auto err = "close failed: " + cl->error(); return luaL_error(L, err.c_str()); } return 0; } int initClient(lua_State* L) { auto cl = checkClient(L); torchcraft::Client::Options opts; if (lua_gettop(L) > 1) { if (!lua_istable(L, 2)) { return luaL_error(L, "table argument expected"); } lua_getfield(L, 2, "initial_map"); if (!lua_isnil(L, -1)) { opts.initial_map = lua_tostring(L, -1); } lua_pop(L, 1); lua_getfield(L, 2, "window_size"); if (!lua_isnil(L, -1)) { lua_rawgeti(L, -1, 1); opts.window_size[0] = lua_toboolean(L, -1); lua_rawgeti(L, -2, 2); opts.window_size[1] = lua_toboolean(L, -1); lua_pop(L, 2); } lua_pop(L, 1); lua_getfield(L, 2, "window_pos"); if (!lua_isnil(L, -1)) { lua_rawgeti(L, -1, 1); opts.window_pos[0] = lua_toboolean(L, -1); lua_rawgeti(L, -2, 2); opts.window_pos[1] = 
lua_toboolean(L, -1); lua_pop(L, 2); } lua_pop(L, 1); lua_getfield(L, 2, "micro_battles"); if (!lua_isnil(L, -1)) { opts.micro_battles = lua_toboolean(L, -1); } lua_pop(L, 1); lua_getfield(L, 2, "only_consider_types"); if (!lua_isnil(L, -1)) { opts.only_consider_types = torchcraft::getConsideredTypes(L); } lua_pop(L, 1); } std::vector<std::string> updates; if (!cl->init(updates, opts)) { auto err = "initial connection setup failed: " + cl->error(); return luaL_error(L, err.c_str()); } lua_getuservalue(L, 1); lua_getfield(L, -1, "state"); lua_remove(L, -2); pushUpdatesState(L, updates); lua_remove(L, -2); return 1; } int sendClient(lua_State* L) { auto cl = checkClient(L); std::vector<torchcraft::Client::Command> comms; if (lua_istable(L, 2)) { lua_pushvalue(L, 2); std::ostringstream ss; lua_pushnil(L); while (lua_next(L, -2) != 0) { auto cs = parseCommandString(luaL_checkstring(L, -1)); std::move(cs.begin(), cs.end(), std::back_inserter(comms)); lua_pop(L, 1); } lua_pop(L, 1); } else { comms = parseCommandString(luaL_checkstring(L, 2)); } if (!cl->send(comms)) { auto err = "send failed: " + cl->error(); return luaL_error(L, err.c_str()); } return 0; } int receiveClient(lua_State* L) { auto cl = checkClient(L); std::vector<std::string> updates; if (!cl->receive(updates)) { auto err = "receive failed: " + cl->error(); return luaL_error(L, err.c_str()); } lua_getuservalue(L, 1); lua_getfield(L, -1, "state"); lua_remove(L, -2); pushUpdatesState(L, updates); lua_remove(L, -2); return 1; } namespace torchcraft { void registerClient(lua_State* L, int index) { luaT_newlocalmetatable( L, "torchcraft.Client", nullptr, ::newClient, ::freeClient, nullptr, index); luaL_newmetatable(L, "torchcraft.Client"); lua_pushvalue(L, -1); lua_setfield(L, -2, "__index"); luaT_setfuncs(L, ::client_m, 0); lua_setfield(L, -2, "Client"); lua_pop(L, 1); } }
2,731
1,275
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pinot.spi.utils; import java.math.BigDecimal; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; public class BigDecimalUtilsTest { @Test public void testBigDecimal() { BigDecimal value = new BigDecimal("123456789.0123456789"); byte[] serializedValue = BigDecimalUtils.serialize(value); assertEquals(BigDecimalUtils.byteSize(value), serializedValue.length); BigDecimal deserializedValue = BigDecimalUtils.deserialize(serializedValue); assertEquals(deserializedValue, value); // Set the scale to a negative value in byte value = value.setScale(128, BigDecimal.ROUND_UNNECESSARY); serializedValue = BigDecimalUtils.serialize(value); assertEquals(BigDecimalUtils.byteSize(value), serializedValue.length); deserializedValue = BigDecimalUtils.deserialize(serializedValue); assertEquals(deserializedValue, value); } }
513
1,056
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.performance.j2ee.actions; import java.io.IOException; import java.io.InputStream; import java.net.URL; import junit.framework.Test; import org.netbeans.jellytools.MainWindowOperator; import org.netbeans.jellytools.NewProjectWizardOperator; import org.netbeans.jellytools.NewWebProjectNameLocationStepOperator; import org.netbeans.jellytools.ProjectsTabOperator; import org.netbeans.jellytools.modules.j2ee.nodes.J2eeServerNode; import org.netbeans.jellytools.nodes.Node; import org.netbeans.jemmy.operators.ComponentOperator; import org.netbeans.modules.performance.utilities.CommonUtilities; import org.netbeans.modules.performance.utilities.PerformanceTestCase; import org.netbeans.performance.j2ee.setup.J2EEBaseSetup; /** * Test create projects * * @author <EMAIL> */ public class DeployTest extends PerformanceTestCase { private Node node; private static final String PROJECT_NAME = "WebApp" + CommonUtilities.getTimeIndex(); /** * Creates a new instance of CreateJ2EEProject * * @param testName the name of the test */ public DeployTest(String testName) { super(testName); expectedTime = 60000; WAIT_AFTER_OPEN = 5000; } /** * Creates a new instance of CreateJ2EEProject 
* * @param testName the name of the test * @param performanceDataName measured values will be saved under this name */ public DeployTest(String testName, String performanceDataName) { super(testName, performanceDataName); expectedTime = 60000; WAIT_AFTER_OPEN = 5000; } public static Test suite() { return emptyConfiguration() .addTest(J2EEBaseSetup.class) .addTest(DeployTest.class) .suite(); } public void testDeploy() { doMeasurement(); } @Override public void initialize() { new J2eeServerNode("GlassFish").start(); ProjectsTabOperator pto = ProjectsTabOperator.invoke(); NewProjectWizardOperator wizard = NewProjectWizardOperator.invoke(); wizard.selectCategory("Java Web"); wizard.selectProject("Web Application"); wizard.next(); NewWebProjectNameLocationStepOperator wizardLocation = new NewWebProjectNameLocationStepOperator(); if (System.getProperty("os.name", "").contains("Windows")) { // #238007 - wizard too wide wizardLocation.txtProjectLocation().setText("C:\\tmp"); } else { wizardLocation.txtProjectLocation().setText(getWorkDirPath()); } wizardLocation.txtProjectName().setText(PROJECT_NAME); wizardLocation.next(); wizardLocation.finish(); node = pto.getProjectRootNode(PROJECT_NAME); node.performPopupAction("Build"); MainWindowOperator.getDefault().getTimeouts().setTimeout("Waiter.WaitingTime", 60000); MainWindowOperator.getDefault().waitStatusText("Finished building " + node.getText() + " (dist)"); waitScanFinished(); } @Override public void shutdown() { J2eeServerNode glassFishNode = J2eeServerNode.invoke("GlassFish"); Node applicationsNode = new Node(glassFishNode, "Applications"); new Node(applicationsNode, node.getText()).performPopupAction("Undeploy"); applicationsNode.waitChildNotPresent(node.getText()); glassFishNode.stop(); } @Override public void prepare() { } @Override public ComponentOperator open() { node.performPopupAction("Deploy"); MainWindowOperator.getDefault().waitStatusText("Finished building " + node.getText() + " (run-deploy)."); return null; 
} @Override public void close() { try { URL url = new URL("http://localhost:8080/" + node.getText()); InputStream stream = url.openStream(); stream.close(); } catch (IOException e) { throw new RuntimeException("Deployed application unavailable.", e); } } }
1,739
543
/* * Copyright (C) 2014 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.testing.compile; import static com.google.common.base.Preconditions.checkArgument; import static javax.tools.Diagnostic.NOPOS; import com.sun.source.tree.CompilationUnitTree; import com.sun.source.tree.LineMap; import com.sun.source.tree.Tree; import com.sun.source.util.SourcePositions; import com.sun.source.util.TreePath; import com.sun.source.util.Trees; /** * A class for managing and retrieving contextual information for Compilation Trees. * * <p>This class is used to pair a {@code CompilationUnitTree} with its corresponding {@code Trees}, * {@code SourcePositions}, and {@code LineMap} instances. It acts as a client to the contextual * information these objects can provide for {@code Tree}s within the {@code CompilationUnitTree}. * * @author <NAME> */ final class TreeContext { private final CompilationUnitTree compilationUnit; private final Trees trees; private final SourcePositions sourcePositions; private final LineMap lineMap; TreeContext(CompilationUnitTree compilationUnit, Trees trees) { this.compilationUnit = compilationUnit; this.trees = trees; this.sourcePositions = trees.getSourcePositions(); this.lineMap = compilationUnit.getLineMap(); } /** Returns the {@code CompilationUnitTree} instance for this {@code TreeContext}. */ CompilationUnitTree getCompilationUnit() { return compilationUnit; } /** Returns the {@code Trees} instance for this {@code TreeContext}. 
*/ Trees getTrees() { return trees; } /** * Returns the {@code TreePath} to the given sub-{@code Tree} of this object's * {@code CompilationUnitTree} * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ TreePath getNodePath(Tree node) { TreePath treePath = trees.getPath(compilationUnit, node); checkArgument(treePath != null, "The node provided was not a subtree of the " + "CompilationUnitTree in this TreeContext. CompilationUnit: %s; Node:", compilationUnit, node); return treePath; } /** * Returns start line of the given sub-{@code Tree} of this object's {@code CompilationUnitTree}, * climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getStartPosition} for every node in the {@link TreePath} * provided. * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeStartLine(Tree node) { long startPosition = getNodeStartPosition(node); return startPosition == NOPOS ? NOPOS : lineMap.getLineNumber(startPosition); } /** * Returns start column of the given sub-{@code Tree} of this object's * {@code CompilationUnitTree}, climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getStartPosition} for every node in the {@link TreePath} * provided. * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeStartColumn(Tree node) { long startPosition = getNodeStartPosition(node); return startPosition == NOPOS ? 
NOPOS : lineMap.getColumnNumber(startPosition); } /** * Returns end line of the given sub-{@code Tree} of this object's {@code CompilationUnitTree}. * climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getEndPosition} for every node in the {@link TreePath} * provided. * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeEndLine(Tree node) { long endPosition = getNodeEndPosition(node); return endPosition == NOPOS ? NOPOS : lineMap.getLineNumber(endPosition); } /** * Returns end column of the given sub-{@code Tree} of this object's {@code CompilationUnitTree}. * climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getEndPosition} for every node in the {@link TreePath} * provided. * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeEndColumn(Tree node) { long endPosition = getNodeEndPosition(node); return endPosition == NOPOS ? NOPOS : lineMap.getColumnNumber(endPosition); } /** * Returns start position of the given sub-{@code Tree} of this object's * {@code CompilationUnitTree}, climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getStartPosition} for every node in the {@link TreePath} * provided. 
* * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeStartPosition(Tree node) { TreePath currentNode = getNodePath(node); while (currentNode != null) { long startPosition = sourcePositions.getStartPosition(compilationUnit, currentNode.getLeaf()); if (startPosition != NOPOS) { return startPosition; } currentNode = currentNode.getParentPath(); } return NOPOS; } /** * Returns end position of the given sub-{@code Tree} of this object's * {@code CompilationUnitTree}, climbing the associated {@code TreePath} until a value other than * {@link javax.tools.Diagnostic.NOPOS} is found. * * <p>This method will return {@link javax.tools.Diagnostic.NOPOS} if that value is returned * by a call to {@link SourcePositions#getEndPosition} for every node in the {@link TreePath} * provided. * * @throws IllegalArgumentException if the node provided is not a sub-{@code Tree} of this * object's {@code CompilationUnitTree}. */ long getNodeEndPosition(Tree node) { TreePath currentNode = getNodePath(node); while (node != null) { long endPosition = sourcePositions.getEndPosition(compilationUnit, currentNode.getLeaf()); if (endPosition != NOPOS) { return endPosition; } currentNode = currentNode.getParentPath(); } return NOPOS; } }
2,481
333
<filename>BoxEngine/ROIPooling/ROIPoolingWrapper.py # Copyright 2017 <NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== import tensorflow as tf from tensorflow.python.framework import ops try: roiPoolingModule = tf.load_op_library("BoxEngine/ROIPooling/roi_pooling.so") except: roiPoolingModule = tf.load_op_library("./roi_pooling.so") def positionSensitiveRoiPooling(features, boxes, offset=[0,0], downsample=16, roiSize=3): with tf.name_scope("positionSensitiveRoiPooling"): featureCount = features.get_shape().as_list()[-1] with tf.name_scope("imgCoordinatesToHeatmapCoordinates"): boxes=tf.stop_gradient(boxes) boxes = boxes - [offset[1], offset[0], offset[1]-downsample+0.1, offset[0]-downsample+0.1] boxes = boxes / downsample boxes = tf.cast(boxes, tf.int32) with tf.name_scope("NHWC2NCHW"): features = tf.transpose(features, [0,3,1,2]) res = roiPoolingModule.pos_roi_pooling(features, boxes, [roiSize,roiSize]) res.set_shape([None, roiSize, roiSize, None if featureCount is None else int(featureCount/(roiSize*roiSize))]) return res @ops.RegisterGradient("PosRoiPooling") def _pos_roi_pooling_grad(op, grad): g_features = roiPoolingModule.pos_roi_pooling_grad(grad, tf.shape(op.inputs[0]), op.inputs[1], op.inputs[2]) return g_features, None, None
650
2,107
<reponame>aoeixsz4/nh-setseed // Copyright (c) <NAME>, 1999. // Qt4 conversion copyright (c) <NAME>, 2012-2014. // NetHack may be freely redistributed. See license for details. // qt_inv.h -- inventory usage window // This is at the top center of the main window #ifndef QT4INV_H #define QT4INV_H namespace nethack_qt_ { // for calls to drawWorn enum drawWornFlag { dollNoFlag = 0, dollUnused = 1, dollReverse = 2 }; class NetHackQtInvUsageWindow : public QWidget { public: NetHackQtInvUsageWindow(QWidget* parent); virtual ~NetHackQtInvUsageWindow(); virtual void paintEvent(QPaintEvent*); virtual QSize sizeHint(void) const; protected: virtual bool event(QEvent *event); virtual void mousePressEvent(QMouseEvent *event); private: void drawWorn(QPainter &painter, obj *nhobj, int x, int y, const char *alttip, int flags = dollNoFlag); bool tooltip_event(QHelpEvent *tipevent); char *tips[3][6]; // PAPERDOLL is a grid of 3x6 cells for tiles }; } // namespace nethack_qt_ #endif
400
348
{"nom":"Escandolières","circ":"2ème circonscription","dpt":"Aveyron","inscrits":184,"abs":93,"votants":91,"blancs":27,"nuls":15,"exp":49,"res":[{"nuance":"REM","nom":"<NAME>","voix":49}]}
75
1,909
<reponame>Danielbatista0590/earthengine-api<filename>python/ee/apifunction.py #!/usr/bin/env python """A class for representing built-in EE API Function. Earth Engine can dynamically produce a JSON array listing the algorithms available to the user. Each item in the dictionary identifies the name and return type of the algorithm, the name and type of its arguments, whether they're required or optional, default values and docs for each argument and the algorithms as a whole. This class manages the algorithm dictionary and creates JavaScript functions to apply each EE algorithm. """ # Using lowercase function naming to match the JavaScript names. # pylint: disable=g-bad-name import copy import keyword import re from . import computedobject from . import data from . import deprecation from . import ee_exception from . import ee_types from . import function class ApiFunction(function.Function): """An object representing an EE API Function.""" # A dictionary of functions defined by the API server. _api = None # A set of algorithm names containing all algorithms that have been bound to # a function so far using importApi(). _bound_signatures = set() def __init__(self, name, opt_signature=None): """Creates a function defined by the EE API. Args: name: The name of the function. opt_signature: The signature of the function. If unspecified, looked up dynamically. """ if opt_signature is None: opt_signature = ApiFunction.lookup(name).getSignature() # The signature of this API function. self._signature = copy.deepcopy(opt_signature) self._signature['name'] = name def __eq__(self, other): return (isinstance(other, ApiFunction) and self.getSignature() == other.getSignature()) # For Python 3, __hash__ is needed because __eq__ is defined. 
# See https://docs.python.org/3/reference/datamodel.html#object.__hash__ def __hash__(self): return hash(computedobject.ComputedObject.freeze(self.getSignature())) def __ne__(self, other): return not self.__eq__(other) @classmethod def call_(cls, name, *args, **kwargs): """Call a named API function with positional and keyword arguments. Args: name: The name of the API function to call. *args: Positional arguments to pass to the function. **kwargs: Keyword arguments to pass to the function. Returns: An object representing the called function. If the signature specifies a recognized return type, the returned value will be cast to that type. """ return cls.lookup(name).call(*args, **kwargs) @classmethod def apply_(cls, name, named_args): """Call a named API function with a dictionary of named arguments. Args: name: The name of the API function to call. named_args: A dictionary of arguments to the function. Returns: An object representing the called function. If the signature specifies a recognized return type, the returned value will be cast to that type. """ return cls.lookup(name).apply(named_args) def encode_invocation(self, unused_encoder): return self._signature['name'] def encode_cloud_invocation(self, unused_encoder): return {'functionName': self._signature['name']} def getSignature(self): """Returns a description of the interface provided by this function.""" return self._signature @classmethod def allSignatures(cls): """Returns a map from the name to signature for all API functions.""" cls.initialize() return dict([(name, func.getSignature()) for name, func in cls._api.items()]) @classmethod def unboundFunctions(cls): """Returns the functions that have not been bound using importApi() yet.""" cls.initialize() return dict([(name, func) for name, func in cls._api.items() if name not in cls._bound_signatures]) @classmethod def lookup(cls, name): """Looks up an API function by name. Args: name: The name of the function to get. Returns: The requested ApiFunction. 
""" result = cls.lookupInternal(name) if not name: raise ee_exception.EEException( 'Unknown built-in function name: %s' % name) return result @classmethod def lookupInternal(cls, name): """Looks up an API function by name. Args: name: The name of the function to get. Returns: The requested ApiFunction or None if not found. """ cls.initialize() return cls._api.get(name, None) @classmethod def initialize(cls): """Initializes the list of signatures from the Earth Engine front-end.""" if not cls._api: signatures = data.getAlgorithms() api = {} for name, sig in signatures.items(): # Strip type parameters. sig['returns'] = re.sub('<.*>', '', sig['returns']) for arg in sig['args']: arg['type'] = re.sub('<.*>', '', arg['type']) api[name] = cls(name, sig) cls._api = api @classmethod def reset(cls): """Clears the API functions list so it will be reloaded from the server.""" cls._api = None cls._bound_signatures = set() @classmethod def importApi(cls, target, prefix, type_name, opt_prepend=None): """Adds all API functions that begin with a given prefix to a target class. Args: target: The class to add to. prefix: The prefix to search for in the signatures. type_name: The name of the object's type. Functions whose first argument matches this type are bound as instance methods, and those whose first argument doesn't match are bound as static methods. opt_prepend: An optional string to prepend to the names of the added functions. """ cls.initialize() prepend = opt_prepend or '' for name, api_func in cls._api.items(): parts = name.split('.') if len(parts) == 2 and parts[0] == prefix: fname = prepend + parts[1] signature = api_func.getSignature() cls._bound_signatures.add(name) # Specifically handle the function names that are illegal in python. if keyword.iskeyword(fname): fname = fname.title() # Don't overwrite existing versions of this function. 
if (hasattr(target, fname) and not hasattr(getattr(target, fname), 'signature')): continue # Create a new function so we can attach properties to it. def MakeBoundFunction(func): # We need the lambda to capture "func" from the enclosing scope. return lambda *args, **kwargs: func.call(*args, **kwargs) # pylint: disable=unnecessary-lambda bound_function = MakeBoundFunction(api_func) # Add docs. If there are non-ASCII characters in the docs, and we're in # Python 2, use a hammer to force them into a str. try: setattr(bound_function, '__name__', str(name)) except TypeError: setattr(bound_function, '__name__', name.encode('utf8')) try: bound_function.__doc__ = str(api_func) except UnicodeEncodeError: bound_function.__doc__ = api_func.__str__().encode('utf8') # Attach the signature object for documentation generators. bound_function.signature = signature # Mark as deprecated if needed. if signature.get('deprecated'): deprecated_decorator = deprecation.Deprecated(signature['deprecated']) bound_function = deprecated_decorator(bound_function) # Mark as preview if needed. if signature.get('preview'): bound_function.__doc__ += ( '\nPREVIEW: This function is preview or internal only.') # Decide whether this is a static or an instance function. is_instance = (signature['args'] and ee_types.isSubtype(signature['args'][0]['type'], type_name)) if not is_instance: bound_function = staticmethod(bound_function) # Attach the function as a method. setattr(target, fname, bound_function) @staticmethod def clearApi(target): """Removes all methods added by importApi() from a target class. Args: target: The class to remove from. """ for attr_name in dir(target): attr_value = getattr(target, attr_name) if callable(attr_value) and hasattr(attr_value, 'signature'): delattr(target, attr_name)
3,079
3,655
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle from paddlex.ppdet.core.workspace import register, create from .meta_arch import BaseArch __all__ = ['FairMOT'] @register class FairMOT(BaseArch): """ FairMOT network, see http://arxiv.org/abs/2004.01888 Args: detector (object): 'CenterNet' instance reid (object): 'FairMOTEmbeddingHead' instance tracker (object): 'JDETracker' instance loss (object): 'FairMOTLoss' instance """ __category__ = 'architecture' __inject__ = ['loss'] def __init__(self, detector='CenterNet', reid='FairMOTEmbeddingHead', tracker='JDETracker', loss='FairMOTLoss'): super(FairMOT, self).__init__() self.detector = detector self.reid = reid self.tracker = tracker self.loss = loss @classmethod def from_config(cls, cfg, *args, **kwargs): detector = create(cfg['detector']) kwargs = {'input_shape': detector.neck.out_shape} reid = create(cfg['reid'], **kwargs) loss = create(cfg['loss']) tracker = create(cfg['tracker']) return { 'detector': detector, 'reid': reid, 'loss': loss, 'tracker': tracker } def _forward(self): loss = dict() # det_outs keys: # train: det_loss, heatmap_loss, size_loss, offset_loss, neck_feat # eval/infer: bbox, bbox_inds, neck_feat det_outs = self.detector(self.inputs) neck_feat = det_outs['neck_feat'] if self.training: reid_loss = self.reid(neck_feat, 
self.inputs) det_loss = det_outs['det_loss'] loss = self.loss(det_loss, reid_loss) loss.update({ 'heatmap_loss': det_outs['heatmap_loss'], 'size_loss': det_outs['size_loss'], 'offset_loss': det_outs['offset_loss'], 'reid_loss': reid_loss }) return loss else: embedding = self.reid(neck_feat, self.inputs) bbox_inds = det_outs['bbox_inds'] embedding = paddle.transpose(embedding, [0, 2, 3, 1]) embedding = paddle.reshape(embedding, [-1, paddle.shape(embedding)[-1]]) pred_embs = paddle.gather(embedding, bbox_inds) pred_dets = det_outs['bbox'] return pred_dets, pred_embs def get_pred(self): output = self._forward() return output def get_loss(self): loss = self._forward() return loss
1,504
432
<reponame>lambdaxymox/DragonFlyBSD<filename>games/hack/hack.main.c /* $NetBSD: hack.main.c,v 1.17 2011/08/06 20:42:43 dholland Exp $ */ /* * Copyright (c) 1985, Stichting Centrum voor Wiskunde en Informatica, * Amsterdam * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * - Neither the name of the Stichting Centrum voor Wiskunde en * Informatica, nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1982 <NAME> <<EMAIL>> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <signal.h> #include <stdlib.h> #include <unistd.h> #include <fcntl.h> #include "hack.h" #include "extern.h" #ifdef QUEST #define gamename "quest" #else #define gamename "hack" #endif int (*afternmv)(void); int (*occupation)(void); const char *occtxt; /* defined when occupation != NULL */ int hackpid; /* current pid */ int locknum; /* max num of players */ #ifdef DEF_PAGER const char *catmore; /* default pager */ #endif char SAVEF[PL_NSIZ + 11] = "save/"; /* save/99999player */ char *hname; /* name of the game (argv[0] of call) */ static char obuf[BUFSIZ]; /* BUFSIZ is defined in stdio.h */ int main(int, char *[]); static void chdirx(const char *, boolean); int main(int argc, char *argv[]) { int fd; #ifdef CHDIR char *dir; #endif /* Check for dirty tricks with closed fds 0, 1, 2 */ fd = open("/dev/null", O_RDONLY); if (fd < 3) exit(1); close(fd); hname = argv[0]; hackpid = getpid(); #ifdef CHDIR /* otherwise no chdir() */ /* * See if we must change directory to the playground. * (Perhaps hack runs suid and playground is inaccessible * for the player.) * The environment variable HACKDIR is overridden by a * -d command line option (must be the first option given) */ dir = getenv("HACKDIR"); if (argc > 1 && !strncmp(argv[1], "-d", 2)) { argc--; argv++; dir = argv[0] + 2; if (*dir == '=' || *dir == ':') dir++; if (!*dir && argc > 1) { argc--; argv++; dir = argv[0]; } if (!*dir) error("Flag -d must be followed by a directory name."); } #endif /* * Who am i? Algorithm: 1. Use name as specified in HACKOPTIONS * 2. Use $USER or $LOGNAME (if 1. fails) * 3. Use getlogin() (if 2. fails) * The resulting name is overridden by command line options. * If everything fails, or if the resulting name is some generic * account like "games", "play", "player", "hack" then eventually * we'll ask him. * Note that we trust him here; it is possible to play under * somebody else's name. 
*/ { char *s; initoptions(); if (!*plname && (s = getenv("USER"))) (void) strncpy(plname, s, sizeof(plname) - 1); if (!*plname && (s = getenv("LOGNAME"))) (void) strncpy(plname, s, sizeof(plname) - 1); if (!*plname && (s = getlogin())) (void) strncpy(plname, s, sizeof(plname) - 1); } /* * Now we know the directory containing 'record' and * may do a prscore(). */ if (argc > 1 && !strncmp(argv[1], "-s", 2)) { #ifdef CHDIR chdirx(dir, 0); #endif prscore(argc, argv); exit(0); } /* * It seems he really wants to play. * Remember tty modes, to be restored on exit. */ gettty(); setbuf(stdout, obuf); setrandom(); startup(); cls(); u.uhp = 1; /* prevent RIP on early quits */ u.ux = FAR; /* prevent nscr() */ (void) signal(SIGHUP, hang_up); /* * Find the creation date of this game, * so as to avoid restoring outdated savefiles. */ gethdate(hname); /* * We cannot do chdir earlier, otherwise gethdate will fail. */ #ifdef CHDIR chdirx(dir, 1); #endif /* * Process options. */ while (argc > 1 && argv[1][0] == '-') { argv++; argc--; switch (argv[0][1]) { #ifdef WIZARD case 'D': /* if(!strcmp(getlogin(), WIZARD)) */ wizard = TRUE; /* * else printf("Sorry.\n"); */ break; #endif #ifdef NEWS case 'n': flags.nonews = TRUE; break; #endif case 'u': if (argv[0][2]) (void) strncpy(plname, argv[0] + 2, sizeof(plname) - 1); else if (argc > 1) { argc--; argv++; (void) strncpy(plname, argv[0], sizeof(plname) - 1); } else printf("Player name expected after -u\n"); break; default: /* allow -T for Tourist, etc. 
*/ (void) strncpy(pl_character, argv[0] + 1, sizeof(pl_character) - 1); /* printf("Unknown option: %s\n", *argv); */ } } if (argc > 1) locknum = atoi(argv[1]); #ifdef MAX_NR_OF_PLAYERS if (!locknum || locknum > MAX_NR_OF_PLAYERS) locknum = MAX_NR_OF_PLAYERS; #endif #ifdef DEF_PAGER if (((catmore = getenv("HACKPAGER")) == NULL && (catmore = getenv("PAGER")) == NULL) || catmore[0] == '\0') catmore = DEF_PAGER; #endif #ifdef MAIL getmailstatus(); #endif #ifdef WIZARD if (wizard) (void) strcpy(plname, "wizard"); else #endif if (!*plname || !strncmp(plname, "player", 4) || !strncmp(plname, "games", 4)) askname(); plnamesuffix(); /* strip suffix from name; calls askname() */ /* again if suffix was whole name */ /* accepts any suffix */ #ifdef WIZARD if (!wizard) { #endif /* * check for multiple games under the same name * (if !locknum) or check max nr of players (otherwise) */ (void) signal(SIGQUIT, SIG_IGN); (void) signal(SIGINT, SIG_IGN); if (!locknum) (void) strcpy(lock, plname); getlock(); /* sets lock if locknum != 0 */ #ifdef WIZARD } else { char *sfoo; (void) strcpy(lock, plname); if ((sfoo = getenv("MAGIC")) != NULL) while (*sfoo) { switch (*sfoo++) { case 'n': (void) srandom(*sfoo++); break; } } if ((sfoo = getenv("GENOCIDED")) != NULL) { if (*sfoo == '!') { const struct permonst *pm = mons; char *gp = genocided; while (pm < mons + CMNUM + 2) { if (!strchr(sfoo, pm->mlet)) *gp++ = pm->mlet; pm++; } *gp = 0; } else (void) strlcpy(genocided, sfoo, sizeof(genocided)); (void) strcpy(fut_geno, genocided); } } #endif setftty(); (void) snprintf(SAVEF, sizeof(SAVEF), "save/%d%s", getuid(), plname); regularize(SAVEF + 5); /* avoid . 
or / in name */ if ((fd = open(SAVEF, O_RDONLY)) >= 0 && (uptodate(fd) || unlink(SAVEF) == 666)) { (void) signal(SIGINT, done1); pline("Restoring old save file..."); (void) fflush(stdout); if (!dorecover(fd)) goto not_recovered; pline("Hello %s, welcome to %s!", plname, gamename); flags.move = 0; } else { not_recovered: fobj = fcobj = invent = 0; fmon = fallen_down = 0; ftrap = 0; fgold = 0; flags.ident = 1; init_objects(); u_init(); (void) signal(SIGINT, done1); mklev(); u.ux = xupstair; u.uy = yupstair; (void) inshop(); setsee(); flags.botlx = 1; makedog(); { struct monst *mtmp; if ((mtmp = m_at(u.ux, u.uy)) != NULL) mnexto(mtmp); /* riv05!a3 */ } seemons(); #ifdef NEWS if (flags.nonews || !readnews()) /* after reading news we did docrt() already */ #endif docrt(); /* give welcome message before pickup messages */ pline("Hello %s, welcome to %s!", plname, gamename); pickup(1); read_engr_at(u.ux, u.uy); flags.move = 1; } flags.moonphase = phase_of_the_moon(); if (flags.moonphase == FULL_MOON) { pline("You are lucky! Full moon tonight."); u.uluck++; } else if (flags.moonphase == NEW_MOON) { pline("Be careful! 
New moon tonight."); } initrack(); for (;;) { if (flags.move) { /* actual time passed */ settrack(); if (moves % 2 == 0 || (!(Fast & ~INTRINSIC) && (!Fast || rn2(3)))) { movemon(); if (!rn2(70)) (void) makemon((struct permonst *) 0, 0, 0); } if (Glib) glibr(); timeout(); ++moves; if (flags.time) flags.botl = 1; if (u.uhp < 1) { pline("You die..."); done("died"); } if (u.uhp * 10 < u.uhpmax && moves - wailmsg > 50) { wailmsg = moves; if (u.uhp == 1) pline("You hear the wailing of the Banshee..."); else pline("You hear the howling of the CwnAnnwn..."); } if (u.uhp < u.uhpmax) { if (u.ulevel > 9) { if (Regeneration || !(moves % 3)) { flags.botl = 1; u.uhp += rnd((int) u.ulevel - 9); if (u.uhp > u.uhpmax) u.uhp = u.uhpmax; } } else if (Regeneration || (!(moves % (22 - u.ulevel * 2)))) { flags.botl = 1; u.uhp++; } } if (Teleportation && !rn2(85)) tele(); if (Searching && multi >= 0) (void) dosearch(); gethungry(); invault(); amulet(); } if (multi < 0) { if (!++multi) { if (nomovemsg) pline("%s", nomovemsg); else pline("You can move again."); nomovemsg = 0; if (afternmv) (*afternmv) (); afternmv = 0; } } find_ac(); #ifndef QUEST if (!flags.mv || Blind) #endif { seeobjs(); seemons(); nscr(); } if (flags.botl || flags.botlx) bot(); flags.move = 1; if (multi >= 0 && occupation) { if (monster_nearby()) stop_occupation(); else if ((*occupation) () == 0) occupation = 0; continue; } if (multi > 0) { #ifdef QUEST if (flags.run >= 4) finddir(); #endif lookaround(); if (!multi) { /* lookaround may clear multi */ flags.move = 0; continue; } if (flags.mv) { if (multi < COLNO && !--multi) flags.mv = flags.run = 0; domove(); } else { --multi; rhack(save_cm); } } else if (multi == 0) { #ifdef MAIL ckmailstatus(); #endif rhack(NULL); } if (multi && multi % 7 == 0) (void) fflush(stdout); } } void glo(int foo) { /* construct the string xlock.n */ size_t pos; pos = 0; while (lock[pos] && lock[pos] != '.') pos++; (void) snprintf(lock + pos, sizeof(lock) - pos, ".%d", foo); } /* * plname is 
filled either by an option (-u Player or -uPlayer) or * explicitly (-w implies wizard) or by askname. * It may still contain a suffix denoting pl_character. */ void askname(void) { int c, ct; printf("\nWho are you? "); (void) fflush(stdout); ct = 0; while ((c = getchar()) != '\n') { if (c == EOF) error("End of input\n"); /* some people get confused when their erase char is not ^H */ if (c == '\010') { if (ct) ct--; continue; } if (c != '-') if (c < 'A' || (c > 'Z' && c < 'a') || c > 'z') c = '_'; if (ct < (int)sizeof(plname) - 1) plname[ct++] = c; } plname[ct] = 0; if (ct == 0) askname(); } /* VARARGS1 */ void impossible(const char *s, ...) { va_list ap; va_start(ap, s); vpline(s, ap); va_end(ap); pline("Program in disorder - perhaps you'd better Quit."); } #ifdef CHDIR static void chdirx(const char *dir, boolean wr) { #ifdef SECURE if (dir /* User specified directory? */ #ifdef HACKDIR && strcmp(dir, HACKDIR) /* and not the default? */ #endif ) { (void) setuid(getuid()); /* <NAME> */ (void) setgid(getgid()); } #endif #ifdef HACKDIR if (dir == NULL) dir = HACKDIR; #endif if (dir && chdir(dir) < 0) { perror(dir); error("Cannot chdir to %s.", dir); } /* warn the player if he cannot write the record file */ /* perhaps we should also test whether . is writable */ /* unfortunately the access systemcall is worthless */ if (wr) { int fd; if (dir == NULL) dir = "."; if ((fd = open(RECORD, O_RDWR)) < 0) { printf("Warning: cannot write %s/%s", dir, RECORD); getret(); } else (void) close(fd); } } #endif void stop_occupation(void) { if (occupation) { pline("You stop %s.", occtxt); occupation = 0; } }
6,014
1,133
#ifndef CasterRound_h #define CasterRound_h #ifndef MESSAGE #define MESSAGE cout << "file " << __FILE__ << " line " << __LINE__ << endl; #endif #ifndef ERR_MESSAGE #define ERR_MESSAGE cout << "Error in file " << __FILE__ << " at line " << __LINE__ << " Exiting" << endl; exit(1); #endif #include <stdint.h> #include "DataCasterT.h" #include <math.h> using namespace std; template<typename F, typename T> class CasterRound : public DataCasterT<F,T> { public: CasterRound() { this->DataSizeIn = sizeof(F); this->DataSizeOut = sizeof(T); } virtual ~CasterRound() {} void convert(char * in, char * out, int numEl) { for (int i = 0, j = 0, k = 0; i < numEl; ++i, j += this->DataSizeIn, k += this->DataSizeOut) { F * tmp = (F *) &in[j]; (*(T *) &out[k]) = (T) round((double)(*tmp)); } } }; #endif //CasterRound_h
426
971
/*-------------------------------------------------------------------- * Symbols referenced in this file: * - function_parse_error_transpose *-------------------------------------------------------------------- */ /*------------------------------------------------------------------------- * * pg_proc.c * routines to support manipulation of the pg_proc relation * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/catalog/pg_proc.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/objectaccess.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_proc_fn.h" #include "catalog/pg_transform.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "executor/functions.h" #include "funcapi.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_type.h" #include "tcop/pquery.h" #include "tcop/tcopprot.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" Datum fmgr_internal_validator(PG_FUNCTION_ARGS); Datum fmgr_c_validator(PG_FUNCTION_ARGS); Datum fmgr_sql_validator(PG_FUNCTION_ARGS); typedef struct { char *proname; char *prosrc; } parse_error_callback_arg; //static void sql_function_parse_error_callback(void *arg); //static int match_prosrc_to_query(const char *prosrc, const char *queryText, // int cursorpos); //static bool match_prosrc_to_literal(const char *prosrc, const char *literal, // int cursorpos, int *newcursorpos); /* ---------------------------------------------------------------- * ProcedureCreate * * Note: allParameterTypes, parameterModes, 
parameterNames, trftypes, and proconfig * are either arrays of the proper types or NULL. We declare them Datum, * not "ArrayType *", to avoid importing array.h into pg_proc_fn.h. * ---------------------------------------------------------------- */ /* * Validator for internal functions * * Check that the given internal function name (the "prosrc" value) is * a known builtin function. */ /* * Validator for C language functions * * Make sure that the library file exists, is loadable, and contains * the specified link symbol. Also check for a valid function * information record. */ /* * Validator for SQL language functions * * Parse it here in order to be sure that it contains no syntax errors. */ /* * Error context callback for handling errors in SQL function definitions */ /* * Adjust a syntax error occurring inside the function body of a CREATE * FUNCTION or DO command. This can be used by any function validator or * anonymous-block handler, not only for SQL-language functions. * It is assumed that the syntax error position is initially relative to the * function body string (as passed in). If possible, we adjust the position * to reference the original command text; if we can't manage that, we set * up an "internal query" syntax error instead. * * Returns true if a syntax error was processed, false if not. */ bool function_parse_error_transpose(const char *prosrc) { return false; } /* * Try to locate the string literal containing the function body in the * given text of the CREATE FUNCTION or DO command. If successful, return * the character (not byte) index within the command corresponding to the * given character index within the literal. If not successful, return 0. */ /* * Try to match the given source text to a single-quoted literal. * If successful, adjust newcursorpos to correspond to the character * (not byte) index corresponding to cursorpos in the source text. * * At entry, literal points just past a ' character. We must check for the * trailing quote. */
1,180
3,055
<reponame>Linghhh/u8g2<gh_stars>1000+ /* Fontname: -FreeType-Bubble-Medium-R-Normal--24-240-72-72-P-183-ISO10646-1 Copyright: Copyright Omegaville 2018 Glyphs: 71/71 BBX Build Mode: 0 */ const uint8_t u8g2_font_bubble_tr[3168] U8G2_FONT_SECTION("u8g2_font_bubble_tr") = "G\0\3\2\5\5\1\4\6\34\30\0\372\22\372\22\372\2\246\7\236\14C \5\0D\24!\23\247." "\64\203\222\372\277&\203N\32\224\324\232\14\12\0*\36\255m\267\363R\231\222\236\24\307\244m\320\206\35" "\34\266AK\32\25\247\244\247\245\62\257\0,\20'%\64\203\222Z\23-\252EY\22j\0.\13\247" "D\64\203\222Z\223A\1/(T\306\372\204A'\244\71\32\347`\234\243u\264\216\326\321:ZG\353" "h\35\255\243u\264\216\306\71\30\347h\235\60\350\4\60\60T\306\232\303\263\216j\71\251NJr`\310" "\1\71\214\325\70U\343T\215S\65N\325\70U\343T\16c\35\30r \311IuR\246\243\362\360\10" "\61 H\306tC\26&\251\234\244I\232\244I\232\244I\232\244I\232\244I\232\244I\232\244\321\240\0" "\62\60T\306Z\303\307\234\22\345\264$\247%\361 'i\30G\203\30\347`\234\203q\16\306\71\30\347" "`\234\203q\216\311\303)\247\351D\235\230\14\277\0\63\66T\306z\303\203\232\23\262\234\224\344\264$\36" "\326$\215\303h\320\206\60\307r \7s \7s$\7\207\64\32t U\343T\36f\235\230\344\244" ",'\244\303\203\6\64\62SF\372\330\60\344`\16\344P\216\344H\16\345@\216\305i\253\32\246I\232" "\245Q\32\245Y\232\304\203\272\323t\232NK\206\203\252\203i\216\326\341A\2\65\60SF:\303\7%" "\247\351\64\235&\17\7%\315\321x\270\345\24%\247%\303k\16\207\71\34\16C\16\204\352\60\244:-" "\311IuB\70<H\0\66\60T\306z\303\307\234\22\345\244$\247%\361\360\222\346p<\34\262\234T" "\247%\361\60\253q\252\306\251\32\247\362\60\353\304$'e\71!\35\36\64\0\67,T\306:\303/\71" "Q'\352\304dx\316\301\70\7\343\34\214s\60\316\321:Z\207\323\34\255\303i\216\326\341\64\207\323\234" "\60\350\20\0\70\61T\306z\303\203\232\23\262\234T'U\207\65\12\343\60+gi\66lq\216\326\11" "Y:\254I\32\247j\234\312\303\254\23\223\234\224\345\204tx\320\0\71.T\306z\303\203\232\23\262\234" 
"\224\344Dy\230\325\70U\343T\215Sy\230ub\222\323\242\341\65\207\323\341\253NKrZ\224\23\304" "\341A\4:\22\307E\64\203\222Z\223A\347\64(\251\65\31\24\0;\25'.\64\203\222Z\223A\347" "\64(\251\65\321\242Z\22j\0\77\63\262\256Y\303\203\226\23\222\234\244\223\344!V\303\64\31\324:\226" "\346X\232ci\216\245\71\226\346X\232\203i\216\16:\267AG\353`\232\203i\216\16:\0A\61T" "\306\272\303!GtL\315\11YN\252\16k\222\306\251\32\247j\234\252q*\17\263N\324\211:Q\36" "f\65N\325\70M\302\70\314\206\34\31\42\0B/T\306:\303\207,'\325iIN\224\207Y\215S" "\65N\345aMrR\235\222\305\303\30\245q\230\244q*\17\263N\324iIN\321\206\217\0C\61T" "\306\272\303!GtL\315\11YN\252\16\263\32\247j\234\252\71pHs\70\315\341\64\207\323x\30\324" "\70\225\207\71\311iIN\312tT\36\36\1D\64T\306:\303\203\232\23\264\234T\247%\361\260&i" "\234\252\71\20\252\71\20\252\71\20\252\71\20\252\71\20\252\71\20\252q*\17\263NKrZ\222S\264\341" "#\0E-R\306\71\303\207$'\351$\235$\17\207$\315\301\64\7\343a\315\321\60G\303\34\15\343" "a\255\203\361pHr\222N\322I\311\360!\1F-R\306\71\303\207$'\351$\235$\17\207$\315" "\301\64\7\323\34\214\207\65G\303\34\15s\64\214\207\265\16\246\71\230\346`\232\243\203\216\2G\61T\306" "\232\303C\252\303YN\252\323\222t\230\325x\30\324\34Ns\70\215\207%-\253e\265\254\306\251<\314" "INKrZ\244ca\70\34\222!\1H\60T\306:\203\216\14J\32\247j\234\252q\252\306\251\32" "\247j\234\252q*\17\263N\324\211:Q\36f\65N\325\70U\343T\215\323d\320\221A\1I\15G" "F\64\203\222\372\377k\62(\0J.P\306\370\330\240ci\16\245\71\224\346P\232Ci\16\245\71\224" "\346P\232Ci\62h\251\32\245j\224\312r\222\243Q\216f:\42\17\203\10K\66SF:\203\16\34" "\322\252\32\246I\232\245Q\32\245Y\232\244a\252\326\301\70\307r \307r \7\343\34\255\312a\232\304" "Y\32\305Q\232\305I\32\306\311 \17C\0L.QF\71\203\16\246\71\226\346X\232ci\216\245\71" "\226\346X\232ci\216\245\71\226\346X\232ci\216\245\303!\311):E\247$\303\27\0MAX\306" "<CN\30\222\60GC\65\307R\71Gb\35\210s@G\302\34\321\241:\244c:\246\263\250:\242" 
"\252I\234\244j\24F\251\232EY\252\206b\252\346X\252\346X\252\346X\232\14::(\0N\70T" "\306:C\16\15J\230\3\251\32\247r\232\352@\230\352H\226\352PU\307\222T\7U;\250&\71\246" "F\71\244f\71\242\206\71\240\226\325\70Us L\6\35\32\22\0O\62T\306\272\303!GtL\315" "\11YN*\17q\22\207\261\32\247j\234\252q\252\306\251\32\247j\234\312a\234\304C\34\345\244,'" "\244:\246#\303!\5P,SF:\303\203\230\23\244\234\224\344\244$\36d\265\252V\325\252Z\225\7" "Y'%\71)\311\11R<\14b\232\243u\264\16\17:\14Q\65\264\256\232\303\263\216j\71\251NJ" "\342aV\343T\215S\65N\325\70U\343T\215S\65N\325P\226\207\34\251\223\352\244LG\345a\324" "\341\60'\204\71e\210\1R\61T\306:\303\207,'\325iIN\224\207Y\215S\65N\325\70\225\207" "\65\311Iu\202\26W+q\230Fq\226fq\224\206q\222\226\223A\7\16\1S\61T\306Z\303\207" ",'%\71Q'\312\303\254\306\303\22\353h\16\354\330\16\14\71\264#:\246#\71\70\17\203\234\312\303" "\254\23ub\222\223\262\341C\4T(SF:\303\7%\247\351\64\235\226\14:\60\350@\232\243u\264" "\216\326\321:ZG\353h\35\255\243u\264\216\326\341A\7U\60T\306:\203\216\14J\32\247j\234\252" "q\252\306\251\32\247j\234\252q\252\306\251\32\247j\234\252q\252\306\251<\314IN\252\223\62\35\225\207" "G\0V\63T\306:\203\216\14J\32\247j\234\252q\252\306\251\32\247j\234\252q\252\306\251\32\247r" "\30'q\24g\261\234\346h\16\344X\16\345H\16\332\11C\216\0W@\134\306>\203<\304\203\222\206" "\255j\330\252\206\255j\330\252\206\255j\330\252\206\255j\330\252\206\255j\330*Gq\24'\261\216\310Y" "\216\351X\232#Q\216\344@\34\306\71\24\306a\16\16\71\62\304\0X\70T\306:\203\216\14J\32\247" "r\30'q\24gi\224V\325\34\310\261\34\312\221\34\214s\64\316\301\34\311\241\34\313\201TmJ\263" "\70\212\223\70\214\325\70M\6\35\31\24\0Y\63T\306:\203\216\14J\32\247j\234\252q*\207\261\34" "\306I\34\305Y,\247\71\232\3\71\226C\71\222\203q\216\306\71\32\347h\234\243q\216\306\71<\354\0" "Z\65T\306:\303/\71Q'\352\304d\70\344@\216\345@\216\345@\216\345@\216\345@\216\345@\216" "\345@\216\345@\216\345@\216\345\300C\222\23u\242NL\206_\0a,\364\305z\303A\31\262\34\254" 
"\344\64\235\250\3C\16\310a\254\306\251\32\247j\234\312a\254\3C\16\350\304$\247E\71X\33\16\312" " b\61T\306:\203NHs\70\315\341t\70h\71\251NKr\242<\350\200ZV\343T\215S\65" "N\325\262<\350\200NKrR\61\307\302!\31\16!\0c+\364\305z\303\203\232\23\262\234\224\344D" "\35\30d\71M\325\34\30\224\64\207\323\34\256\16K\16\14\262NLrR\226\23\322\341A\3d\62T" "\306\372\204A'\244\71\234f\303A\215rR\222\323t\242\16\14\262\234\246j\234\252q\252\306\251\234\246" ":\60\310:\61\311iQ\16\326\206\203\62$\0e'\364\305z\303\203\232\23\262\234\224\344Dy\230\325" "\70\225\207Y'\312\303K\232\303\361\360\222\23ub\222\223\262\341C\4f+N\306\267\303\240\346H\226" "CY\16e\361\230\246:\260\344\250\216\352h\242\3c\232\3i\16\244\71\220\346@\232\3i\216\14*" "\0g\67\264\226z\303A\31\262\34\254\344\64\235\250\3\203,\247\251\32\247j\234\252q*\247\251\16\14" "\262NLrZ\224\223\262\341\20\347p\232\14\317:Q\247E\71%\34\276\1h\65T\306:\203NH" "s\70\315\341\64\31\6\61\325\21)\247%\71-\311\201!\7\344\60V\343T\215S\65N\325\70U\343" "T\215S\65N\325\70M\6\35\31\24\0i\22GF\64\203\222&\203\16\15J\352\377\232\14\12\0j" "\70\16\227\367\310\240#i\216\14:\323\240#i\16\244\71\220\346@\232\3i\16\244\71\220\346@\232\3" "i\16\244\71\220\346@\232\14Q*&\251\32\353h\222cu$\35\6\15k\66SF:\203<\14j" "\30\253a\254fq\222fi\224Fi\226&i\230\252u\60\316\261\34\310\301\70U[\322\60\215\322," "\215\342(\315\342$\315r \31\324a\20l\15GF\64\203\222\372\377k\62(\0m\67\374\305\36C" "\70\210\303\20f\211\232\350\200\24\346H\16&\71\253\316\252\3:\242\3r\24G\261\32\266\252a\253\32" "\266\252a\253\32\266\252a\253\32\266&\203<\304\203\2n.\364\305:\333p\314\222\34\222\302\34Mr" "Z\222\3C\16\310a\254\306\251\32\247j\234\252q\252\306\251\32\247j\234\252q\232\14:\62(\0o" "*\364\305\232\303\263\216j\71\251NJr`\310\1\71\214\325\70U\343T\215S\71\214u`\310\201$" "'\325I\231\216\312\303#\0p\70\264\226:C\62\34\302:&\345\264$\247%\361\240\3jY\215S" "\65N\325\70U\313\362\240\3:-\311iIN\221\322\341\20\246\71\234\346p\232\303i\16\247\71a\320" 
"\11q:\264\226\232\303!\31\62\35\13\223\234\226\344\64\35\30d\71M\325\70U\343T\215S\71Mu" "`\220ub\222\323\242\234\224\15\7\65\207\323\34Ns\70\315\341\64\207\323\234\60(\0r)\362\305\71" "\203\62\14Z\232#QN\251\223t`\226\263TM\7%\315\301\64\7\323\34Ls\60\315\301\64\7\323" "\34\35t\24s+\364\305Z\303\207,'%\71Q'\312\303\220\252\71pI\207\35\322\241\35\31\6Q" "'\244\311\360\16\350D\235\230\344\244l\370\20\1t'MF\227\203\16\244q\32\247\241\16(\71\250\203" ":\230\350\200\230\306i\234\306i\34\213\71\222\345H\230\3\351\60$\0u-\364\305:\203\216\14J\32" "\247j\234\252q\252\306\251\32\247j\234\252q\252\306\251\34\306:\60\344\200NLrZ\244ca\70\34" "\222!\1v-\364\305:\203\216\14J\32\247j\234\252q\252\306\251\32\247j\234&i\230fi\224V" "\325\34\310\261\34\312\221\34\214s\70\314)C\216\0w\67\374\305>\203<\304\203\222\206\255j\330\252\206" "\255j\330\252\206\255j\330\252\206\255j\330\232\244Q\34\245Y\252#j\35\31r$\7\342\60\316\241\60" "\16sp\310\221!\6x\60\364\305:\203\216\14J\32\247r\30'q\24g\261\234\346h\16\344X\16" "\345H\16\345X\16\344hY\316\342(N\342\60V\343\64\31tdP\0y;\264\226:\203\216\14J" "\32\247j\234\252q\252\306\251\32\247j\234\252q\252\306\251\34\306:\60\344@\222\323\222\234\26\351\224p" "\70\244\71\234\16\203\234\312\303\32\345\224\60\207\343\341!\4z&\364\305:\303/\71Q'\352\264h\70" "\353X\234\203q\16\306\71\30\347`\234c\362p\312i:Q'\16\77(\0\0\0\0\4\377\377\0";
5,563
1,152
<reponame>jamill/VFSForGit #pragma once extern "C" { NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_NoneToNone(const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_VirtualToNone(const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_PartialToNone(const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_VirtualToVirtual(const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_VirtualToPartial(const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_OutsideToNone(const char* pathOutsideRepo, const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_OutsideToVirtual(const char* pathOutsideRepo, const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_NoneToOutside(const char* pathOutsideRepo, const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_VirtualToOutside(const char* pathOutsideRepo, const char* virtualRootPath); NATIVE_TESTS_EXPORT bool ProjFS_MoveFolder_OutsideToOutside(const char* pathOutsideRepo, const char* virtualRootPath); }
408
5,169
{ "name": "DYEntireRealTimeSDK", "version": "0.0.1", "summary": "本SDK封装了多厂商的音视频SDK和相应的即时通讯SDK", "description": "1. 简化和统一了各厂商服务注册和接口调用方式 2. 支持在各厂商SDK之间安全、高效地调度(目前支持声网、腾讯) 3. 多条通道并行发送自定义消息和消息去重 4. 封装了日志模块(厂商log,自定义log,DDLog),用于数据跟踪和后续分析 5. 解决多SDK引入和编译问题", "homepage": "https://github.com/jackleemeta/DYEntireRealTimeSDK.git", "license": "MIT", "platforms": { "ios": "8.0" }, "authors": { "<NAME>": "<EMAIL>" }, "requires_arc": true, "source": { "git": "https://github.com/jackleemeta/DYEntireRealTimeSDK.git", "tag": "0.0.1" }, "libraries": [ "c++", "resolv", "crypto" ], "default_subspecs": "Framework", "dependencies": { "CocoaLumberjack/Swift": [ "3.6.1" ], "Device": [ ], "CocoaMQTT": [ "1.1.3" ], "TXLiteAVSDK_TRTC": [ "7.2.8961" ], "TXIMSDK_iOS": [ "4.7.2" ], "AgoraRtcEngine_iOS": [ "2.3.4.108" ] }, "static_framework": true, "xcconfig": { "VALID_ARCHS": "armv7 arm64e armv7s arm64 x86_64", "ENABLE_BITCODE": "NO" }, "pod_target_xcconfig": { "ARCHS[sdk=iphonesimulator*]": "$(ARCHS_STANDARD_64_BIT)" }, "subspecs": [ { "name": "Framework", "vendored_frameworks": [ "DYRealTimeSDK/Framewoks/*.framework", "Carthage/Build/**/*.framework" ], "vendored_libraries": "DYRealTimeSDK/Libraries/*.a" }, { "name": "Source", "vendored_frameworks": "DYRealTimeSDK/Framewoks/*.framework", "source_files": "DYRealTimeSDK/Source/**/*.swift", "vendored_libraries": "DYRealTimeSDK/Libraries/*.a" } ] }
1,070
1,330
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.struts2.rest.config.entities;

import java.util.Map;
import java.util.Objects;

import org.apache.struts2.config.entities.ConstantConfig;
import org.apache.struts2.rest.RestConstants;

/**
 * Bean-style configuration holder for the constants understood by the Struts
 * REST plugin (see {@link RestConstants} for the constant keys). Each property
 * maps one-to-one onto a plugin constant; unset properties stay {@code null}.
 */
public class RestConstantConfig extends ConstantConfig {
    // Default URI extension handled by the REST mapper (e.g. "xhtml,,xml,json").
    private String restDefaultExtension;
    // Enables the plugin's request logging interceptor.
    private Boolean restLogger;
    // Result name rendered when a REST action fails.
    private String restDefaultErrorResultName;
    // When true, restricts content negotiation handling to GET requests.
    private Boolean restContentRestrictToGet;
    // Action method names the REST mapper dispatches to, per HTTP verb/route.
    private String mapperIndexMethodName;
    private String mapperGetMethodName;
    private String mapperPostMethodName;
    private String mapperEditMethodName;
    private String mapperNewMethodName;
    private String mapperDeleteMethodName;
    private String mapperPutMethodName;
    private String mapperOptionsMethodName;
    private String mapperPostContinueMethodName;
    private String mapperPutContinueMethodName;
    // Namespace the REST mapper serves.
    private String restNamespace;
    // HTTP status code returned when validation fails.
    private String restValidationFailureStatusCode;

    /**
     * Merges the REST plugin constants into the map produced by
     * {@link ConstantConfig#getAllAsStringsMap()}. Boolean values are rendered
     * with {@link Objects#toString(Object, String)} so that unset properties
     * are stored as {@code null} entries rather than the string "null".
     */
    @Override
    public Map<String, String> getAllAsStringsMap() {
        Map<String, String> map = super.getAllAsStringsMap();

        map.put(RestConstants.REST_DEFAULT_EXTENSION, restDefaultExtension);
        map.put(RestConstants.REST_LOGGER, Objects.toString(restLogger, null));
        map.put(RestConstants.REST_DEFAULT_ERROR_RESULT_NAME, restDefaultErrorResultName);
        map.put(RestConstants.REST_CONTENT_RESTRICT_TO_GET, Objects.toString(restContentRestrictToGet, null));
        map.put(RestConstants.REST_MAPPER_INDEX_METHOD_NAME, mapperIndexMethodName);
        map.put(RestConstants.REST_MAPPER_GET_METHOD_NAME, mapperGetMethodName);
        map.put(RestConstants.REST_MAPPER_POST_METHOD_NAME, mapperPostMethodName);
        map.put(RestConstants.REST_MAPPER_EDIT_METHOD_NAME, mapperEditMethodName);
        map.put(RestConstants.REST_MAPPER_NEW_METHOD_NAME, mapperNewMethodName);
        map.put(RestConstants.REST_MAPPER_DELETE_METHOD_NAME, mapperDeleteMethodName);
        map.put(RestConstants.REST_MAPPER_PUT_METHOD_NAME, mapperPutMethodName);
        map.put(RestConstants.REST_MAPPER_OPTIONS_METHOD_NAME, mapperOptionsMethodName);
        map.put(RestConstants.REST_MAPPER_POST_CONTINUE_METHOD_NAME, mapperPostContinueMethodName);
        map.put(RestConstants.REST_MAPPER_PUT_CONTINUE_METHOD_NAME, mapperPutContinueMethodName);
        map.put(RestConstants.STRUTS_REST_NAMESPACE, restNamespace);
        map.put(RestConstants.REST_VALIDATION_FAILURE_STATUS_CODE, restValidationFailureStatusCode);

        return map;
    }

    // --- plain accessors; no logic beyond field get/set ---

    public String getRestDefaultExtension() {
        return restDefaultExtension;
    }

    public void setRestDefaultExtension(String restDefaultExtension) {
        this.restDefaultExtension = restDefaultExtension;
    }

    public Boolean getRestLogger() {
        return restLogger;
    }

    public void setRestLogger(Boolean restLogger) {
        this.restLogger = restLogger;
    }

    public String getRestDefaultErrorResultName() {
        return restDefaultErrorResultName;
    }

    public void setRestDefaultErrorResultName(String restDefaultErrorResultName) {
        this.restDefaultErrorResultName = restDefaultErrorResultName;
    }

    public Boolean getRestContentRestrictToGet() {
        return restContentRestrictToGet;
    }

    public void setRestContentRestrictToGet(Boolean restContentRestrictToGet) {
        this.restContentRestrictToGet = restContentRestrictToGet;
    }

    public String getMapperIndexMethodName() {
        return mapperIndexMethodName;
    }

    public void setMapperIndexMethodName(String mapperIndexMethodName) {
        this.mapperIndexMethodName = mapperIndexMethodName;
    }

    public String getMapperGetMethodName() {
        return mapperGetMethodName;
    }

    public void setMapperGetMethodName(String mapperGetMethodName) {
        this.mapperGetMethodName = mapperGetMethodName;
    }

    public String getMapperPostMethodName() {
        return mapperPostMethodName;
    }

    public void setMapperPostMethodName(String mapperPostMethodName) {
        this.mapperPostMethodName = mapperPostMethodName;
    }

    public String getMapperEditMethodName() {
        return mapperEditMethodName;
    }

    public void setMapperEditMethodName(String mapperEditMethodName) {
        this.mapperEditMethodName = mapperEditMethodName;
    }

    public String getMapperNewMethodName() {
        return mapperNewMethodName;
    }

    public void setMapperNewMethodName(String mapperNewMethodName) {
        this.mapperNewMethodName = mapperNewMethodName;
    }

    public String getMapperDeleteMethodName() {
        return mapperDeleteMethodName;
    }

    public void setMapperDeleteMethodName(String mapperDeleteMethodName) {
        this.mapperDeleteMethodName = mapperDeleteMethodName;
    }

    public String getMapperPutMethodName() {
        return mapperPutMethodName;
    }

    public void setMapperPutMethodName(String mapperPutMethodName) {
        this.mapperPutMethodName = mapperPutMethodName;
    }

    public String getMapperOptionsMethodName() {
        return mapperOptionsMethodName;
    }

    public void setMapperOptionsMethodName(String mapperOptionsMethodName) {
        this.mapperOptionsMethodName = mapperOptionsMethodName;
    }

    public String getMapperPostContinueMethodName() {
        return mapperPostContinueMethodName;
    }

    public void setMapperPostContinueMethodName(String mapperPostContinueMethodName) {
        this.mapperPostContinueMethodName = mapperPostContinueMethodName;
    }

    public String getMapperPutContinueMethodName() {
        return mapperPutContinueMethodName;
    }

    public void setMapperPutContinueMethodName(String mapperPutContinueMethodName) {
        this.mapperPutContinueMethodName = mapperPutContinueMethodName;
    }

    public String getRestNamespace() {
        return restNamespace;
    }

    public void setRestNamespace(String restNamespace) {
        this.restNamespace = restNamespace;
    }

    public String getRestValidationFailureStatusCode() {
        return restValidationFailureStatusCode;
    }

    public void setRestValidationFailureStatusCode(String restValidationFailureStatusCode) {
        this.restValidationFailureStatusCode = restValidationFailureStatusCode;
    }
}
2,415
393
package com.marverenic.music.ui.library.playlist.contents.edit;

import android.content.Context;

import androidx.databinding.Bindable;
import androidx.recyclerview.widget.RecyclerView;

import com.marverenic.adapter.HeterogeneousAdapter;
import com.marverenic.music.BR;
import com.marverenic.music.data.store.MusicStore;
import com.marverenic.music.data.store.PlaylistStore;
import com.marverenic.music.model.AutoPlaylist;
import com.marverenic.music.model.playlistrules.AutoPlaylistRule;
import com.marverenic.music.ui.BaseViewModel;
import com.marverenic.music.view.BackgroundDecoration;
import com.marverenic.music.view.DividerDecoration;

/**
 * View model backing the auto-playlist editor screen. Presents the playlist
 * being edited as a two-section RecyclerView: a header (name and settings)
 * followed by one row per {@link AutoPlaylistRule}. Edits are applied to
 * {@code mEditedPlaylist}; {@code mOriginalPlaylist} is kept for comparison.
 */
public class AutoPlaylistEditViewModel extends BaseViewModel {

    private PlaylistStore mPlaylistStore;
    private MusicStore mMusicStore;

    // Snapshot of the playlist as it was when editing started.
    private AutoPlaylist mOriginalPlaylist;
    // Mutable builder holding the in-progress edits.
    private AutoPlaylist.Builder mEditedPlaylist;

    // Last reported scroll offset of the list, exposed for data binding.
    private int mScrollPosition;

    private HeterogeneousAdapter mAdapter;

    public AutoPlaylistEditViewModel(Context context, AutoPlaylist originalPlaylist,
                                     AutoPlaylist.Builder editedPlaylist,
                                     PlaylistStore playlistStore, MusicStore musicStore) {

        super(context);

        mOriginalPlaylist = originalPlaylist;
        mEditedPlaylist = editedPlaylist;
        mPlaylistStore = playlistStore;
        mMusicStore = musicStore;

        createAdapter();
    }

    // Builds the adapter with the header section first, then the rule rows.
    // Section order matters: addRule() below assumes rules start at index 1.
    private void createAdapter() {
        mAdapter = new HeterogeneousAdapter();
        mAdapter.addSection(new RuleHeaderSingleton(mOriginalPlaylist, mEditedPlaylist, mPlaylistStore));
        mAdapter.addSection(new RuleSection(mEditedPlaylist.getRules(), mMusicStore, mPlaylistStore));
    }

    @Bindable
    public RecyclerView.Adapter getAdapter() {
        return mAdapter;
    }

    @Bindable
    public RecyclerView.ItemDecoration[] getItemDecorations() {
        return new RecyclerView.ItemDecoration[] {
                new BackgroundDecoration(),
                new DividerDecoration(getContext())
        };
    }

    @Bindable
    public int getScrollPosition() {
        return mScrollPosition;
    }

    // Only notifies the binding when the value actually changed, presumably to
    // avoid redundant re-binds / feedback loops — TODO confirm against layout.
    public void setScrollPosition(int scrollY) {
        if (scrollY != mScrollPosition) {
            mScrollPosition = scrollY;
            notifyPropertyChanged(BR.scrollPosition);
        }
    }

    /**
     * Appends an empty rule row and notifies the adapter. The inserted adapter
     * position is the new rule count (rules are offset by the header section).
     */
    public void addRule() {
        mEditedPlaylist.getRules().add(AutoPlaylistRule.emptyRule());
        mAdapter.notifyItemInserted(mEditedPlaylist.getRules().size());
    }

    /** Scrolls back to the top so the playlist-name field is visible. */
    public void focusPlaylistName() {
        setScrollPosition(0);
    }
}
993
8,232
// Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #include <algorithm> #include <cassert> #include <concepts> #include <ranges> #include <utility> #include <range_algorithm_support.hpp> using namespace std; // Validate that copy_result aliases in_out_result STATIC_ASSERT(same_as<ranges::copy_result<int, double>, ranges::in_out_result<int, double>>); // Validate dangling story STATIC_ASSERT( same_as<decltype(ranges::copy(borrowed<false>{}, nullptr_to<int>)), ranges::copy_result<ranges::dangling, int*>>); STATIC_ASSERT(same_as<decltype(ranges::copy(borrowed<true>{}, nullptr_to<int>)), ranges::copy_result<int*, int*>>); struct instantiator { static constexpr int input[3] = {13, 42, 1729}; template <ranges::input_range Read, indirectly_writable<ranges::range_reference_t<Read>> Write> static constexpr void call() { using ranges::copy, ranges::copy_result, ranges::iterator_t; { // Validate iterator + sentinel overload int output[3] = {-1, -1, -1}; Read wrapped_input{input}; auto result = copy(wrapped_input.begin(), wrapped_input.end(), Write{output}); STATIC_ASSERT(same_as<decltype(result), copy_result<iterator_t<Read>, Write>>); assert(result.in == wrapped_input.end()); assert(result.out.peek() == output + 3); assert(ranges::equal(output, input)); } { // Validate range overload int output[3] = {-1, -1, -1}; Read wrapped_input{input}; auto result = copy(wrapped_input, Write{output}); STATIC_ASSERT(same_as<decltype(result), copy_result<iterator_t<Read>, Write>>); assert(result.in == wrapped_input.end()); assert(result.out.peek() == output + 3); assert(ranges::equal(output, input)); } } }; int main() { STATIC_ASSERT((test_in_write<instantiator, int const, int>(), true)); test_in_write<instantiator, int const, int>(); }
890
2,890
package com.github.ltsopensource.json.bean;

import java.lang.reflect.Method;

/**
 * Pairs a bean property name with the reflective {@link Method} used to
 * access it during JSON (de)serialization.
 *
 * @author <NAME> (<EMAIL>) on 12/31/15.
 */
public class MethodInfo {

    /** Name of the bean property this accessor belongs to. */
    private String fieldName;
    /** Reflective getter/setter backing the property. */
    private Method method;

    public MethodInfo(String fieldName, Method method) {
        this.fieldName = fieldName;
        this.method = method;
    }

    public Method getMethod() {
        return method;
    }

    public void setMethod(Method method) {
        this.method = method;
    }

    public String getFieldName() {
        return fieldName;
    }

    public void setFieldName(String fieldName) {
        this.fieldName = fieldName;
    }
}
247
326
// Boost.Geometry (aka GGL, Generic Geometry Library)

// Copyright (c) 2007-2014 <NAME>, Amsterdam, the Netherlands.
// Copyright (c) 2008-2014 <NAME>, Paris, France.
// Copyright (c) 2009-2014 <NAME>, London, UK.

// This file was modified by Oracle on 2014-2021.
// Modifications copyright (c) 2014-2021, Oracle and/or its affiliates.
// Contributed and/or modified by <NAME>, on behalf of Oracle
// Contributed and/or modified by <NAME>, on behalf of Oracle

// Parts of Boost.Geometry are redesigned from Geodan's Geographic Library
// (geolib/GGL), copyright (c) 1995-2010 Geodan, Amsterdam, the Netherlands.

// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_GEOMETRY_STRATEGIES_DEFAULT_LENGTH_RESULT_HPP
#define BOOST_GEOMETRY_STRATEGIES_DEFAULT_LENGTH_RESULT_HPP

#include <boost/geometry/algorithms/detail/select_geometry_type.hpp>
#include <boost/geometry/core/coordinate_type.hpp>
#include <boost/geometry/util/select_most_precise.hpp>
#include <boost/geometry/util/type_traits.hpp>

namespace boost { namespace geometry {

namespace resolve_strategy {

// NOTE: The implementation was simplified greatly, preserving the old
// behavior. In general case the result types of Strategies should be
// taken into account.
// It would probably be enough to use distance_result and
// default_distance_result here.

} // namespace resolve_strategy

namespace resolve_dynamic {

// Primary template; specialized below for the type_sequence of geometry
// types contained in (possibly dynamic/variant) Geometry.
template <typename Sequence>
struct default_length_result_impl;

// The result type is the most precise of all coordinate types, widened to
// at least long double so integer coordinates still get a floating length.
template <typename ...Geometries>
struct default_length_result_impl<util::type_sequence<Geometries...>>
{
    using type = typename select_most_precise
        <
            typename coordinate_type<Geometries>::type...,
            long double
        >::type;
};

template <typename Geometry>
struct default_length_result
    : default_length_result_impl<typename detail::geometry_types<Geometry>::type>
{};

} // namespace resolve_dynamic

/*!
\brief Meta-function defining return type of length function
\ingroup length
\note Length of a line of integer coordinates can be double.
    So we take at least a double. If Big Number types are used,
    we take that type.
*/
template <typename Geometry>
struct default_length_result
    : resolve_dynamic::default_length_result<Geometry>
{};

}} // namespace boost::geometry

#endif // BOOST_GEOMETRY_STRATEGIES_DEFAULT_LENGTH_RESULT_HPP
842
677
<gh_stars>100-1000 // Copyright 2017 The Lynx Authors. All rights reserved. #include <base/log/logging.h> #include "render/image_view.h" #include "render/impl/render_object_impl.h" #include "render/render_tree_host.h" namespace lynx { const static char* kImageSrcAttribute = "src"; #if OS_ANDROID const std::string kAssets = "Asset://assets/"; #elif OS_IOS const std::string kAssets = "Assets://assets.bundle/assets/"; #endif ImageView::ImageView(const char *tag_name, uint64_t id, RenderTreeHost *host) : RenderObject(tag_name, LYNX_IMAGEVIEW, id, RenderObjectImpl::Create(host->thread_manager(), LYNX_IMAGEVIEW), host) { } base::Size ImageView::OnMeasure(int width_descriptor, int height_descriptor) { if (!CSS_IS_UNDEFINED(css_style_.height())) { measured_size_.height_ = css_style_.height(); } if (!CSS_IS_UNDEFINED(css_style_.width())) { measured_size_.width_ = css_style_.width(); } return measured_size_; } void ImageView::SetAttribute(const std::string &key, const std::string &value) { if(key.compare(kImageSrcAttribute) == 0) { if (value.find("http:") != std::string::npos || value.find("https:") != std::string::npos) { RenderObject::SetAttribute(key, value); } else { RenderObject::SetAttribute(key, kAssets + value); } } else { RenderObject::SetAttribute(key, value); } } } // namespace lynx
717
348
{"nom":"Manso","circ":"2ème circonscription","dpt":"Haute-Corse","inscrits":139,"abs":40,"votants":99,"blancs":1,"nuls":0,"exp":98,"res":[{"nuance":"REG","nom":"<NAME>","voix":87},{"nuance":"REM","nom":"<NAME>","voix":11}]}
91
2,728
/* * Phusion Passenger - https://www.phusionpassenger.com/ * Copyright (c) 2013-2017 Phusion Holding B.V. * * "Passenger", "Phusion Passenger" and "Union Station" are registered * trademarks of Phusion Holding B.V. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 */

#include <Exceptions.h>
#include <stdlib.h>
#include <string.h>

// C-ABI helpers managing the PP_Error out-parameter struct used by the
// C bindings. Ownership rule: `message` is heap-allocated (and must be
// freed) unless `messageIsStatic` is set, in which case it points at a
// string literal and must never be freed.

// Puts *error into a known-empty state. Must run before first use so that
// pp_error_destroy() can safely free `message`.
void
pp_error_init(PP_Error *error) {
	error->message = NULL;
	error->errnoCode = PP_NO_ERRNO;
	error->messageIsStatic = 0;
}

// Frees the heap-allocated message (if any) and resets the ownership flags.
// Static messages are intentionally left untouched.
void
pp_error_destroy(PP_Error *error) {
	if (!error->messageIsStatic) {
		free(static_cast<void *>(const_cast<char *>(error->message)));
		error->message = NULL;
		error->messageIsStatic = 0;
	}
}

// Copies ex.what() into *error, releasing any previously-owned message.
// If strdup() fails, a static fallback message is installed and marked
// static so pp_error_destroy() will not free it. For
// Passenger::SystemException the original errno value is preserved.
void
pp_error_set(const std::exception &ex, PP_Error *error) {
	const Passenger::SystemException *sys_e;

	if (error == NULL) {
		return;
	}

	// Release a previously-set, heap-owned message before overwriting it.
	if (error->message != NULL && !error->messageIsStatic) {
		free(static_cast<void *>(const_cast<char *>(error->message)));
	}

	error->message = strdup(ex.what());
	// strdup() returning NULL doubles as the "message is static" signal,
	// because the fallback assigned below is a string literal.
	error->messageIsStatic = error->message == NULL;
	if (error->message == NULL) {
		error->message = "Unknown error message (unable to allocate memory for the message)";
	}

	sys_e = dynamic_cast<const Passenger::SystemException *>(&ex);
	if (sys_e != NULL) {
		error->errnoCode = sys_e->code();
	} else {
		error->errnoCode = PP_NO_ERRNO;
	}
}
752
743
package pl.allegro.tech.hermes.tracker.elasticsearch;

import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.rules.ExternalResource;
import pl.allegro.tech.embeddedelasticsearch.EmbeddedElastic;
import pl.allegro.tech.hermes.test.helper.util.Ports;

import java.net.InetAddress;
import java.nio.file.Files;
import java.util.concurrent.TimeUnit;

import static pl.allegro.tech.embeddedelasticsearch.PopularProperties.CLUSTER_NAME;
import static pl.allegro.tech.embeddedelasticsearch.PopularProperties.HTTP_PORT;
import static pl.allegro.tech.embeddedelasticsearch.PopularProperties.TRANSPORT_TCP_PORT;

/**
 * JUnit {@link ExternalResource} that boots an embedded Elasticsearch node on
 * free ports for tracker tests and exposes a transport {@link Client} to it.
 * The node is started in {@link #before()} and torn down in {@link #after()}.
 */
public class ElasticsearchResource extends ExternalResource implements LogSchemaAware {

    private static final String ELASTIC_VERSION = "6.1.4";
    private static final String CLUSTER_NAME_VALUE = "myTestCluster";

    private final EmbeddedElastic embeddedElastic;
    private Client client;

    public ElasticsearchResource() {
        // Two dynamically-allocated ports so parallel test JVMs do not clash.
        int port = Ports.nextAvailable();
        int httpPort = Ports.nextAvailable();

        try {
            embeddedElastic = EmbeddedElastic.builder()
                    .withElasticVersion(ELASTIC_VERSION)
                    .withSetting(TRANSPORT_TCP_PORT, port)
                    .withSetting(HTTP_PORT, httpPort)
                    .withSetting(CLUSTER_NAME, CLUSTER_NAME_VALUE)
                    .withEsJavaOpts("-Xms128m -Xmx512m")
                    .withStartTimeout(1, TimeUnit.MINUTES)
                    .withCleanInstallationDirectoryOnStop(true)
                    .withInstallationDirectory(Files.createTempDirectory("elasticsearch-installation-" + port).toFile())
                    .build();
        } catch (Exception e) {
            throw new RuntimeException("Failed to configure embedded Elasticsearch", e);
        }
    }

    @Override
    public void before() throws Throwable {
        embeddedElastic.start();
        client = new PreBuiltTransportClient(Settings.builder().put(CLUSTER_NAME, CLUSTER_NAME_VALUE).build())
                .addTransportAddress(
                        new TransportAddress(InetAddress.getByName("localhost"), embeddedElastic.getTransportTcpPort()));
    }

    @Override
    public void after() {
        // Close the client before stopping the node; the previous order leaked
        // the transport client whenever stop() threw. The finally block keeps
        // the node teardown running even if close() fails.
        try {
            if (client != null) {
                client.close();
            }
        } finally {
            embeddedElastic.stop();
        }
    }

    public Client client() {
        return client;
    }

    public AdminClient adminClient() {
        return client.admin();
    }

    public ImmutableOpenMap<String, IndexMetaData> getIndices() {
        return client.admin().cluster().prepareState().execute().actionGet().getState().getMetaData().getIndices();
    }

    /** Removes all indices and templates, leaving the node running. */
    public void cleanStructures() {
        embeddedElastic.deleteIndices();
        embeddedElastic.deleteTemplates();
    }
}
1,184
3,227
// Copyright (c) 1997 INRIA Sophia-Antipolis (France).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s)     : <NAME>

#ifndef CGAL_TRIANGULATION_ON_SPHERE_VERTEX_BASE_2_H
#define CGAL_TRIANGULATION_ON_SPHERE_VERTEX_BASE_2_H

#include <CGAL/license/Triangulation_on_sphere_2.h>

#include <CGAL/Triangulation_ds_vertex_base_2.h>

namespace CGAL {

// Vertex base for 2D triangulations on the sphere: augments the purely
// combinatorial vertex base Vb with a geometric point of type
// GT::Point_on_sphere_2.
template <typename GT,
          typename Vb = Triangulation_ds_vertex_base_2<> >
class Triangulation_on_sphere_vertex_base_2
  : public Vb
{
  typedef typename Vb::Triangulation_data_structure        Tds;

public:
  typedef GT                                               Geom_traits;
  typedef typename GT::Point_on_sphere_2                   Point;
  typedef Tds                                              Triangulation_data_structure;
  typedef typename Tds::Face_handle                        Face_handle;
  typedef typename Tds::Vertex_handle                      Vertex_handle;

  // Standard TDS rebind mechanism: produces this vertex type bound to
  // another triangulation data structure TDS2.
  template < typename TDS2 >
  struct Rebind_TDS
  {
    typedef typename Vb::template Rebind_TDS<TDS2>::Other  Vb2;
    typedef Triangulation_on_sphere_vertex_base_2<GT, Vb2> Other;
  };

private:
  Point _p; // geometric position of the vertex on the sphere

public:
  Triangulation_on_sphere_vertex_base_2 () : Vb(), _p() { }
  Triangulation_on_sphere_vertex_base_2(const Point & p) : Vb(), _p(p) { }
  Triangulation_on_sphere_vertex_base_2(const Point & p, Face_handle f) : Vb(f), _p(p) { }
  Triangulation_on_sphere_vertex_base_2(Face_handle f) : Vb(f) { }

  void set_point(const Point & p) { _p = p; }
  const Point& point() const { return _p; }

  // the non-const version of point() is undocumented but needed to make the
  // point iterator work using Lutz' projection scheme
  Point& point() { return _p; }

  // the following trivial is_valid allows the user of derived face base
  // classes to add their own purpose checking
  bool is_valid(bool /* verbose */ = false, int /* level */ = 0) const { return true; }
};

// Stream extraction: reads the combinatorial part via Vb, then the point.
// Non-combinatorial information; default = point.
template < class GT, class Vb >
std::istream&
operator>>(std::istream &is, Triangulation_on_sphere_vertex_base_2<GT, Vb> &v)
{
  return is >> static_cast<Vb&>(v) >> v.point();
}

// Stream insertion: writes the combinatorial part via Vb, then the point.
// Non-combinatorial information; default = point.
template < class GT, class Vb >
std::ostream&
operator<<(std::ostream &os, const Triangulation_on_sphere_vertex_base_2<GT, Vb> &v)
{
  return os << static_cast<const Vb&>(v) << v.point();
}

} // namespace CGAL

#endif //CGAL_TRIANGULATION_ON_SPHERE_VERTEX_BASE_2_H
1,198
1,085
"""
This migration script adds the request_event table and removes the state field
in the request table
"""
import datetime
import logging

from sqlalchemy import (
    Column,
    DateTime,
    ForeignKey,
    Integer,
    MetaData,
    Table,
    TEXT
)

from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import (
    create_table,
    drop_column,
    localtimestamp,
    nextval
)

log = logging.getLogger(__name__)
now = datetime.datetime.utcnow
metadata = MetaData()

# New audit table: one row per state change of a request. Replaces the single
# mutable `state` column previously kept on the request table itself.
RequestEvent_table = Table('request_event', metadata,
                           Column("id", Integer, primary_key=True),
                           Column("create_time", DateTime, default=now),
                           Column("update_time", DateTime, default=now, onupdate=now),
                           Column("request_id", Integer, ForeignKey("request.id"), index=True),
                           Column("state", TrimmedString(255), index=True),
                           Column("comment", TEXT))


def upgrade(migrate_engine):
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()

    create_table(RequestEvent_table)

    # move the current state of all existing requests to the request_event table.
    # The statement is assembled as raw SQL because nextval()/localtimestamp()
    # expand to dialect-specific expressions; the interpolated values come from
    # those helpers, not from user input.
    cmd = \
        "INSERT INTO request_event " + \
        "SELECT %s AS id," + \
        "%s AS create_time," + \
        "%s AS update_time," + \
        "request.id AS request_id," + \
        "request.state AS state," + \
        "'%s' AS comment " + \
        "FROM request;"
    cmd = cmd % (nextval(migrate_engine, 'request_event'),
                 localtimestamp(migrate_engine),
                 localtimestamp(migrate_engine),
                 'Imported from request table')
    migrate_engine.execute(cmd)

    # The per-request state column is now redundant; its history lives in
    # request_event.
    drop_column('state', 'request', metadata)


def downgrade(migrate_engine):
    # Irreversible: the dropped request.state column is not reconstructed.
    pass
628
10,225
<reponame>mweber03/quarkus package io.quarkus.amazon.lambda.http; import io.quarkus.amazon.lambda.http.model.AwsProxyRequest; import io.quarkus.security.identity.request.BaseAuthenticationRequest; /** * This will execute if and only if there is no identity after invoking a LambdaAuthenticationRequest */ final public class DefaultLambdaAuthenticationRequest extends BaseAuthenticationRequest { private AwsProxyRequest event; public DefaultLambdaAuthenticationRequest(AwsProxyRequest event) { this.event = event; } public AwsProxyRequest getEvent() { return event; } }
194
2,151
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/app_list/arc/arc_usb_host_permission_manager_factory.h" #include "chrome/browser/profiles/incognito_helpers.h" #include "chrome/browser/ui/app_list/arc/arc_app_list_prefs_factory.h" #include "chrome/browser/ui/app_list/arc/arc_usb_host_permission_manager.h" #include "components/arc/usb/usb_host_bridge.h" #include "components/keyed_service/content/browser_context_dependency_manager.h" #include "content/public/browser/browser_context.h" namespace arc { // static ArcUsbHostPermissionManager* ArcUsbHostPermissionManagerFactory::GetForBrowserContext( content::BrowserContext* context) { return static_cast<ArcUsbHostPermissionManager*>( GetInstance()->GetServiceForBrowserContext(context, true)); } // static ArcUsbHostPermissionManagerFactory* ArcUsbHostPermissionManagerFactory::GetInstance() { return base::Singleton<ArcUsbHostPermissionManagerFactory>::get(); } ArcUsbHostPermissionManagerFactory::ArcUsbHostPermissionManagerFactory() : BrowserContextKeyedServiceFactory( "ArcUsbHostPermissionManager", BrowserContextDependencyManager::GetInstance()) { DependsOn(ArcAppListPrefsFactory::GetInstance()); DependsOn(ArcUsbHostBridge::GetFactory()); } ArcUsbHostPermissionManagerFactory::~ArcUsbHostPermissionManagerFactory() {} KeyedService* ArcUsbHostPermissionManagerFactory::BuildServiceInstanceFor( content::BrowserContext* context) const { return ArcUsbHostPermissionManager::Create(context); } content::BrowserContext* ArcUsbHostPermissionManagerFactory::GetBrowserContextToUse( content::BrowserContext* context) const { // This matches the logic in ExtensionSyncServiceFactory, which uses the // orginal browser context. return chrome::GetBrowserContextRedirectedInIncognito(context); } } // namespace arc
611
35,083
//
//  ========================================================================
//  Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
//  ------------------------------------------------------------------------
//  All rights reserved. This program and the accompanying materials
//  are made available under the terms of the Eclipse Public License v1.0
//  and Apache License v2.0 which accompanies this distribution.
//
//      The Eclipse Public License is available at
//      http://www.eclipse.org/legal/epl-v10.html
//
//      The Apache License v2.0 is available at
//      http://www.opensource.org/licenses/apache2.0.php
//
//  You may elect to redistribute this code under either of these licenses.
//  ========================================================================
//

package org.eclipse.jetty.server.session;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import java.io.IOException;

import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.SessionManager;
import org.eclipse.jetty.server.handler.ScopedHandler;

/**
 * ScopedHandler that associates a {@link SessionManager} with the request
 * handling chain and delegates handling to the next handler in scope.
 *
 * Adapted from https://github.com/eclipse/jetty.project/blob/jetty-9.3.25.v20180904/
 * jetty-server/src/main/java/org/eclipse/jetty/server/session/SessionHandler.java
 */
public class SessionHandler extends ScopedHandler
{
    private SessionManager _sessionManager;

    /** Creates a handler with no session manager; one must be set before start. */
    public SessionHandler()
    {
    }

    /**
     * @param manager
     *            The session manager
     */
    public SessionHandler(SessionManager manager)
    {
        setSessionManager(manager);
    }

    /**
     * @return Returns the sessionManager.
     */
    public SessionManager getSessionManager()
    {
        return _sessionManager;
    }

    /**
     * @param sessionManager
     *            The sessionManager to set. A {@code null} argument is silently
     *            ignored; calling this after the handler has started throws.
     */
    public void setSessionManager(SessionManager sessionManager)
    {
        // The manager may not be swapped on a live handler.
        if (isStarted())
        {
            throw new IllegalStateException();
        }
        if (sessionManager != null)
        {
            updateBean(_sessionManager,sessionManager);
            _sessionManager=sessionManager;
        }
    }

    /*
     * @see org.eclipse.jetty.server.Handler#handle(javax.servlet.http.HttpServletRequest,
     * javax.servlet.http.HttpServletResponse, int)
     */
    @Override
    public void doHandle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
            throws IOException, ServletException
    {
        // start manual inline of nextHandle(target,baseRequest,request,response);
        // (kept inlined for performance; mirrors ScopedHandler.nextHandle)
        if (_nextScope != null && _nextScope == _handler)
        {
            _nextScope.doHandle(target,baseRequest,request,response);
        }
        else if (_handler != null)
        {
            _handler.handle(target,baseRequest,request,response);
            // end manual inline
        }
    }

    /** Forwards to the session manager, if one is set; otherwise a no-op. */
    public void clearEventListeners()
    {
        if (_sessionManager != null)
        {
            _sessionManager.clearEventListeners();
        }
    }
}
898
3,062
package com.mapswithme.maps.editor.data; import android.os.Parcel; import android.os.Parcelable; import androidx.annotation.NonNull; public class FeatureCategory implements Parcelable { @NonNull private final String mType; @NonNull private final String mLocalizedTypeName; public FeatureCategory(@NonNull String type, @NonNull String localizedTypeName) { mType = type; mLocalizedTypeName = localizedTypeName; } private FeatureCategory(Parcel source) { mType = source.readString(); mLocalizedTypeName = source.readString(); } @NonNull public String getType() { return mType; } @NonNull public String getLocalizedTypeName() { return mLocalizedTypeName; } @Override public int describeContents() { return 0; } @Override public void writeToParcel(Parcel dest, int flags) { dest.writeString(mType); dest.writeString(mLocalizedTypeName); } public static final Creator<FeatureCategory> CREATOR = new Creator<FeatureCategory>() { @Override public FeatureCategory createFromParcel(Parcel source) { return new FeatureCategory(source); } @Override public FeatureCategory[] newArray(int size) { return new FeatureCategory[size]; } }; }
430
381
package net.laoyeye.yyblog.service.impl;

import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;

import net.laoyeye.yyblog.common.DataGridResult;
import net.laoyeye.yyblog.mapper.LogMapper;
import net.laoyeye.yyblog.model.LogDO;
import net.laoyeye.yyblog.model.query.LogQuery;
import net.laoyeye.yyblog.service.LogService;

/**
 * Persistence service for operation/audit log entries.
 */
@Service
public class LogServiceImpl implements LogService {

    @Autowired
    LogMapper logMapper;

    /**
     * Persists a log entry asynchronously so request threads are not blocked
     * by audit-logging I/O.
     */
    @Async
    @Override
    public void save(LogDO log) {
        logMapper.save(log);
    }

    /**
     * Returns one page of log entries filtered by username and operation, as
     * a data-grid result carrying the page rows and the total row count.
     */
    @Override
    public DataGridResult list(LogQuery query) {
        // PageHelper intercepts the next mapper call and applies LIMIT/OFFSET.
        PageHelper.startPage(query.getPage(), query.getLimit());
        List<LogDO> list = logMapper.listByUsernameAndOperation(query.getUsername(), query.getOperation());
        // fetch the total record count from PageHelper's page metadata
        PageInfo<LogDO> pageInfo = new PageInfo<LogDO>(list);
        long total = pageInfo.getTotal();
        // build the result wrapper returned to the grid widget
        DataGridResult result = new DataGridResult();
        result.setData(list);
        result.setCount(total);
        return result;
    }

    @Override
    public int remove(Long id) {
        return logMapper.remove(id);
    }

    @Override
    public int removeBatch(Long[] ids) {
        return logMapper.removeBatch(ids);
    }
}
666
691
/*=============================================================================
  Copyright (c) 2011-2019 <NAME>
  https://github.com/bolero-MURAKAMI/Sprout

  Distributed under the Boost Software License, Version 1.0. (See accompanying
  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPROUT_TYPE_TRAITS_IS_CHAR_TYPE_HPP
#define SPROUT_TYPE_TRAITS_IS_CHAR_TYPE_HPP

#include <sprout/config.hpp>
#include <sprout/type_traits/integral_constant.hpp>

namespace sprout {
	//
	// is_char_type
	//
	// Trait: true for the built-in character types (char, wchar_t, and —
	// when unicode literals are available — char16_t, char32_t), false
	// otherwise. cv-qualifiers are stripped via the partial specializations.
	template<typename T>
	struct is_char_type
		: public sprout::false_type
	{};
	template<typename T>
	struct is_char_type<T const>
		: public sprout::is_char_type<T>
	{};
	template<typename T>
	struct is_char_type<T volatile>
		: public sprout::is_char_type<T>
	{};
	template<typename T>
	struct is_char_type<T const volatile>
		: public sprout::is_char_type<T>
	{};
	template<>
	struct is_char_type<char>
		: public sprout::true_type
	{};
	template<>
	struct is_char_type<wchar_t>
		: public sprout::true_type
	{};
#if SPROUT_USE_UNICODE_LITERALS
	template<>
	struct is_char_type<char16_t>
		: public sprout::true_type
	{};
	template<>
	struct is_char_type<char32_t>
		: public sprout::true_type
	{};
#endif

#if SPROUT_USE_VARIABLE_TEMPLATES
	// C++14-style variable template shorthand.
	template<typename T>
	SPROUT_STATIC_CONSTEXPR bool is_char_type_v = sprout::is_char_type<T>::value;
#endif	// #if SPROUT_USE_VARIABLE_TEMPLATES
}	// namespace sprout

#endif	// #ifndef SPROUT_TYPE_TRAITS_IS_CHAR_TYPE_HPP
673
1,428
// Reads T test cases; each case is N followed by N integers. Prints the
// maximum subarray sum (Kadane's algorithm) for each case.
//
// Fixes over the previous version: the required headers were missing
// entirely (`cin` and `INT_MIN` were unresolved), `int arr[n]` was a
// non-standard variable-length array, and a local named `max` shadowed
// std::max under `using namespace std`.
#include <climits>
#include <iostream>
#include <vector>

int main() {
    int t;
    std::cin >> t;
    while (t--) {
        int n;
        std::cin >> n;
        std::vector<int> arr(n);
        for (int i = 0; i < n; i++) {
            std::cin >> arr[i];
        }

        // Kadane: `best` is the maximum subarray sum seen so far; `current`
        // is the best sum of a subarray ending at the current index. Same
        // int-width arithmetic as the original implementation.
        int best = INT_MIN;
        int current = 0;
        for (int i = 0; i < n; i++) {
            current += arr[i];
            if (current > best) {
                best = current;
            }
            if (current < 0) {
                current = 0;  // a negative prefix can never help; restart
            }
        }
        std::cout << best << "\n";
    }
    return 0;
}
323
2,757
// // /*++ Copyright (c) 1999 - 2014, Intel Corporation. All rights reserved This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License that accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php. THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. Module Name: SmbusPolicy.h Abstract: Smbus Policy PPI as defined in EFI 2.0 --*/ #ifndef _PEI_SMBUS_POLICY_PPI_H #define _PEI_SMBUS_POLICY_PPI_H #define PEI_SMBUS_POLICY_PPI_GUID \ { \ 0x63b6e435, 0x32bc, 0x49c6, 0x81, 0xbd, 0xb7, 0xa1, 0xa0, 0xfe, 0x1a, 0x6c \ } typedef struct _PEI_SMBUS_POLICY_PPI PEI_SMBUS_POLICY_PPI; struct _PEI_SMBUS_POLICY_PPI { UINTN BaseAddress; UINT32 PciAddress; UINT8 NumRsvdAddress; UINT8 *RsvdAddress; }; extern EFI_GUID gPeiSmbusPolicyPpiGuid; #endif
472
462
/**********************************************************************
 *
 * GEOS - Geometry Engine Open Source
 * http://geos.osgeo.org
 *
 * Copyright (C) 2011 <NAME> <<EMAIL>>
 * Copyright (C) 2006 Refractions Research Inc.
 * Copyright (C) 2001-2002 Vivid Solutions Inc.
 *
 * This is free software; you can redistribute and/or modify it under
 * the terms of the GNU Lesser General Public Licence as published
 * by the Free Software Foundation.
 * See the COPYING file for more information.
 *
 **********************************************************************
 *
 * Last port: operation/linemerge/EdgeString.java r378 (JTS-1.12)
 *
 **********************************************************************/

#include <geos/operation/linemerge/EdgeString.h>
#include <geos/operation/linemerge/LineMergeEdge.h>
#include <geos/operation/linemerge/LineMergeDirectedEdge.h>
#include <geos/geom/GeometryFactory.h>
#include <geos/geom/CoordinateSequenceFactory.h>
#include <geos/geom/CoordinateSequence.h>
#include <geos/geom/LineString.h>

#include <vector>
#include <cassert>

using namespace std;
using namespace geos::geom;

namespace geos {
namespace operation { // geos.operation
namespace linemerge { // geos.operation.linemerge

/**
 * Constructs an EdgeString with the given factory used to convert
 * this EdgeString to a LineString
 */
EdgeString::EdgeString(const GeometryFactory *newFactory):
	factory(newFactory),
	directedEdges(),
	// coordinates is computed lazily in getCoordinates().
	coordinates(NULL)
{
}

// NOTE(review): the destructor does not delete `coordinates`; presumably
// ownership is transferred via toLineString()/createLineString — confirm
// against the GeometryFactory contract before relying on this.
EdgeString::~EdgeString()
{
}

/**
 * Adds a directed edge which is known to form part of this line.
 */
void
EdgeString::add(LineMergeDirectedEdge *directedEdge)
{
	directedEdges.push_back(directedEdge);
}

// Builds (on first call) and returns the merged coordinate sequence for
// this edge string. The result is cached in `coordinates`, so repeated
// calls return the same sequence.
CoordinateSequence *
EdgeString::getCoordinates()
{
	if (coordinates==NULL) {
		// Track edge orientations so the final sequence can be flipped
		// to follow the majority direction.
		int forwardDirectedEdges = 0;
		int reverseDirectedEdges = 0;
		coordinates=factory->getCoordinateSequenceFactory()->create(NULL);
		for (std::size_t i=0, e=directedEdges.size(); i<e; ++i) {
			LineMergeDirectedEdge* directedEdge = directedEdges[i];
			if (directedEdge->getEdgeDirection()) {
				forwardDirectedEdges++;
			} else {
				reverseDirectedEdges++;
			}

			// Every edge in a merge graph is a LineMergeEdge; the assert
			// documents the invariant behind the static_cast below.
			assert(dynamic_cast<LineMergeEdge*>(
					directedEdge->getEdge()));
			LineMergeEdge* lme=static_cast<LineMergeEdge*>(
					directedEdge->getEdge());

			// Append this edge's line, oriented to match the directed edge.
			coordinates->add(lme->getLine()->getCoordinatesRO(),
					false,
					directedEdge->getEdgeDirection());
		}

		// If most edges run backwards, reverse the whole sequence once
		// rather than each edge individually.
		if (reverseDirectedEdges > forwardDirectedEdges) {
			CoordinateSequence::reverse(coordinates);
		}
	}
	return coordinates;
}

/*
 * Converts this EdgeString into a new LineString.
 */
LineString*
EdgeString::toLineString()
{
	return factory->createLineString(getCoordinates());
}

} // namespace geos.operation.linemerge
} // namespace geos.operation
} // namespace geos
895
384
// Keep the MPI C++ bindings out of the translation unit; only the C API is used.
#ifndef OMPI_SKIP_MPICXX
#define OMPI_SKIP_MPICXX
#endif
#ifndef MPICH_SKIP_MPICXX
#define MPICH_SKIP_MPICXX
#endif

#include "FAST_Library.h"
#include <sstream>
#include <iostream>
#include <string>
#include <vector>
#include <map>
#include "mpi.h"
#include "hdf5.h"
#include "dlfcn.h"

// Sizing and initial-value data handed out by the super controller during
// initialization, consumed by the driver to size its own buffers.
class scInitOutData {

public:
    int nInpGlobal;                            // # global inputs to the super controller
    int nCtrl2SC;                              // # inputs per turbine controller -> super controller
    int nSC2CtrlGlob;                          // # global outputs super controller -> controllers
    int nSC2Ctrl;                              // # outputs per super controller -> turbine controller
    std::vector<float> from_SCglob;            // initial global outputs
    std::vector<std::vector<float>> from_SC;   // initial per-turbine outputs

};

// Bridges OpenFAST turbine instances and a user-supplied super-controller
// shared library loaded at run time via dlopen. Holds controller state at
// time levels n-1, n and n+1 and exchanges inputs/outputs with each turbine.
class SuperController {

public:

    // Data structures to interface with OpenFAST per turbine
    // Unfortunately have to be public
    std::vector<SC_DX_InputType_t> ip_from_FAST;  // At time step 'n+1'
    std::vector<SC_DX_OutputType_t> op_to_FAST;   // At time step 'n'

private:

    MPI_Comm fastMPIComm;                     // communicator shared with the FAST instances
    int nTurbinesGlob;                        // turbines across all MPI ranks
    int nTurbinesProc;                        // turbines on this rank
    std::map<int, int> turbineMapProcToGlob;  // local turbine index -> global turbine index

    int nCtrl2SC;
    int nSC2Ctrl;
    int nInpGlobal;
    int nSC2CtrlGlob;

    int nStatesGlobal;  // Global states like time
    std::vector<float> globStates;
    std::vector<float> globStates_np1;

    int nStatesTurbine; // States for each turbine
    std::vector<float> turbineStates;
    std::vector<float> turbineStates_np1;

    // Time 'n-1'
    std::vector<float> from_SC_nm1;     // # outputs from the supercontroller for turbines
    std::vector<float> to_SC_nm1;       // # inputs to the supercontroller from turbines
    std::vector<float> from_SCglob_nm1; // # outputs from the supercontroller for glob
    std::vector<float> to_SCglob_nm1;   // # inputs to the supercontroller from glob

    // Time 'n'
    std::vector<float> from_SC_n;       // # outputs from the supercontroller for turbines
    std::vector<float> to_SC_n;         // # inputs to the supercontroller from turbines
    std::vector<float> from_SCglob_n;   // # outputs from the supercontroller for glob
    std::vector<float> to_SCglob_n;     // # inputs to the supercontroller from glob

    // Time 'n+1'
    std::vector<float> from_SC_np1;     // # outputs from the supercontroller for turbines
    std::vector<float> to_SC_np1;       // # inputs to the supercontroller from turbines
    std::vector<float> from_SCglob_np1; // # outputs from the supercontroller for glob
    std::vector<float> to_SCglob_np1;   // # inputs to the supercontroller from glob

    int nParamGlobal;                   // # global controller parameters
    std::vector<float> paramGlobal;
    int nParamTurbine;                  // # parameters per turbine
    std::vector<float> paramTurbine;

    int ErrStat;
    char ErrMsg[INTERFACE_STRING_LENGTH]; // make sure this is the same size as IntfStrLen in FAST_Library.f90

    float d2R = 0.01745329251; //Degrees to Radians

    //Supercontroller stuff
    std::string scLibFile;  // path of the shared library implementing the controller
    // Dynamic load stuff copied from 'C++ dlopen mini HOWTO' on tldp.org
    void *scLibHandle;
    // Function-pointer types matching the symbols resolved from the
    // controller library; all report errors through ErrStat/ErrMsg.
    typedef void sc_init_t(int * nTurbinesGlob, int * nInpGlobal, int * nCtrl2SC, int * nParamGlobal, int * nParamTurbine, int * nStatesGlobal, int * nStatesTurbine, int * nSC2CtrlGlob, int * nSC2Ctrl, int *ErrStat, char * ErrMsg);
    sc_init_t * sc_init;
    bool sc_library_loaded = false;
    typedef void sc_getInitData_t(int * nTurbinesGlob, int * nParamGlobal, int * nParamTurbine, float * paramGlobal, float * paramTurbine, int * nSC2CtrlGlob, float * from_SCglob, int * nSC2Ctrl, float * from_SC, int * nStatesGlobal, float * globStates, int * nStatesTurbine, float * turbineStates, int *ErrStat, char * ErrMsg);
    sc_getInitData_t * sc_getInitData;
    typedef void sc_updateStates_t(double * t, int * nTurbinesGlob, int * nParamGlobal, float * paramGlobal, int * nParamTurbine, float * paramTurbine, int * nInpGlobal, float * to_SCglob, int * nCtrl2SC, float * to_SC, int * nStatesGlobal, float * statesGlob_n, float * statesGlob_np1, int * nStatesTurbine, float * statesTurbine_n, float * statesTurbine_np1, int * ErrStat, char * ErrMsg);
    sc_updateStates_t * sc_updateStates;
    typedef void sc_calcOutputs_t(double * t, int * nTurbinesGlob, int * nParamGlobal, float * paramGlobal, int * nParamTurbine, float * paramTurbine, int * nInpGlobal, float * to_SCglob, int * nCtrl2SC, float * to_SC, int * nStatesGlobal, float * statesGlob, int * nStatesTurbine, float * statesTurbine, int * nSC2CtrlGlob, float * from_SCglob, int * nSC2Ctrl, float * from_SC, int * ErrStat, char * ErrMsg);
    sc_calcOutputs_t * sc_calcOutputs;

public:

    SuperController();

    ~SuperController();

    void init(scInitOutData & scio, int nTurbinesProc);
    void init_sc(scInitOutData & scio, int inNTurbinesProc, std::map<int, int> iTurbineMapProcToGlob, MPI_Comm inFastMPIComm);

    // dlopen the controller library and resolve its entry points.
    void load(int inNTurbinesGlob, std::string inScLibFile, scInitOutData & scio);

    void updateStates(double t); //Make a prediction for states at 'n+1' based on inputs and states at 'n'

    void calcOutputs_n(double t);
    void calcOutputs_np1(double t);

    void fastSCInputOutput(); // Exchange input output information with OpenFAST turbines

    void advanceTime(); //Advance states to time step 'n+1'

    int writeRestartFile(int n_t_global);

    int readRestartFile(int n_t_global);

    // No-op: nothing to tear down beyond what the destructor handles.
    void end() {};

};
1,914
486
/*
// Licensed to <NAME> under one or more contributor license
// agreements.  See the NOTICE file distributed with this work for
// additional information regarding copyright ownership.
//
// <NAME> licenses this file to you under the Modified BSD License
// (the "License"); you may not use this file except in compliance with
// the License.  You may obtain a copy of the License at:
//
// http://opensource.org/licenses/BSD-3-Clause
*/
package sqlline;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.Properties;

import org.jline.reader.Expander;
import org.jline.reader.History;

/**
 * SQLLine expander class for live templates.
 *
 * <p>Words typed at the prompt are replaced by the value mapped to them in
 * the user-configured live-templates properties file, loaded by
 * {@link #reset()}. History expansion is not supported.
 */
public class SqlLineExpander implements Expander {
  private final SqlLine sqlLine;
  /** Loaded live templates; {@code null} until a templates file is read. */
  private Properties expandProperties = null;

  public SqlLineExpander(SqlLine sqlLine) {
    this.sqlLine = sqlLine;
  }

  /** History expansion is disabled; the line is returned unchanged. */
  @Override
  public String expandHistory(History history, String line) {
    return line;
  }

  /**
   * Returns the live-template replacement for {@code word}, or the word
   * itself when no templates are loaded or no mapping exists.
   */
  @Override
  public String expandVar(String word) {
    if (expandProperties != null) {
      // getProperty is the type-safe accessor; the original used
      // Properties.get with an unchecked (String) cast.
      final String expandValue = expandProperties.getProperty(word);
      if (expandValue != null) {
        return expandValue;
      }
    }
    return word;
  }

  /**
   * Re-reads the live-templates file named by the current session options.
   * Clears any previously loaded templates first; if the option is at its
   * default value there is nothing to load. A missing or unreadable file is
   * reported through the SQLLine error channel and leaves templates unset.
   */
  public void reset() {
    expandProperties = null;
    final String liveTemplatesFile = sqlLine.getOpts().getLiveTemplatesFile();
    if (Objects.equals(liveTemplatesFile,
        BuiltInProperty.LIVE_TEMPLATES.defaultValue())) {
      return;
    }
    final File path = new File(liveTemplatesFile);
    if (!path.exists() || !path.isFile()) {
      sqlLine.error(sqlLine.loc("no-file", path.getAbsolutePath()));
      return;
    }
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(
            new FileInputStream(path), StandardCharsets.UTF_8))) {
      expandProperties = new Properties();
      expandProperties.load(reader);
    } catch (IOException e) {
      sqlLine.error(e);
    }
  }
}

// End SqlLineExpander.java
742
737
/* hex_dump.h                                                      -*- C++ -*-
   <NAME>, 6 October 2010
   Copyright (c) 2010 <NAME>.  All rights reserved.
   Copyright (c) 2010 Datacratic.  All rights reserved.

   Routine to dump memory in hex format.
*/

#ifndef __utils__hex_dump_h__
#define __utils__hex_dump_h__

#include <stddef.h>

namespace ML {

/** Dump the given range of memory (up to a minimum of total_memory and
    max_size) as a hex/ascii dump to the screen.

    \param mem           start of the region to dump
    \param total_memory  number of bytes available at mem
    \param max_size      cap on how many bytes are printed (default 1024)
*/
void hex_dump(const void * mem, size_t total_memory, size_t max_size = 1024);

} // namespace ML

#endif /* __utils__hex_dump_h__ */
259