| max_stars_count | text | token_count |
---|---|---|
| int64: 301 – 224k | string: lengths 6 – 1.05M | int64: 3 – 727k |
590 | <filename>uflo-core/src/main/java/com/bstek/uflo/command/impl/QueryCountCommand.java
/*******************************************************************************
* Copyright 2017 Bstek
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package com.bstek.uflo.command.impl;
import org.hibernate.Criteria;
import org.hibernate.criterion.Projections;
import com.bstek.uflo.command.Command;
import com.bstek.uflo.env.Context;
import com.bstek.uflo.query.QueryJob;
/**
* @author Jacky.gao
* @since August 14, 2013
*/
public class QueryCountCommand implements Command<Integer> {
private QueryJob job;
public QueryCountCommand(QueryJob job){
this.job=job;
}
public Integer execute(Context context) {
Criteria criteria=job.getCriteria(context.getSession(),true);
Object obj=criteria.setProjection(Projections.rowCount()).uniqueResult();
if(obj==null)return 0;
if(obj instanceof Integer){
return (Integer)obj;
}else{
return ((Long)obj).intValue();
}
}
}
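// A minimal usage sketch (assumption, not part of the original file): the
// command counts the rows matched by a QueryJob when executed against an
// existing uflo Context; how callers obtain the Context is left open here.
class QueryCountCommandExample {
    static int countRows(Context context, QueryJob job) {
        return new QueryCountCommand(job).execute(context);
    }
}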
| 469 |
470 | // Fill out your copyright notice in the Description page of Project Settings.
#pragma once
#include "CoreMinimal.h"
class IPatchableInterface
{
// Add interface functions to this class. This is the class that will be inherited to implement this interface.
public:
virtual void ImportConfig()=0;
virtual void ImportProjectConfig()=0;
virtual void ExportConfig()const=0;
virtual void ResetConfig() = 0;
virtual void DoGenerate()=0;
virtual FString GetMissionName()=0;
};
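// A minimal implementation sketch (assumption, not part of the original file):
// a concrete generator inherits the interface and overrides every pure virtual
// method; the class and mission name below are illustrative.
class FExamplePatcher : public IPatchableInterface
{
public:
    virtual void ImportConfig() override {}
    virtual void ImportProjectConfig() override {}
    virtual void ExportConfig() const override {}
    virtual void ResetConfig() override {}
    virtual void DoGenerate() override {}
    virtual FString GetMissionName() override { return TEXT("ExamplePatcher"); }
};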
| 154 |
558 | '''
* @file BubbleSort.py
* @author (original JAVA) <NAME>, <EMAIL>
* (conversion to Python) <NAME>, <EMAIL>
* @date 27 Jun 2020
* @version 0.1
* @brief Bubble sort implementation
'''
class BubbleSort():
def __init__(self):
pass
def sort(self, ar):
"""
Sort the array using bubble sort. The idea behind
bubble sort is to look for adjacent indexes which
are out of place and interchange their elements
until the entire array is sorted.
"""
if ar is None:
    return
is_sorted = False
while True:
    is_sorted = True
    for i in range(1, len(ar)):
        if ar[i] < ar[i - 1]:
            self.swap(ar, i - 1, i)
            is_sorted = False
    if is_sorted:
        break
def swap(self, ar, i, j):
tmp = ar[i]
ar[i] = ar[j]
ar[j] = tmp
if __name__ == '__main__':
"""
Example usage
"""
array = [10, 4, 6, 8, 13, 2, 3]
sorter = BubbleSort()
sorter.sort(array)
# Prints:
# [2, 3, 4, 6, 8, 10, 13]
print(array)
| 438 |
361 | <gh_stars>100-1000
{
"name": "Aborea Character Sheet AutoCalculations",
"script": "Aborea Character Sheet AutoCalculations.js",
"version": "1.0",
"previousversions": [],
"description": "Provides some automated calculations for values of attributes that cannot be calculated by the html sheet directly.",
"authors": "Scaby79",
"roll20userid": "603614",
"useroptions": [],
"dependencies": [],
"modifies": {},
"conflicts": []
}
| 165 |
348 | {"nom":"Cappelle-en-Pévèle","circ":"6ème circonscription","dpt":"Nord","inscrits":1744,"abs":902,"votants":842,"blancs":55,"nuls":35,"exp":752,"res":[{"nuance":"LR","nom":"<NAME>","voix":399},{"nuance":"REM","nom":"<NAME>","voix":353}]} | 99 |
3,589 | <reponame>sadernalwis/WickedEngine<gh_stars>1000+
#pragma once
#include "wiLua.h"
#include "wiLuna.h"
#include "LoadingScreen.h"
#include "RenderPath2D_BindLua.h"
class LoadingScreen_BindLua : public RenderPath2D_BindLua
{
private:
LoadingScreen loadingscreen;
public:
static const char className[];
static Luna<LoadingScreen_BindLua>::FunctionType methods[];
static Luna<LoadingScreen_BindLua>::PropertyType properties[];
LoadingScreen_BindLua(LoadingScreen* component)
{
this->component = component;
}
LoadingScreen_BindLua(lua_State* L)
{
this->component = &loadingscreen;
}
int AddLoadingTask(lua_State* L);
int OnFinished(lua_State* L);
static void Bind();
};
| 240 |
1,178 | /*
* Copyright 2020 Makani Technologies LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// System clock based on the ARM Performance Monitoring Unit (PMU) cycle
// counter, a 32-bit hardware counter that increments every CPU cycle. System
// time is stored as 64-bit software counters and maintained by accumulating the
// deltas between samples of the cycle counter. The cycle counter periodically
// overflows and must be sampled at least once every 26.84 seconds on the
// TMS570LS1227 to ensure correct timekeeping. Calling any of these functions
// samples the cycle counter.
#ifndef AVIONICS_FIRMWARE_CPU_CLOCK_H_
#define AVIONICS_FIRMWARE_CPU_CLOCK_H_
#include <stdbool.h>
#include <stdint.h>
#include "avionics/firmware/startup/clock_tms570_config.h"
// Performance Monitor Control Register (PMCR) divide configuration. We can
// specify a value of 1 or 64. A value of 1 requires all time comparisons
// to be within 2^31 / 160e6 = 13.422 seconds, assuming a 160 MHz clock
// frequency. A value of 64 requires all time comparisons to be within
// 2^31 * 64 / 160e6 = 858.99 seconds.
#define PMCR_CYCLE_DIV 64
#define CLOCK32_KHZ (CLOCK_GCLK_KHZ / PMCR_CYCLE_DIV)
#define CLOCK32_HZ (CLOCK32_KHZ * 1000)
#define CLOCK32_KHZ_F ((float)CLOCK32_KHZ)
#define CLOCK32_MHZ_F (CLOCK32_KHZ_F / 1000.0f)
// Helper macros for handling unsigned 32-bit timestamps.
#define CLOCK32_SUBTRACT(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)))
#define CLOCK32_EQ(a, b) (CLOCK32_SUBTRACT(a, b) == 0)
#define CLOCK32_NE(a, b) (CLOCK32_SUBTRACT(a, b) != 0)
#define CLOCK32_LE(a, b) (CLOCK32_SUBTRACT(a, b) <= 0)
#define CLOCK32_LT(a, b) (CLOCK32_SUBTRACT(a, b) < 0)
#define CLOCK32_GE(a, b) (CLOCK32_SUBTRACT(a, b) >= 0)
#define CLOCK32_GT(a, b) (CLOCK32_SUBTRACT(a, b) > 0)
// Helper macros to convert to/from cycles.
#define CLOCK32_USEC_TO_CYCLES(usec) (((usec) * CLOCK32_KHZ + 500) / 1000)
#define CLOCK32_MSEC_TO_CYCLES(msec) ((msec) * CLOCK32_KHZ)
#define CLOCK32_SEC_TO_CYCLES(sec) CLOCK32_MSEC_TO_CYCLES((sec) * 1000)
#define CLOCK32_CYCLES_TO_MSEC_F(cycles) ((cycles) / CLOCK32_KHZ_F)
#define CLOCK32_CYCLES_TO_USEC_F(cycles) ((cycles) / CLOCK32_MHZ_F)
// See TMS570LS1227 Table 6-8 "Available Clock Sources".
typedef enum {
kClockSourceOscIn = 0,
kClockSourcePll1 = 1,
kClockSourceExtClkIn = 3,
kClockSourceLfLpo = 4,
kClockSourceHfLpo = 5,
kClockSourcePll2 = 6,
kClockSourceExtClkIn2 = 7
} ClockSource;
// See TMS570LS1227 Section 6.6.2 "Clock Domains".
typedef enum {
kClockDomainHclk,
kClockDomainGclk,
kClockDomainVclk,
kClockDomainVclk2,
kClockDomainVclk3,
kClockDomainVclk4,
kClockDomainVclka1,
kClockDomainVclka2,
kClockDomainVclka3divr,
kClockDomainVclka4divr,
kClockDomainRticlk
} ClockDomain;
// Starts the PMU cycle counter. Called from startup code.
void StartupClockInit(void);
// Get clock frequencies.
int32_t ClockGetOscInFreq(void);
int32_t ClockGetPll1Freq(void);
int32_t ClockGetPll2Freq(void);
int32_t ClockGetRticlkFreq(void);
int32_t ClockGetCycleCounterFreq(void);
int32_t ClockSourceGetFreq(ClockSource source);
int32_t ClockDomainGetFreq(ClockDomain domain);
// Sample cycle counter.
static inline uint32_t Clock32GetCycles(void) {
// [ARM Cortex-R4 TRM 6.3.7]
uint32_t clock_count;
__asm__ __volatile__("mrc p15, 0, %0, c9, c13, 0" : "=r"(clock_count));
return clock_count;
}
// Returns the number of microseconds since ClockInit.
int64_t ClockGetUs(void);
// Waits for at least the given number of cycles.
void Clock32WaitCycles(int32_t cycles);
// Saturate latency measurement to 32-bit signed.
int32_t SaturateLatency(int64_t latency);
// Helper function for polling periodic tasks.
bool PollPeriodicCycles(int32_t period, uint32_t *next);
#endif // AVIONICS_FIRMWARE_CPU_CLOCK_H_
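// A minimal usage sketch (assumption, not part of the original header):
// polling a hypothetical 100 ms task with the wraparound-safe helpers above.
// The unsigned-subtract comparison macros stay correct across cycle-counter
// wraparound as long as the compared timestamps are under 2^31 cycles apart.
extern void BlinkLed(void);  // Hypothetical periodic work.

static void PollLoop(void) {
  static uint32_t next_cycles = 0U;
  if (PollPeriodicCycles(CLOCK32_MSEC_TO_CYCLES(100), &next_cycles)) {
    BlinkLed();
  }
}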
| 1,631 |
14,668 | /*
* Protocol Buffers - Google's data interchange format
* Copyright 2014 Google Inc. All rights reserved.
* https://developers.google.com/protocol-buffers/
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.google.protobuf.jruby;
import org.jruby.Ruby;
import org.jruby.RubyModule;
import org.jruby.anno.JRubyMethod;
import org.jruby.anno.JRubyModule;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
@JRubyModule(name = "Protobuf")
public class RubyProtobuf {
public static void createProtobuf(Ruby runtime) {
RubyModule mGoogle = runtime.getModule("Google");
RubyModule mProtobuf = mGoogle.defineModuleUnder("Protobuf");
mProtobuf.defineAnnotatedMethods(RubyProtobuf.class);
}
/*
* call-seq:
* Google::Protobuf.deep_copy(obj) => copy_of_obj
*
* Performs a deep copy of either a RepeatedField instance or a message object,
* recursively copying its members.
*/
@JRubyMethod(name = "deep_copy", meta = true)
public static IRubyObject deepCopy(ThreadContext context, IRubyObject self, IRubyObject message) {
if (message instanceof RubyMessage) {
return ((RubyMessage) message).deepCopy(context);
} else if (message instanceof RubyRepeatedField) {
return ((RubyRepeatedField) message).deepCopy(context);
} else {
return ((RubyMap) message).deepCopy(context);
}
}
}
| 941 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Cenans","circ":"2ème circonscription","dpt":"Haute-Saône","inscrits":106,"abs":48,"votants":58,"blancs":4,"nuls":1,"exp":53,"res":[{"nuance":"REM","nom":"<NAME>","voix":34},{"nuance":"FN","nom":"M. <NAME>","voix":19}]} | 121 |
575 | <filename>ui/events/ozone/evdev/input_controller_evdev.h
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_EVENTS_OZONE_EVDEV_INPUT_CONTROLLER_EVDEV_H_
#define UI_EVENTS_OZONE_EVDEV_INPUT_CONTROLLER_EVDEV_H_
#include <string>
#include "base/component_export.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "ui/events/ozone/evdev/input_device_settings_evdev.h"
#include "ui/ozone/public/input_controller.h"
namespace ui {
class InputDeviceFactoryEvdevProxy;
class KeyboardEvdev;
class MouseButtonMapEvdev;
// Ozone InputController implementation for the Linux input subsystem ("evdev").
class COMPONENT_EXPORT(EVDEV) InputControllerEvdev : public InputController {
public:
InputControllerEvdev(KeyboardEvdev* keyboard,
MouseButtonMapEvdev* mouse_button_map,
MouseButtonMapEvdev* pointing_stick_button_map);
~InputControllerEvdev() override;
// Initialize device factory. This would be in the constructor if it was
// built early enough for that to be possible.
void SetInputDeviceFactory(
InputDeviceFactoryEvdevProxy* input_device_factory);
void set_has_mouse(bool has_mouse);
void set_has_pointing_stick(bool has_pointing_stick);
void set_has_touchpad(bool has_touchpad);
void SetInputDevicesEnabled(bool enabled);
// InputController:
bool HasMouse() override;
bool HasPointingStick() override;
bool HasTouchpad() override;
bool IsCapsLockEnabled() override;
void SetCapsLockEnabled(bool enabled) override;
void SetNumLockEnabled(bool enabled) override;
bool IsAutoRepeatEnabled() override;
void SetAutoRepeatEnabled(bool enabled) override;
void SetAutoRepeatRate(const base::TimeDelta& delay,
const base::TimeDelta& interval) override;
void GetAutoRepeatRate(base::TimeDelta* delay,
base::TimeDelta* interval) override;
void SetCurrentLayoutByName(const std::string& layout_name) override;
void SetTouchEventLoggingEnabled(bool enabled) override;
void SetTouchpadSensitivity(int value) override;
void SetTouchpadScrollSensitivity(int value) override;
void SetTapToClick(bool enabled) override;
void SetThreeFingerClick(bool enabled) override;
void SetTapDragging(bool enabled) override;
void SetNaturalScroll(bool enabled) override;
void SetMouseSensitivity(int value) override;
void SetMouseScrollSensitivity(int value) override;
void SetPrimaryButtonRight(bool right) override;
void SetMouseReverseScroll(bool enabled) override;
void SetMouseAcceleration(bool enabled) override;
void SuspendMouseAcceleration() override;
void EndMouseAccelerationSuspension() override;
void SetMouseScrollAcceleration(bool enabled) override;
void SetPointingStickSensitivity(int value) override;
void SetPointingStickPrimaryButtonRight(bool right) override;
void SetPointingStickAcceleration(bool enabled) override;
void SetTouchpadAcceleration(bool enabled) override;
void SetTouchpadScrollAcceleration(bool enabled) override;
void SetTapToClickPaused(bool state) override;
void GetTouchDeviceStatus(GetTouchDeviceStatusReply reply) override;
void GetTouchEventLog(const base::FilePath& out_dir,
GetTouchEventLogReply reply) override;
void SetInternalTouchpadEnabled(bool enabled) override;
bool IsInternalTouchpadEnabled() const override;
void SetTouchscreensEnabled(bool enabled) override;
void SetInternalKeyboardFilter(bool enable_filter,
std::vector<DomCode> allowed_keys) override;
void GetGesturePropertiesService(
mojo::PendingReceiver<ozone::mojom::GesturePropertiesService> receiver)
override;
void PlayVibrationEffect(int id,
uint8_t amplitude,
uint16_t duration_millis) override;
void StopVibration(int id) override;
private:
FRIEND_TEST_ALL_PREFIXES(InputControllerEvdevTest, AccelerationSuspension);
FRIEND_TEST_ALL_PREFIXES(InputControllerEvdevTest,
AccelerationChangeDuringSuspension);
struct StoredAccelerationSettings {
bool mouse = false;
bool pointing_stick = false;
};
// Post task to update settings.
void ScheduleUpdateDeviceSettings();
// Send settings update to input_device_factory_.
void UpdateDeviceSettings();
// Send caps lock update to input_device_factory_.
void UpdateCapsLockLed();
// Indicates whether the mouse acceleration is turned off for PointerLock.
bool is_mouse_acceleration_suspended() {
return stored_acceleration_settings_.get() != nullptr;
}
// Configuration that needs to be passed on to InputDeviceFactory.
InputDeviceSettingsEvdev input_device_settings_;
// Holds acceleration settings while suspended. Should only be considered
// valid while |mouse_acceleration_suspended| is true.
std::unique_ptr<StoredAccelerationSettings> stored_acceleration_settings_;
// Whether a task to update the config from input_device_settings_ is pending.
bool settings_update_pending_ = false;
// Factory for devices. Needed to update device config.
InputDeviceFactoryEvdevProxy* input_device_factory_ = nullptr;
// Keyboard state.
KeyboardEvdev* const keyboard_;
// Mouse button map.
MouseButtonMapEvdev* const mouse_button_map_;
// Pointing stick button map.
MouseButtonMapEvdev* const pointing_stick_button_map_;
// Device presence.
bool has_mouse_ = false;
bool has_pointing_stick_ = false;
bool has_touchpad_ = false;
// LED state.
bool caps_lock_led_state_ = false;
base::WeakPtrFactory<InputControllerEvdev> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(InputControllerEvdev);
};
} // namespace ui
#endif // UI_EVENTS_OZONE_EVDEV_INPUT_CONTROLLER_EVDEV_H_
| 1,944 |
12,718 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2018 Canonical Ltd.
*
*/
#ifndef _LINUX_BINDERFS_H
#define _LINUX_BINDERFS_H
#include <linux/android/binder.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#define BINDERFS_MAX_NAME 255
/**
* struct binderfs_device - retrieve information about a new binder device
* @name: the name to use for the new binderfs binder device
* @major: major number allocated for binderfs binder devices
* @minor: minor number allocated for the new binderfs binder device
*
*/
struct binderfs_device {
char name[BINDERFS_MAX_NAME + 1];
__u32 major;
__u32 minor;
};
/**
* Allocate a new binder device.
*/
#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
#endif /* _LINUX_BINDERFS_H */
| 293 |
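/* A minimal userspace sketch (assumption, not part of the header above):
 * allocate a new binder device through the binder-control node of a mounted
 * binderfs instance. The mount point "/dev/binderfs" and the device name are
 * illustrative. On success the kernel fills in device.major and device.minor. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

static int add_binder_device(const char *name)
{
	struct binderfs_device device;
	int fd, ret;

	memset(&device, 0, sizeof(device));
	strncpy(device.name, name, BINDERFS_MAX_NAME);
	fd = open("/dev/binderfs/binder-control", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, BINDER_CTL_ADD, &device);
	close(fd);
	return ret;
}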
1,425 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.structure;
import org.apache.tinkerpop.gremlin.process.traversal.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.function.Function;
/**
* Column references a particular type of column in a complex data structure such as a {@code Map}, a
* {@code Map.Entry}, or a {@link Path}.
*
* @author <NAME> (http://markorodriguez.com)
*/
public enum Column implements Function<Object, Object> {
/**
* The keys associated with the data structure.
*/
keys {
@Override
public Object apply(final Object object) {
if (object instanceof Map)
return new LinkedHashSet<>(((Map<?,?>) object).keySet());
else if (object instanceof Map.Entry)
return ((Map.Entry) object).getKey();
else if (object instanceof Path)
return new ArrayList<>(((Path) object).labels());
else
throw new IllegalArgumentException("The provided object does not have accessible keys: " + object.getClass());
}
},
/**
* The values associated with the data structure.
*/
values {
@Override
public Object apply(final Object object) {
if (object instanceof Map)
return new ArrayList<>(((Map<?,?>) object).values());
else if (object instanceof Map.Entry)
return ((Map.Entry) object).getValue();
else if (object instanceof Path)
return new ArrayList<>(((Path) object).objects());
else
throw new IllegalArgumentException("The provided object does not have accessible keys: " + object.getClass());
}
}
}
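// A minimal usage sketch (assumption, not part of the original file): each
// constant implements Function<Object, Object>, so it can be applied straight
// to a Map; the map contents below are illustrative.
class ColumnExample {
    static void demo() {
        final java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        final Object keys = Column.keys.apply(m);     // LinkedHashSet ["a"]
        final Object values = Column.values.apply(m); // ArrayList [1]
        System.out.println(keys + " / " + values);
    }
}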
| 914 |
765 | #ifndef B_H
#define B_H "$Header: /Users/acg/CVSROOT/systemc-2.3/src/sysc/qt/b.h,v 1.1.1.1 2006/12/15 20:20:06 acg Exp $"
#include "copyright.h"
extern void b_call_reg (int n);
extern void b_call_imm (int n);
extern void b_add (int n);
extern void b_load (int n);
#endif /* ndef B_H */
| 141 |
652 | import random
import warnings
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
warnings.warn("\n"
"The rplot trellis plotting interface is deprecated and will be "
"removed in a future version. We refer to external packages "
"like seaborn for similar but more refined functionality. \n\n"
"See our docs http://pandas.pydata.org/pandas-docs/stable/visualization.html#rplot "
"for some example how to convert your existing code to these "
"packages.", FutureWarning, stacklevel=2)
class Scale:
"""
Base class for mapping between graphical and data attributes.
"""
pass
class ScaleGradient(Scale):
"""
A mapping between a data attribute value and a
point in colour space between two specified colours.
"""
def __init__(self, column, colour1, colour2):
"""Initialize ScaleGradient instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A three element tuple representing an RGB somewhere between colour1 and colour2
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
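def _gradient_example():
    """A minimal worked sketch (assumption, illustrative data; not part of the
    original module): for a black-to-white gradient over a column spanning
    [0, 10], the value 2.5 scales to x_scaled = 0.25 and therefore maps to the
    RGB tuple (0.25, 0.25, 0.25).
    """
    import pandas as pd
    df = pd.DataFrame({'v': [0.0, 2.5, 10.0]})
    grad = ScaleGradient('v', (0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
    return grad(df, 1)  # -> (0.25, 0.25, 0.25)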
class ScaleGradient2(Scale):
"""
Create a mapping between a data attribute value and a
point in colour space in a line of three specified colours.
"""
def __init__(self, column, colour1, colour2, colour3):
"""Initialize ScaleGradient2 instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
colour3: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.colour3 = colour3
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A three element tuple representing an RGB somewhere along the line
of colour1, colour2 and colour3
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
r3, g3, b3 = self.colour3
x_scaled = (x - a) / (b - a)
if x_scaled < 0.5:
x_scaled *= 2.0
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
else:
x_scaled = (x_scaled - 0.5) * 2.0
return (r2 + (r3 - r2) * x_scaled,
g2 + (g3 - g2) * x_scaled,
b2 + (b3 - b2) * x_scaled)
class ScaleSize(Scale):
"""
Provide a mapping between a DataFrame column and matplotlib
scatter plot shape size.
"""
def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
"""Initialize ScaleSize instance.
Parameters:
-----------
column: string, a column name
min_size: float, minimum point size
max_size: float, maximum point size
transform: a one argument function of form float -> float (e.g. lambda x: log(x))
"""
self.column = column
self.min_size = min_size
self.max_size = max_size
self.transform = transform
self.categorical = False
def __call__(self, data, index):
"""Return matplotlib scatter plot marker shape size.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
x = data[self.column].iget(index)
a = float(min(data[self.column]))
b = float(max(data[self.column]))
return self.transform(self.min_size + ((x - a) / (b - a)) *
(self.max_size - self.min_size))
class ScaleShape(Scale):
"""
Provides a mapping between matplotlib marker shapes
and attribute values.
"""
def __init__(self, column):
"""Initialize ScaleShape instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
self.legends = set([])
self.categorical = True
def __call__(self, data, index):
"""Returns a matplotlib marker identifier.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
a matplotlib marker identifier
"""
values = sorted(list(set(data[self.column])))
if len(values) > len(self.shapes):
raise ValueError("Too many different values of the categorical attribute for ScaleShape")
x = data[self.column].iget(index)
return self.shapes[values.index(x)]
class ScaleRandomColour(Scale):
"""
Maps a random colour to a DataFrame attribute.
"""
def __init__(self, column):
"""Initialize ScaleRandomColour instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.categorical = True
def __call__(self, data, index):
"""Return a tuple of three floats, representing
an RGB colour.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
random.seed(data[self.column].iget(index))
return [random.random() for _ in range(3)]
class ScaleConstant(Scale):
"""
Constant returning scale. Usually used automatically.
"""
def __init__(self, value):
"""Initialize ScaleConstant instance.
Parameters:
-----------
value: any Python value to be returned when called
"""
self.value = value
self.categorical = False
def __call__(self, data, index):
"""Return the constant value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A constant value specified during initialisation
"""
return self.value
def default_aes(x=None, y=None):
"""Create the default aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
Returns:
--------
a dictionary with aesthetics bindings
"""
return {
'x' : x,
'y' : y,
'size' : ScaleConstant(40.0),
'colour' : ScaleConstant('grey'),
'shape' : ScaleConstant('o'),
'alpha' : ScaleConstant(1.0),
}
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
"""Create an empty aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
size: function, binding for size attribute of Geoms
colour: function, binding for colour attribute of Geoms
shape: function, binding for shape attribute of Geoms
alpha: function, binding for alpha attribute of Geoms
Returns:
--------
a dictionary with aesthetics bindings
"""
if not hasattr(size, '__call__') and size is not None:
size = ScaleConstant(size)
if not hasattr(colour, '__call__') and colour is not None:
colour = ScaleConstant(colour)
if not hasattr(shape, '__call__') and shape is not None:
shape = ScaleConstant(shape)
if not hasattr(alpha, '__call__') and alpha is not None:
alpha = ScaleConstant(alpha)
if any([isinstance(size, scale) for scale in [ScaleConstant, ScaleSize]]) or size is None:
pass
else:
raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
if any([isinstance(colour, scale) for scale in [ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour]]) or colour is None:
pass
else:
raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
if any([isinstance(shape, scale) for scale in [ScaleConstant, ScaleShape]]) or shape is None:
pass
else:
raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
if any([isinstance(alpha, scale) for scale in [ScaleConstant]]) or alpha is None:
pass
else:
raise ValueError('alpha mapping should be done through ScaleConstant')
return {
'x' : x,
'y' : y,
'size' : size,
'colour' : colour,
'shape' : shape,
'alpha' : alpha,
}
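def _aes_example():
    """A minimal sketch (assumption, not part of the original module): scalar
    values passed to make_aes are coerced to ScaleConstant, so the following
    two calls build equivalent aesthetics dictionaries.
    """
    aes1 = make_aes(x='total_bill', y='tip', size=40.0, colour='grey')
    aes2 = make_aes(x='total_bill', y='tip',
                    size=ScaleConstant(40.0), colour=ScaleConstant('grey'))
    return aes1, aes2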
class Layer:
"""
Layer object representing a single plot layer.
"""
def __init__(self, data=None, **kwds):
"""Initialize layer object.
Parameters:
-----------
data: pandas DataFrame instance
aes: aesthetics dictionary with bindings
"""
self.data = data
self.aes = make_aes(**kwds)
self.legend = {}
def work(self, fig=None, ax=None):
"""Do the drawing (usually) work.
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis object
Returns:
--------
a tuple with the same figure and axis instances
"""
return fig, ax
class GeomPoint(Layer):
def work(self, fig=None, ax=None):
"""Render the layer on a matplotlib axis.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
for index in range(len(self.data)):
row = self.data.iloc[index]
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
colour_scaler = self.aes['colour']
shape_scaler = self.aes['shape']
alpha = self.aes['alpha']
size_value = size_scaler(self.data, index)
colour_value = colour_scaler(self.data, index)
marker_value = shape_scaler(self.data, index)
alpha_value = alpha(self.data, index)
patch = ax.scatter(x, y,
s=size_value,
c=colour_value,
marker=marker_value,
alpha=alpha_value)
label = []
if colour_scaler.categorical:
label += [colour_scaler.column, row[colour_scaler.column]]
if shape_scaler.categorical:
label += [shape_scaler.column, row[shape_scaler.column]]
self.legend[tuple(label)] = patch
ax.set_xlabel(self.aes['x'])
ax.set_ylabel(self.aes['y'])
return fig, ax
class GeomPolyFit(Layer):
"""
Draw a polynomial fit of specified degree.
"""
def __init__(self, degree, lw=2.0, colour='grey'):
"""Initialize GeomPolyFit object.
Parameters:
-----------
degree: an integer, polynomial degree
lw: line width
colour: matplotlib colour
"""
self.degree = degree
self.lw = lw
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw the polynomial fit on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from numpy.polynomial.polynomial import polyfit
from numpy.polynomial.polynomial import polyval
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
min_x = min(x)
max_x = max(x)
c = polyfit(x, y, self.degree)
x_ = np.linspace(min_x, max_x, len(x))
y_ = polyval(x_, c)
ax.plot(x_, y_, lw=self.lw, c=self.colour)
return fig, ax
class GeomScatter(Layer):
"""
An efficient scatter plot, use this instead of GeomPoint for speed.
"""
def __init__(self, marker='o', colour='lightblue', alpha=1.0):
"""Initialize GeomScatter instance.
Parameters:
-----------
marker: matplotlib marker string
colour: matplotlib colour
alpha: matplotlib alpha
"""
self.marker = marker
self.colour = colour
self.alpha = alpha
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a scatter plot on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
ax.scatter(x, y, marker=self.marker, c=self.colour, alpha=self.alpha)
return fig, ax
class GeomHistogram(Layer):
"""
An efficient histogram, use this instead of GeomBar for speed.
"""
def __init__(self, bins=10, colour='lightblue'):
"""Initialize GeomHistogram instance.
Parameters:
-----------
bins: integer, number of histogram bins
colour: matplotlib colour
"""
self.bins = bins
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a histogram on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
ax.hist(_values_from_object(x), self.bins, facecolor=self.colour)
ax.set_xlabel(self.aes['x'])
return fig, ax
class GeomDensity(Layer):
"""
A kernel density estimation plot.
"""
def work(self, fig=None, ax=None):
"""Draw a one dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from scipy.stats import gaussian_kde
x = self.data[self.aes['x']]
gkde = gaussian_kde(x)
ind = np.linspace(x.min(), x.max(), 200)
ax.plot(ind, gkde.evaluate(ind))
return fig, ax
class GeomDensity2D(Layer):
def work(self, fig=None, ax=None):
"""Draw a two dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
import scipy.stats as stats
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
return fig, ax
class TrellisGrid(Layer):
def __init__(self, by):
"""Initialize TreelisGrid instance.
Parameters:
-----------
by: column names to group by
"""
if len(by) != 2:
raise ValueError("You must give a list of length 2 to group by")
elif by[0] == '.' and by[1] == '.':
raise ValueError("At least one of grouping attributes must be not a dot")
self.by = by
def trellis(self, layers):
"""Create a trellis structure for a list of layers.
Each layer will be cloned with different data into a two dimensional grid.
Parameters:
-----------
layers: a list of Layer objects
Returns:
--------
trellised_layers: clones of each layer in the list, arranged in a trellised lattice
"""
trellised_layers = []
for layer in layers:
data = layer.data
if self.by[0] == '.':
grouped = data.groupby(self.by[1])
elif self.by[1] == '.':
grouped = data.groupby(self.by[0])
else:
grouped = data.groupby(self.by)
groups = list(grouped.groups.keys())
if self.by[0] == '.' or self.by[1] == '.':
shingle1 = set([g for g in groups])
else:
shingle1 = set([g[0] for g in groups])
shingle2 = set([g[1] for g in groups])
if self.by[0] == '.':
self.rows = 1
self.cols = len(shingle1)
elif self.by[1] == '.':
self.rows = len(shingle1)
self.cols = 1
else:
self.rows = len(shingle1)
self.cols = len(shingle2)
trellised = [[None for _ in range(self.cols)] for _ in range(self.rows)]
self.group_grid = [[None for _ in range(self.cols)] for _ in range(self.rows)]
row = 0
col = 0
for group, data in grouped:
new_layer = deepcopy(layer)
new_layer.data = data
trellised[row][col] = new_layer
self.group_grid[row][col] = group
col += 1
if col >= self.cols:
col = 0
row += 1
trellised_layers.append(trellised)
return trellised_layers
def dictionary_union(dict1, dict2):
"""Take two dictionaries, return dictionary union.
Parameters:
-----------
dict1: Python dictionary
dict2: Python dictionary
Returns:
--------
A union of the dictionaries. It assumes that values
with the same keys are identical.
"""
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
result = {}
for key1 in keys1:
result[key1] = dict1[key1]
for key2 in keys2:
result[key2] = dict2[key2]
return result
def merge_aes(layer1, layer2):
"""Merges the aesthetics dictionaries for the two layers.
See the sequence_layers function; the order of the two layers matters.
Parameters:
-----------
layer1: Layer object
layer2: Layer object
"""
for key in layer2.aes.keys():
if layer2.aes[key] is None:
layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
"""Go through the list of layers and fill in the missing bits of information.
The basic rules are this:
* If the current layer has data set to None, take the data from previous layer.
* For each aesthetic mapping, if that mapping is set to None, take it from previous layer.
Parameters:
-----------
layers: a list of Layer objects
"""
for layer1, layer2 in zip(layers[:-1], layers[1:]):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layers
def sequence_grids(layer_grids):
"""Go through the list of layer girds and perform the same thing as sequence_layers.
Parameters:
-----------
layer_grids: a list of two dimensional layer grids
"""
for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]):
for row1, row2 in zip(grid1, grid2):
for layer1, layer2 in zip(row1, row2):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layer_grids
def work_grid(grid, fig):
"""Take a two dimensional grid, add subplots to a figure for each cell and do layer work.
Parameters:
-----------
grid: a two dimensional grid of layers
fig: matplotlib figure to draw on
Returns:
--------
axes: a two dimensional list of matplotlib axes
"""
nrows = len(grid)
ncols = len(grid[0])
axes = [[None for _ in range(ncols)] for _ in range(nrows)]
for row in range(nrows):
for col in range(ncols):
axes[row][col] = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
grid[row][col].work(ax=axes[row][col])
return axes
def adjust_subplots(fig, axes, trellis, layers):
"""Adjust the subtplots on matplotlib figure with the
fact that we have a trellis plot in mind.
Parameters:
-----------
fig: matplotlib figure
axes: a two dimensional grid of matplotlib axes
trellis: TrellisGrid object
layers: last grid of layers in the plot
"""
# Flatten the axes grid
axes = [ax for row in axes for ax in row]
min_x = min([ax.get_xlim()[0] for ax in axes])
max_x = max([ax.get_xlim()[1] for ax in axes])
min_y = min([ax.get_ylim()[0] for ax in axes])
max_y = max([ax.get_ylim()[1] for ax in axes])
[ax.set_xlim(min_x, max_x) for ax in axes]
[ax.set_ylim(min_y, max_y) for ax in axes]
for index, axis in enumerate(axes):
if index % trellis.cols == 0:
pass
else:
axis.get_yaxis().set_ticks([])
axis.set_ylabel('')
if index // trellis.cols == trellis.rows - 1:
pass
else:
axis.get_xaxis().set_ticks([])
axis.set_xlabel('')
if trellis.by[0] == '.':
label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
elif trellis.by[1] == '.':
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
else:
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
if label2 is not None:
axis.table(cellText=[[label1], [label2]],
loc='top', cellLoc='center',
cellColours=[['lightgrey'], ['lightgrey']])
else:
axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
# Flatten the layer grid
layers = [layer for row in layers for layer in row]
legend = {}
for layer in layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
"""
The main plot object. Add layers to an instance of this object to create a plot.
"""
def __init__(self, data, x=None, y=None):
"""Initialize RPlot instance.
Parameters:
-----------
data: pandas DataFrame instance
x: string, DataFrame column name
y: string, DataFrame column name
"""
self.layers = [Layer(data, **default_aes(x=x, y=y))]
self.trellised = False
def add(self, layer):
"""Add a layer to RPlot instance.
Parameters:
-----------
layer: Layer instance
"""
if not isinstance(layer, Layer):
raise TypeError("The operand on the right side of + must be a Layer instance")
self.layers.append(layer)
def render(self, fig=None):
"""Render all the layers on a matplotlib figure.
Parameters:
-----------
fig: matplotlib figure
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
# Look for the last TrellisGrid instance in the layer list
last_trellis = None
for layer in self.layers:
if isinstance(layer, TrellisGrid):
last_trellis = layer
if last_trellis is None:
# We have a simple, non-trellised plot
new_layers = sequence_layers(self.layers)
for layer in new_layers:
layer.work(fig=fig)
legend = {}
for layer in new_layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
else:
# We have a trellised plot.
# First let's remove all other TrellisGrid instances from the layer list,
# including this one.
new_layers = []
for layer in self.layers:
if not isinstance(layer, TrellisGrid):
new_layers.append(layer)
new_layers = sequence_layers(new_layers)
# Now replace the old layers by their trellised versions
new_layers = last_trellis.trellis(new_layers)
# Prepare the subplots and draw on them
new_layers = sequence_grids(new_layers)
axes_grids = [work_grid(grid, fig) for grid in new_layers]
axes_grid = axes_grids[-1]
adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
# And we're done
return fig
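if __name__ == '__main__':
    # A minimal end-to-end sketch (assumptions: matplotlib is available and the
    # DataFrame below is illustrative). It exercises the layer sequencing,
    # trellising, and rendering implemented above.
    import pandas as pd
    import matplotlib.pyplot as plt
    tips = pd.DataFrame({
        'total_bill': [16.99, 10.34, 21.01, 23.68, 24.59, 25.29],
        'tip': [1.01, 1.66, 3.50, 3.31, 3.61, 4.71],
        'sex': ['F', 'F', 'M', 'M', 'F', 'M'],
        'smoker': ['No', 'Yes', 'No', 'Yes', 'No', 'Yes'],
    })
    plot = RPlot(tips, x='total_bill', y='tip')
    plot.add(TrellisGrid(['sex', 'smoker']))  # facet into a sex-by-smoker grid
    plot.add(GeomPoint(colour=ScaleRandomColour('sex')))
    plot.render(plt.figure())
    plt.show()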
| 13,401 |
1,127 | <gh_stars>1000+
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <gtest/gtest.h>
#include "memory/gna_memory.hpp"
using namespace GNAPluginNS::memory;
class GNAMemoryTest : public ::testing::Test {
protected:
GNAMemory<GNAFloatAllocator> mem{ GNAFloatAllocator{} };
void SetUp() override {
}
};
TEST_F(GNAMemoryTest, canStoreActualBlob) {
float input[] = {1, 2, 3};
float* pFuture = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.commit();
ASSERT_NE(pFuture, nullptr);
ASSERT_NE(pFuture, input);
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
}
TEST_F(GNAMemoryTest, canStore2Blobs) {
float input[] = {1, 2, 3, 4};
float* pFuture = nullptr;
float* pFuture2 = nullptr;
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, 3*4);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture2, input+1, 3*4);
mem.commit();
ASSERT_NE(pFuture, input);
ASSERT_NE(pFuture2, input);
ASSERT_EQ(pFuture + 3, pFuture2);
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
ASSERT_EQ(pFuture[3], 2);
ASSERT_EQ(pFuture[4], 3);
ASSERT_EQ(pFuture[5], 4);
}
TEST_F(GNAMemoryTest, canStoreBlobsALIGNED) {
float input[] = {1, 2, 3, 4, 5, 6, 7, 8};
float* pFuture = nullptr;
auto queue = mem.getQueue(REGION_SCRATCH);
queue->push_ptr(nullptr, &pFuture, input, 3 * 4, 8);
mem.commit();
ASSERT_EQ(16, queue->getSize());
ASSERT_NE(pFuture, input);
ASSERT_NE(pFuture, nullptr);
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
// the element just past the copied region must not match the source (only 3 floats were copied)
ASSERT_NE(pFuture[3], 4);
}
TEST_F(GNAMemoryTest, canStore2BlobsALIGNED) {
float input[] = {1, 2, 3, 4, 5, 6, 7, 8};
float* pFuture = nullptr;
float* pFuture2 = nullptr;
auto queue = mem.getQueue(REGION_SCRATCH);
queue->push_ptr(nullptr, &pFuture, input, 3 * 4, 8);
queue->push_ptr(nullptr, &pFuture2, input, 3 * 4, 16);
mem.commit();
ASSERT_EQ(32 , queue->getSize());
ASSERT_NE(pFuture, nullptr);
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
// the second blob starts at the 16-byte-aligned offset (float index 4)
ASSERT_EQ(pFuture[4], 1);
ASSERT_EQ(pFuture[5], 2);
ASSERT_EQ(pFuture[6], 3);
}
TEST_F(GNAMemoryTest, canReserveData) {
float* pFuture = nullptr;
mem.getQueue(REGION_SCRATCH)->reserve_ptr(nullptr, &pFuture, 3*4);
mem.commit();
ASSERT_NE(pFuture, nullptr);
}
TEST_F(GNAMemoryTest, canReserveDataByVoid) {
mem.getQueue(REGION_SCRATCH)->reserve_ptr(nullptr, nullptr, 3*4);
ASSERT_NO_THROW(mem.commit());
}
TEST_F(GNAMemoryTest, canReserveAndPushData) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float* pFuture2 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.getQueue(REGION_SCRATCH)->reserve_ptr(nullptr, &pFuture2, 3*4);
mem.commit();
ASSERT_NE(pFuture, nullptr);
ASSERT_NE(pFuture2, nullptr);
ASSERT_NE(pFuture, input);
ASSERT_NE(pFuture2, pFuture);
pFuture2[0] = -1;
pFuture2[1] = -1;
pFuture2[2] = -1;
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
}
TEST_F(GNAMemoryTest, canBindAndResolve) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float *pFuture2 = nullptr;
float *pFuture3 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture3, &pFuture);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture2, &pFuture);
mem.commit();
ASSERT_NE(pFuture, input);
ASSERT_NE(pFuture2, nullptr);
ASSERT_EQ(pFuture2, pFuture);
ASSERT_EQ(pFuture3, pFuture);
ASSERT_EQ(pFuture2[0], 1);
ASSERT_EQ(pFuture2[1], 2);
ASSERT_EQ(pFuture2[2], 3);
}
TEST_F(GNAMemoryTest, canBindTransitivelyAndResolve) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float *pFuture3 = nullptr;
float *pFuture4 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture4, &pFuture3);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture3, &pFuture);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.commit();
ASSERT_NE(pFuture, input);
ASSERT_EQ(pFuture3, pFuture);
ASSERT_EQ(pFuture4, pFuture);
ASSERT_NE(pFuture4, nullptr);
ASSERT_EQ(pFuture4[0], 1);
ASSERT_EQ(pFuture4[1], 2);
ASSERT_EQ(pFuture4[2], 3);
}
TEST_F(GNAMemoryTest, canBindTransitivelyWithOffsetsAndResolve) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float *pFuture3 = nullptr;
float *pFuture4 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture4, &pFuture3, 4);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture3, &pFuture, 4);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.commit();
ASSERT_NE(pFuture, input);
ASSERT_EQ(pFuture3, pFuture + 1);
ASSERT_EQ(pFuture4, pFuture + 2);
ASSERT_NE(pFuture, nullptr);
ASSERT_EQ(pFuture[0], 1);
ASSERT_EQ(pFuture[1], 2);
ASSERT_EQ(pFuture[2], 3);
}
TEST_F(GNAMemoryTest, canBindWithOffsetAndResolve) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float *pFuture2 = nullptr;
float *pFuture3 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture3, &pFuture, 4);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture2, &pFuture);
mem.commit();
ASSERT_NE(pFuture, input);
ASSERT_NE(pFuture2, nullptr);
ASSERT_EQ(pFuture2, pFuture);
ASSERT_NE(pFuture3, nullptr);
ASSERT_EQ(pFuture3, pFuture + 1);
ASSERT_EQ(pFuture2[0], 1);
ASSERT_EQ(pFuture2[1], 2);
ASSERT_EQ(pFuture2[2], 3);
ASSERT_EQ(pFuture3[0], 2);
}
TEST_F(GNAMemoryTest, canPushLocal) {
float* pFuture = reinterpret_cast<float*>(&pFuture);
{
std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f};
mem.getQueue(REGION_SCRATCH)->push_local_ptr(nullptr, pFuture, &*input.begin(), 4 * 4, 1);
}
// the source vector is out of scope here and its stack memory may be reused,
// so the data must already have been captured at push time
mem.commit();
ASSERT_FLOAT_EQ(pFuture[0], 1);
ASSERT_FLOAT_EQ(pFuture[1], 2);
ASSERT_FLOAT_EQ(pFuture[2], 3);
ASSERT_FLOAT_EQ(pFuture[3], 4);
}
TEST_F(GNAMemoryTest, canPushValue) {
float* pFuture = reinterpret_cast<float*>(&pFuture);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
{
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture, 3.f, 2);
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture2, 13.f, 2);
}
mem.commit();
ASSERT_FLOAT_EQ(pFuture[0], 3);
ASSERT_FLOAT_EQ(pFuture[1], 3);
ASSERT_FLOAT_EQ(pFuture[2], 13);
ASSERT_FLOAT_EQ(pFuture[3], 13);
}
TEST_F(GNAMemoryTest, canPushReadOnlyValue) {
float* pFuture = reinterpret_cast<float*>(&pFuture);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
{
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture, 3.f, 2);
mem.getQueue(REGION_RO)->push_value(nullptr, pFuture2, 13.f, 2);
}
mem.commit();
ASSERT_FLOAT_EQ(pFuture[0], 3);
ASSERT_FLOAT_EQ(pFuture[1], 3);
ASSERT_FLOAT_EQ(pFuture2[0], 13);
ASSERT_FLOAT_EQ(pFuture2[1], 13);
}
TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSizeEmptyReqs) {
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, nullptr, 3.f, 2);
mem.getQueue(REGION_RO)->push_value(nullptr, nullptr, 13.f, 2);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_SCRATCH), 0);
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_RO), 0);
}
TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSizeWithEmptyReqs) {
// empty request before
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, nullptr, 3.f, 2);
// not empty requests
float* pFuture1 = reinterpret_cast<float*>(&pFuture1);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture1, 3.f, 2);
mem.getQueue(REGION_RO)->push_value(nullptr, pFuture2, 13.f, 2);
// empty request after
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, nullptr, 3.f, 2);
mem.getQueue(REGION_RO)->push_value(nullptr, nullptr, 13.f, 2);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_RO), 2 * sizeof(float));
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_SCRATCH), 2 * sizeof(float));
}
TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSize) {
float* pFuture1 = reinterpret_cast<float*>(&pFuture1);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture1, 3.f, 2);
mem.getQueue(REGION_RO)->push_value(nullptr, pFuture2, 13.f, 2);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_RO), 2 * sizeof(float));
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_SCRATCH), 2 * sizeof(float));
}
TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSizeWithAlignment) {
GNAMemory<GNAPluginNS::memory::GNAFloatAllocator> memAligned(64);
float* pFuture1 = reinterpret_cast<float*>(&pFuture1);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
memAligned.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture1, 3.f, 2);
memAligned.getQueue(REGION_RO)->push_value(nullptr, pFuture2, 13.f, 2);
memAligned.commit();
ASSERT_EQ(memAligned.getRegionBytes(rRegion::REGION_RO), 64);
ASSERT_EQ(memAligned.getRegionBytes(rRegion::REGION_SCRATCH), 64);
}
TEST_F(GNAMemoryTest, canSetUpReadWriteSectionPtr) {
float* pFuture1 = reinterpret_cast<float*>(&pFuture1);
float* pFuture2 = reinterpret_cast<float*>(&pFuture2);
float* pFuture3 = reinterpret_cast<float*>(&pFuture3);
mem.getQueue(REGION_RO)->push_value(nullptr, pFuture1, 3.f, 2);
mem.getQueue(REGION_SCRATCH)->push_value(nullptr, pFuture2, 13.f, 3);
mem.getQueue(REGION_RO)->push_value(nullptr, pFuture3, 32.f, 4);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_RO), (2 + 4) * sizeof(float));
ASSERT_EQ(mem.getRegionBytes(rRegion::REGION_SCRATCH), 3 * sizeof(float));
ASSERT_NE(&pFuture2[0], &pFuture1[0]);
ASSERT_LT(&pFuture1[0], &pFuture3[0]);
ASSERT_FLOAT_EQ(pFuture1[0], 3.f);
ASSERT_FLOAT_EQ(pFuture1[1], 3.f);
ASSERT_FLOAT_EQ(pFuture2[0], 13.f);
ASSERT_FLOAT_EQ(pFuture2[1], 13.f);
ASSERT_FLOAT_EQ(pFuture2[2], 13.f);
ASSERT_FLOAT_EQ(pFuture3[0], 32.f);
ASSERT_FLOAT_EQ(pFuture3[1], 32.f);
ASSERT_FLOAT_EQ(pFuture3[2], 32.f);
ASSERT_FLOAT_EQ(pFuture3[3], 32.f);
}
TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequest) {
float input[] = {1, 2, 3};
float *pFuture = nullptr;
float *pFuture2 = nullptr;
float *pFuture3 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture2, &pFuture, len, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture3, &pFuture2, 2 * len, len);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(REGION_SCRATCH), 4 * len);
ASSERT_NE(pFuture, nullptr);
ASSERT_EQ(pFuture2, pFuture + 3);
ASSERT_EQ(pFuture3, pFuture + 9);
ASSERT_FLOAT_EQ(pFuture[0], 1);
ASSERT_FLOAT_EQ(pFuture[1], 2);
ASSERT_FLOAT_EQ(pFuture[2], 3);
ASSERT_FLOAT_EQ(pFuture[3], 0);
ASSERT_FLOAT_EQ(pFuture[4], 0);
ASSERT_FLOAT_EQ(pFuture[5], 0);
ASSERT_FLOAT_EQ(pFuture[6], 0);
ASSERT_FLOAT_EQ(pFuture[7], 0);
ASSERT_FLOAT_EQ(pFuture[8], 0);
}
TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequestWhenPush) {
float input[] = {1, 2, 3};
float input2[] = {6, 7, 8};
float *pFutureInput2 = nullptr;
float *pFuture = nullptr;
float *pFuture2 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFuture, input, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture2, &pFuture, len, len);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFutureInput2, input2, len);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(REGION_SCRATCH), 3 * len);
ASSERT_NE(pFuture, nullptr);
ASSERT_NE(pFutureInput2, nullptr);
ASSERT_EQ(pFuture2, pFuture + 3);
ASSERT_FLOAT_EQ(pFuture[0], 1);
ASSERT_FLOAT_EQ(pFuture[1], 2);
ASSERT_FLOAT_EQ(pFuture[2], 3);
ASSERT_FLOAT_EQ(pFuture[3], 0);
ASSERT_FLOAT_EQ(pFuture[4], 0);
ASSERT_FLOAT_EQ(pFutureInput2[0], 6);
ASSERT_FLOAT_EQ(pFutureInput2[1], 7);
ASSERT_FLOAT_EQ(pFutureInput2[2], 8);
}
TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequestWhenAlloc) {
float input[] = {1, 2, 3};
float *pFutureInput = nullptr;
float *pFuture = nullptr;
float *pFuture2 = nullptr;
size_t len = sizeof(input);
mem.getQueue(REGION_SCRATCH)->reserve_ptr(nullptr, &pFuture, len);
mem.getQueue(REGION_AUTO)->bind_ptr(nullptr, &pFuture2, &pFuture, len, len);
mem.getQueue(REGION_SCRATCH)->push_ptr(nullptr, &pFutureInput, input, len);
mem.commit();
ASSERT_EQ(mem.getRegionBytes(REGION_SCRATCH), 3 * len);
ASSERT_NE(pFuture, nullptr);
ASSERT_NE(pFutureInput, nullptr);
ASSERT_EQ(pFuture2, pFuture + 3);
ASSERT_FLOAT_EQ(pFuture[0], 0);
ASSERT_FLOAT_EQ(pFuture[1], 0);
ASSERT_FLOAT_EQ(pFuture[2], 0);
ASSERT_FLOAT_EQ(pFuture[3], 0);
ASSERT_FLOAT_EQ(pFuture[4], 0);
ASSERT_FLOAT_EQ(pFutureInput[0], 1);
ASSERT_FLOAT_EQ(pFutureInput[1], 2);
ASSERT_FLOAT_EQ(pFutureInput[2], 3);
}
| 6,525 |
659 | <reponame>swe-zzf/shark-jdbc
/*
* Copyright 2015-2101 gaoxianglong
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.test.sharksharding.util.xml;
import java.io.File;
import org.junit.Test;
import com.sharksharding.util.xml.CreateC3p0Xml;
import com.sharksharding.util.xml.CreateCoreXml;
import com.sharksharding.util.xml.CreateDruidXml;
import junit.framework.Assert;
/**
* Test class for the automatic generation of configuration files
*
* @author gaoxianglong
*/
public class CreateXmlTest {
/**
* Generate the core configuration file
*
* @author gaoxianglong
*/
public @Test void testCreateCoreXml() {
CreateCoreXml c_xml = new CreateCoreXml();
        /* Whether to print the generated configuration file to the console */
c_xml.setIsShow(true);
        /* Configure sharding information */
c_xml.setDbSize("64");
c_xml.setShard("true");
c_xml.setWr_index("r0w32");
c_xml.setShardMode("false");
c_xml.setConsistent("false");
c_xml.setDbRuleArray("#userinfo_id|email_hash# % 1024 / 32");
c_xml.setTbRuleArray("#userinfo_id|email_hash# % 1024 % 32");
c_xml.setSqlPath("classpath:properties/sql.xml");
c_xml.setTbSuffix("_0000");
        /* Write out the configuration file */
Assert.assertTrue(c_xml.createCoreXml(new File("e:/shark-context.xml")));
}
/**
     * Generates the c3p0 data source configuration file
*
* @author gaoxianglong
*/
public @Test void testCreateC3p0Xml() {
CreateC3p0Xml c_xml = new CreateC3p0Xml();
        /* Whether to print the generated configuration file to the console */
c_xml.setIsShow(true);
c_xml.setTbSuffix("_0000");
        /* Starting index of the data sources */
c_xml.setDataSourceIndex(1);
        /* Configure sharding information */
c_xml.setDbSize("16");
        /* Configure the data source information */
c_xml.setJdbcUrl("jdbc:mysql://ip1:3306/db");
c_xml.setUser("${name}");
c_xml.setPassword("${password}");
c_xml.setDriverClass("${driverClass}");
c_xml.setInitialPoolSize("${initialPoolSize}");
c_xml.setMinPoolSize("${minPoolSize}");
c_xml.setMaxPoolSize("${maxPoolSize}");
c_xml.setMaxStatements("${maxStatements}");
c_xml.setMaxIdleTime("${maxIdleTime}");
        /* Write out the configuration file */
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/dataSource-context.xml")));
}
/**
     * Generates the c3p0 master/slave data source configuration files
*
* @author gaoxianglong
*/
public @Test void testCreateC3p0MSXml() {
CreateC3p0Xml c_xml = new CreateC3p0Xml();
c_xml.setIsShow(true);
c_xml.setTbSuffix("_0000");
        /* Generate the master data source information */
c_xml.setDataSourceIndex(1);
c_xml.setDbSize("16");
c_xml.setJdbcUrl("jdbc:mysql://ip1:3306/db");
c_xml.setUser("${name}");
c_xml.setPassword("${password}");
c_xml.setDriverClass("${driverClass}");
c_xml.setInitialPoolSize("${initialPoolSize}");
c_xml.setMinPoolSize("${minPoolSize}");
c_xml.setMaxPoolSize("${maxPoolSize}");
c_xml.setMaxStatements("${maxStatements}");
c_xml.setMaxIdleTime("${maxIdleTime}");
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/masterDataSource-context.xml")));
        /* Generate the slave data source information */
c_xml.setDataSourceIndex(17);
c_xml.setDbSize("16");
c_xml.setJdbcUrl("jdbc:mysql://ip2:3306/db");
c_xml.setUser("${name}");
c_xml.setPassword("${password}");
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/slaveDataSource-context.xml")));
}
/**
     * Generates the druid data source configuration file
*
* @author gaoxianglong
*/
public @Test void testCreateDruidXml() {
CreateDruidXml c_xml = new CreateDruidXml();
        /* Whether to print the generated configuration file to the console */
c_xml.setIsShow(true);
        /* Starting index of the data sources */
c_xml.setDataSourceIndex(1);
        /* Configure sharding information */
c_xml.setDbSize("16");
        /* false enables lazy loading; otherwise data sources are initialized at startup */
c_xml.setInit_method(true);
c_xml.setTbSuffix("_0000");
        /* Generate the data source information */
c_xml.setUsername("${username}");
c_xml.setPassword("${password}");
c_xml.setUrl("jdbc:mysql://ip1:3306/db");
c_xml.setInitialSize("${initialSize}");
c_xml.setMinIdle("${minIdle}");
c_xml.setMaxActive("${maxActive}");
c_xml.setPoolPreparedStatements("${poolPreparedStatements}");
c_xml.setMaxOpenPreparedStatements("${maxOpenPreparedStatements}");
c_xml.setTestOnBorrow("${testOnBorrow}");
c_xml.setTestOnReturn("${testOnReturn}");
c_xml.setTestWhileIdle("${testWhileIdle}");
c_xml.setFilters("${filters}");
c_xml.setConnectionProperties("${connectionProperties}");
c_xml.setUseGlobalDataSourceStat("${useGlobalDataSourceStat}");
c_xml.setTimeBetweenLogStatsMillis("${timeBetweenLogStatsMillis}");
        /* Write out the configuration file */
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/dataSource-context.xml")));
}
/**
     * Generates the druid master/slave data source configuration files
*
* @author gaoxianglong
*/
public @Test void testCreateDruidMSXml() {
CreateDruidXml c_xml = new CreateDruidXml();
        /* Whether to print the generated configuration file to the console */
c_xml.setIsShow(true);
        /* Starting index of the data sources */
c_xml.setDataSourceIndex(1);
        /* Configure sharding information */
c_xml.setDbSize("16");
        /* false enables lazy loading; otherwise data sources are initialized at startup */
c_xml.setInit_method(true);
c_xml.setTbSuffix("_0000");
        /* Generate the master data source information */
c_xml.setUsername("${username}");
c_xml.setPassword("${password}");
c_xml.setUrl("jdbc:mysql://ip1:3306/db");
c_xml.setInitialSize("${initialSize}");
c_xml.setMinIdle("${minIdle}");
c_xml.setMaxActive("${maxActive}");
c_xml.setPoolPreparedStatements("${poolPreparedStatements}");
c_xml.setMaxOpenPreparedStatements("${maxOpenPreparedStatements}");
c_xml.setTestOnBorrow("${testOnBorrow}");
c_xml.setTestOnReturn("${testOnReturn}");
c_xml.setTestWhileIdle("${testWhileIdle}");
c_xml.setFilters("${filters}");
c_xml.setConnectionProperties("${connectionProperties}");
c_xml.setUseGlobalDataSourceStat("${useGlobalDataSourceStat}");
c_xml.setTimeBetweenLogStatsMillis("${timeBetweenLogStatsMillis}");
        /* Write out the configuration file */
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/masterDataSource-context.xml")));
        /* Generate the slave data source information */
c_xml.setDataSourceIndex(17);
c_xml.setDbSize("16");
c_xml.setUsername("${username}");
c_xml.setPassword("${password}");
c_xml.setUrl("jdbc:mysql://ip2:3306/db");
        /* Write out the configuration file */
Assert.assertTrue(c_xml.createDatasourceXml(new File("e:/slaveDataSource-context.xml")));
}
} | 3,182 |
575 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/internals/notifications/notifications_internals_ui_message_handler.h"
#include <memory>
#include <string>
#include <utility>
#include "base/bind.h"
#include "base/strings/utf_string_conversions.h"
#include "base/values.h"
#include "chrome/browser/notifications/scheduler/notification_schedule_service_factory.h"
#include "chrome/browser/notifications/scheduler/public/notification_params.h"
#include "chrome/browser/notifications/scheduler/public/notification_schedule_service.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/profiles/profile_key.h"
#include "content/public/browser/web_ui.h"
NotificationsInternalsUIMessageHandler::NotificationsInternalsUIMessageHandler(
Profile* profile)
: schedule_service_(NotificationScheduleServiceFactory::GetForKey(
profile->GetProfileKey())) {}
NotificationsInternalsUIMessageHandler::
~NotificationsInternalsUIMessageHandler() = default;
void NotificationsInternalsUIMessageHandler::RegisterMessages() {
web_ui()->RegisterMessageCallback(
"scheduleNotification",
base::BindRepeating(
&NotificationsInternalsUIMessageHandler::HandleScheduleNotification,
base::Unretained(this)));
}
void NotificationsInternalsUIMessageHandler::HandleScheduleNotification(
const base::ListValue* args) {
CHECK_EQ(args->GetSize(), 3u);
notifications::ScheduleParams schedule_params;
schedule_params.deliver_time_start = base::Time::Now();
schedule_params.deliver_time_end =
base::Time::Now() + base::TimeDelta::FromMinutes(5);
notifications::NotificationData data;
  // TODO(hesen): Enable adding icons from notifications-internals HTML.
data.custom_data.emplace("url", args->GetList()[0].GetString());
data.title = base::UTF8ToUTF16(args->GetList()[1].GetString());
data.message = base::UTF8ToUTF16(args->GetList()[2].GetString());
auto params = std::make_unique<notifications::NotificationParams>(
notifications::SchedulerClientType::kWebUI, std::move(data),
std::move(schedule_params));
schedule_service_->Schedule(std::move(params));
}
| 746 |
396 | <filename>glyce/models/srl/make_char_vocab.py<gh_stars>100-1000
import collections
import pickle
import os
def make_char_vocab(filename, output_path):
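    """Build a character vocabulary from a word vocab file and pickle the char<->index mappings into output_path."""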
chars = []
with open(filename, 'r', encoding='utf-8') as f:
data = f.read()
words = data.strip().split('\n')
print(len(words))
for word in words[4:]: # ignore ['<PAD>', '<UNK>', '<ROOT>', '<NUM>']
for char in word:
chars.append(char)
chars_counter = collections.Counter(chars).most_common()
    char_vocab = ['<PAD>', '<UNK>', '<ROOT>', '<NUM>'] + [item[0] for item in chars_counter]
print(char_vocab)
char_to_idx = {char:idx for idx, char in enumerate(char_vocab)}
idx_to_char = {idx:char for idx, char in enumerate(char_vocab)}
print(char_to_idx)
vocab_path = os.path.join(output_path,'char.vocab')
char2idx_path = os.path.join(output_path,'char2idx.bin')
idx2char_path = os.path.join(output_path,'idx2char.bin')
with open(vocab_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(char_vocab))
with open(char2idx_path, 'wb') as f:
pickle.dump(char_to_idx, f)
with open(idx2char_path, 'wb') as f:
pickle.dump(idx_to_char, f)
make_char_vocab('temp/word.vocab', 'temp') | 604 |
1,609 | package com.mossle.bpm.persistence.domain;
// Generated by Hibernate Tools
import java.util.HashSet;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import javax.persistence.OrderBy;
import javax.persistence.Table;
/**
 * BpmCategory process category.
*
* @author Lingo
*/
@Entity
@Table(name = "BPM_CATEGORY")
public class BpmCategory implements java.io.Serializable {
private static final long serialVersionUID = 0L;
    /** Primary key. */
private Long id;
/** null. */
private BpmCategory bpmCategory;
    /** Name. */
private String name;
    /** Sort order. */
private Integer priority;
    /** Tenant. */
private String tenantId;
/** null. */
private String code;
/** null. */
private String status;
/** . */
private Set<BpmProcess> bpmProcesses = new HashSet<BpmProcess>(0);
/** . */
private Set<BpmCategory> bpmCategories = new HashSet<BpmCategory>(0);
public BpmCategory() {
}
public BpmCategory(Long id) {
this.id = id;
}
public BpmCategory(Long id, BpmCategory bpmCategory, String name,
Integer priority, String tenantId, String code, String status,
Set<BpmProcess> bpmProcesses, Set<BpmCategory> bpmCategories) {
this.id = id;
this.bpmCategory = bpmCategory;
this.name = name;
this.priority = priority;
this.tenantId = tenantId;
this.code = code;
this.status = status;
this.bpmProcesses = bpmProcesses;
this.bpmCategories = bpmCategories;
}
    /** @return primary key. */
@Id
@Column(name = "ID", unique = true, nullable = false)
public Long getId() {
return this.id;
}
/**
* @param id
     *            primary key.
*/
public void setId(Long id) {
this.id = id;
}
/** @return null. */
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn(name = "PARENT_ID")
public BpmCategory getBpmCategory() {
return this.bpmCategory;
}
/**
* @param bpmCategory
* null.
*/
public void setBpmCategory(BpmCategory bpmCategory) {
this.bpmCategory = bpmCategory;
}
    /** @return name. */
@Column(name = "NAME", length = 200)
public String getName() {
return this.name;
}
/**
* @param name
     *            name.
*/
public void setName(String name) {
this.name = name;
}
    /** @return sort order. */
@Column(name = "PRIORITY")
public Integer getPriority() {
return this.priority;
}
/**
* @param priority
     *            sort order.
*/
public void setPriority(Integer priority) {
this.priority = priority;
}
    /** @return tenant. */
@Column(name = "TENANT_ID", length = 64)
public String getTenantId() {
return this.tenantId;
}
/**
* @param tenantId
     *            tenant.
*/
public void setTenantId(String tenantId) {
this.tenantId = tenantId;
}
/** @return null. */
@Column(name = "CODE", length = 200)
public String getCode() {
return this.code;
}
/**
* @param code
* null.
*/
public void setCode(String code) {
this.code = code;
}
/** @return null. */
@Column(name = "STATUS", length = 50)
public String getStatus() {
return this.status;
}
/**
* @param status
* null.
*/
public void setStatus(String status) {
this.status = status;
}
/** @return . */
@OneToMany(fetch = FetchType.LAZY, mappedBy = "bpmCategory")
@OrderBy("priority")
public Set<BpmProcess> getBpmProcesses() {
return this.bpmProcesses;
}
/**
* @param bpmProcesses
* .
*/
public void setBpmProcesses(Set<BpmProcess> bpmProcesses) {
this.bpmProcesses = bpmProcesses;
}
/** @return . */
@OneToMany(fetch = FetchType.LAZY, mappedBy = "bpmCategory")
public Set<BpmCategory> getBpmCategories() {
return this.bpmCategories;
}
/**
* @param bpmCategories
* .
*/
public void setBpmCategories(Set<BpmCategory> bpmCategories) {
this.bpmCategories = bpmCategories;
}
}
| 2,031 |
678 | /**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/GeoServices.framework/GeoServices
*/
@protocol GEOSupportedTileSetsServerProxyDelegate <NSObject>
- (void)supportedTileSetsServerProxyUpdateDidFail:(id)supportedTileSetsServerProxyUpdate;
- (void)supportedTileSetsServerProxyReportedCacheIsCurrent:(id)current;
- (void)supportedTileSetsServerProxy:(id)proxy didReceiveDocumentData:(id)data;
@end
| 140 |
1,080 | # -*- coding:utf-8 -*-
import sys
from multiprocessing import Process, Value as PValue, current_process
from os.path import getsize
from threading import Thread
from paramiko import SSHClient, AutoAddPolicy
from hypernets.utils import logging
from hypernets.utils.common import Counter
logger = logging.get_logger(__name__)
class DumpFileThread(Thread):
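    """Copies everything from one file handle to another in buf_size chunks until EOF."""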
counter = Counter()
def __init__(self, in_file_handle, out_file_handle, buf_size=16):
super(DumpFileThread, self).__init__()
assert in_file_handle and out_file_handle
# self.name = f'{self.__class__.__name__}-{self.counter()}'
self.name = f'{self.__class__.__name__}-{current_process().pid}-{self.counter()}'
self.in_file_handle = in_file_handle
self.out_file_handle = out_file_handle
self.buf_size = buf_size
def run(self):
data = self.in_file_handle.read(self.buf_size)
while data and len(data) > 0:
self.out_file_handle.write(data)
data = self.in_file_handle.read(self.buf_size)
class SshProcess(Process):
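    """Runs a command on a remote host over SSH in a child process, wiring local files to the remote stdin/stdout/stderr."""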
def __init__(self, ssh_host, ssh_port, cmd, in_file, out_file, err_file, environment=None):
super(SshProcess, self).__init__()
self.ssh_host = ssh_host
self.ssh_port = ssh_port
self.cmd = cmd
self.in_file = in_file
self.out_file = out_file
self.err_file = err_file
self.environment = environment
self._exit_code = PValue('i', -1)
def run(self):
if logger.is_info_enabled():
logger.info(f'[{self.name}] [SSH {self.ssh_host}]: {self.cmd}')
try:
code = self.ssh_run(self.ssh_host, self.ssh_port,
self.cmd,
self.in_file,
self.out_file,
self.err_file,
self.environment)
except KeyboardInterrupt:
code = 137
if logger.is_info_enabled():
logger.info(f'[{self.name}] [SSH {self.ssh_host}] {self.cmd} done with {code}')
self._exit_code.value = code
@staticmethod
def ssh_run(ssh_host, ssh_port, cmd, in_file, out_file, err_file, environment):
with SSHClient() as ssh:
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(ssh_host, ssh_port)
stdin, stdout, stderr = ssh.exec_command(cmd, bufsize=10, environment=environment)
if in_file and getsize(in_file) > 0:
with open(in_file, 'rb') as f:
data = f.read()
stdin.write(data)
stdin.flush()
channel = stdout.channel
# channel.settimeout(0.1)
if out_file and err_file:
with open(out_file, 'wb', buffering=0)as o, open(err_file, 'wb', buffering=0) as e:
threads = [DumpFileThread(stdout, o), DumpFileThread(stderr, e)]
for p in threads: p.start()
for p in threads: p.join()
else:
threads = [DumpFileThread(stdout, sys.stdout), DumpFileThread(stderr, sys.stderr)]
for p in threads: p.start()
for p in threads: p.join()
assert channel.exit_status_ready()
code = channel.recv_exit_status()
return code
@property
def exitcode(self):
code = self._exit_code.value
return code if code >= 0 else None
| 1,728 |
16,989 | // Copyright 2021 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.android.xml;
import com.android.aapt.Resources.MacroBody;
import com.android.aapt.Resources.Reference;
import com.android.aapt.Resources.Value;
import com.google.common.collect.ImmutableList;
import com.google.devtools.build.android.AndroidDataWritingVisitor;
import com.google.devtools.build.android.AndroidDataWritingVisitor.StartTag;
import com.google.devtools.build.android.AndroidResourceSymbolSink;
import com.google.devtools.build.android.DataSource;
import com.google.devtools.build.android.DependencyInfo;
import com.google.devtools.build.android.FullyQualifiedName;
import com.google.devtools.build.android.XmlResourceValue;
import com.google.devtools.build.android.XmlResourceValues;
import com.google.devtools.build.android.proto.SerializeFormat;
import com.google.devtools.build.android.proto.SerializeFormat.DataValueXml;
import com.google.devtools.build.android.proto.SerializeFormat.DataValueXml.XmlType;
import com.google.devtools.build.android.resources.Visibility;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Objects;
/**
* Represents an Android Macro resource.
*
* <p>Macros are compile-time resource definitions that have their contents substituted wherever
* they are referenced in xml. For example: <code>
* <macro name="is_enabled">true</macro>
* <bool name="is_prod">@macro/is_enabled</macro>
* </code> The contents of the macro above will be substituted in place of the macro reference,
* resulting a resource table containing: <code>
* <bool name="is_prod">true</macro>
* </code>
*/
public class MacroXmlResourceValue implements XmlResourceValue {
private final String rawString;
private MacroXmlResourceValue(String rawString) {
this.rawString = rawString;
}
public static XmlResourceValue of(String rawContents) {
return new MacroXmlResourceValue(rawContents);
}
public static XmlResourceValue from(Value proto, Visibility visibility) {
MacroBody macro = proto.getCompoundValue().getMacro();
return new MacroXmlResourceValue(macro.getRawString());
}
public static XmlResourceValue from(SerializeFormat.DataValueXml proto) {
return new MacroXmlResourceValue(proto.getValue());
}
/**
* Each XmlValue is expected to write a valid representation in xml to the writer.
*
* @param key The FullyQualified name for the xml resource being written.
* @param source The source of the value to allow for proper comment annotation.
* @param mergedDataWriter The target writer.
*/
@Override
public void write(
FullyQualifiedName key, DataSource source, AndroidDataWritingVisitor mergedDataWriter) {
StartTag startTag =
mergedDataWriter.define(key).derivedFrom(source).startTag("macro").named(key);
if (rawString == null) {
startTag.closeUnaryTag().save();
} else {
startTag.closeTag().addCharactersOf(rawString).endTag().save();
}
}
/** Serializes the resource value to the OutputStream and returns the bytes written. */
@Override
public int serializeTo(int sourceId, Namespaces namespaces, OutputStream out) throws IOException {
DataValueXml.Builder xmlValue =
SerializeFormat.DataValueXml.newBuilder()
.setType(XmlType.MACRO)
.putAllNamespace(namespaces.asMap());
if (rawString != null) {
xmlValue.setValue(rawString);
}
return XmlResourceValues.serializeProtoDataValue(
out, XmlResourceValues.newSerializableDataValueBuilder(sourceId).setXmlValue(xmlValue));
}
/**
* Combines these xml values together and returns a single value.
*
* @throws IllegalArgumentException always since macros are not a combinable resource
*/
@Override
public XmlResourceValue combineWith(XmlResourceValue value) {
throw new IllegalArgumentException(this + " is not a combinable resource.");
}
/**
* Macros cannot be merged with any xml values, so this method always returns 0 which indicates
* this and the other value have equal priority.
*/
@Override
public int compareMergePriorityTo(XmlResourceValue value) {
return 0;
}
/**
   * Queue up writing the resource to the given {@link AndroidResourceSymbolSink}. Each resource
* can generate one or more (in the case of styleable) fields and inner classes in the R class.
*
* @param dependencyInfo The provenance (in terms of Bazel relationship) of the resource
* @param key The FullyQualifiedName of the resource
* @param sink the symbol sink for producing source and classes
*/
@Override
public void writeResourceToClass(
DependencyInfo dependencyInfo, FullyQualifiedName key, AndroidResourceSymbolSink sink) {}
/** Returns a representation of the xml value as a string suitable for conflict messages. */
@Override
public String asConflictStringWith(DataSource source) {
return source.asConflictString();
}
/** Visibility of this resource as denoted by a {@code <public>} tag, or lack thereof. */
@Override
public Visibility getVisibility() {
return Visibility.UNKNOWN;
}
/** Resources referenced via XML attributes or proxying resource definitions. */
@Override
public ImmutableList<Reference> getReferencedResources() {
return ImmutableList.of();
}
@Override
public int hashCode() {
return Objects.hashCode(rawString);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof MacroXmlResourceValue)) {
return false;
}
MacroXmlResourceValue other = (MacroXmlResourceValue) obj;
return Objects.equals(rawString, other.rawString);
}
}
| 1,892 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/086/08603237.json
{"nom":"Saint-Pierre-d'Exideuil","circ":"3ème circonscription","dpt":"Vienne","inscrits":635,"abs":338,"votants":297,"blancs":26,"nuls":11,"exp":260,"res":[{"nuance":"REM","nom":"<NAME>","voix":186},{"nuance":"FN","nom":"<NAME>","voix":74}]} | 134 |
623 | import torch
from torch_sparse.tensor import SparseTensor
def test_overload():
row = torch.tensor([0, 1, 1, 2, 2])
col = torch.tensor([1, 0, 2, 1, 2])
mat = SparseTensor(row=row, col=col)
other = torch.tensor([1, 2, 3]).view(3, 1)
other + mat
mat + other
other * mat
mat * other
other = torch.tensor([1, 2, 3]).view(1, 3)
other + mat
mat + other
other * mat
mat * other
| 188 |
593 | <gh_stars>100-1000
/**
* TLS-Attacker - A Modular Penetration Testing Framework for TLS
*
* Copyright 2014-2022 Ruhr University Bochum, Paderborn University, Hackmanit GmbH
*
* Licensed under Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package de.rub.nds.tlsattacker.core.protocol.preparator;
import de.rub.nds.modifiablevariable.util.ArrayConverter;
import de.rub.nds.modifiablevariable.util.RandomHelper;
import de.rub.nds.tlsattacker.core.config.Config;
import de.rub.nds.tlsattacker.core.constants.CipherAlgorithm;
import de.rub.nds.tlsattacker.core.constants.ClientAuthenticationType;
import de.rub.nds.tlsattacker.core.constants.HandshakeByteLength;
import de.rub.nds.tlsattacker.core.constants.MacAlgorithm;
import de.rub.nds.tlsattacker.core.exceptions.CryptoException;
import de.rub.nds.tlsattacker.core.protocol.message.NewSessionTicketMessage;
import de.rub.nds.tlsattacker.core.state.SessionTicket;
import de.rub.nds.tlsattacker.core.state.StatePlaintext;
import de.rub.nds.tlsattacker.core.state.serializer.SessionTicketSerializer;
import de.rub.nds.tlsattacker.core.state.serializer.StatePlaintextSerializer;
import de.rub.nds.tlsattacker.core.util.StaticTicketCrypto;
import de.rub.nds.tlsattacker.core.workflow.chooser.Chooser;
import de.rub.nds.tlsattacker.util.TimeHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class NewSessionTicketPreparator extends HandshakeMessagePreparator<NewSessionTicketMessage> {
private static final Logger LOGGER = LogManager.getLogger();
private final NewSessionTicketMessage msg;
public NewSessionTicketPreparator(Chooser chooser, NewSessionTicketMessage message) {
super(chooser, message);
this.msg = message;
}
private long generateTicketLifetimeHint() {
long ticketLifeTimeHint = chooser.getConfig().getSessionTicketLifetimeHint();
return ticketLifeTimeHint;
}
private void prepareTicketLifetimeHint(NewSessionTicketMessage msg) {
msg.setTicketLifetimeHint(generateTicketLifetimeHint());
LOGGER.debug("TicketLifetimeHint: " + msg.getTicketLifetimeHint().getValue());
}
private void prepareTicket(NewSessionTicketMessage msg) {
Config config = chooser.getConfig();
SessionTicket newTicket = msg.getTicket();
newTicket.setKeyName(config.getSessionTicketKeyName());
CipherAlgorithm cipherAlgorithm = config.getSessionTicketCipherAlgorithm();
byte[] encryptionKey = config.getSessionTicketEncryptionKey();
byte[] iv = new byte[cipherAlgorithm.getBlocksize()];
RandomHelper.getRandom().nextBytes(iv);
newTicket.setIV(iv);
StatePlaintext plainState = new StatePlaintext();
plainState.generateStatePlaintext(chooser);
StatePlaintextSerializer plaintextSerializer = new StatePlaintextSerializer(plainState);
byte[] plainStateSerialized = plaintextSerializer.serialize();
byte[] encryptedState;
try {
encryptedState = StaticTicketCrypto.encrypt(cipherAlgorithm, plainStateSerialized, encryptionKey,
newTicket.getIV().getValue());
} catch (CryptoException e) {
LOGGER.warn("Could not encrypt SessionState. Using empty byte[]");
LOGGER.debug(e);
encryptedState = new byte[0];
}
newTicket.setEncryptedState(encryptedState);
byte[] keyHMAC = config.getSessionTicketKeyHMAC();
// Mac(Name + IV + TicketLength + Ticket)
byte[] macInput = ArrayConverter.concatenate(config.getSessionTicketKeyName(), iv,
ArrayConverter.intToBytes(encryptedState.length, HandshakeByteLength.ENCRYPTED_STATE_LENGTH),
encryptedState);
byte[] hmac;
try {
hmac = StaticTicketCrypto.generateHMAC(config.getSessionTicketMacAlgorithm(), macInput, keyHMAC);
} catch (CryptoException ex) {
LOGGER.warn("Could generate HMAC. Using empty byte[]");
LOGGER.debug(ex);
hmac = new byte[0];
}
newTicket.setMAC(hmac);
newTicket.setEncryptedStateLength(encryptedState.length);
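        // The fully serialized ticket also serves as the ticket identity.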
SessionTicketSerializer sessionTicketSerializer = new SessionTicketSerializer(newTicket);
byte[] sessionTicketSerialized = sessionTicketSerializer.serialize();
msg.getTicket().setIdentityLength(sessionTicketSerialized.length);
msg.getTicket().setIdentity(sessionTicketSerialized);
}
@Override
protected void prepareHandshakeMessageContents() {
LOGGER.debug("Preparing NewSessionTicketMessage");
prepareTicketLifetimeHint(msg);
if (chooser.getSelectedProtocolVersion().isTLS13()) {
prepareTicketTls13(msg);
} else {
prepareTicket(msg);
}
}
private void prepareTicketTls13(NewSessionTicketMessage msg) {
prepareTicketAgeAdd(msg);
prepareNonce(msg);
prepareIdentity(msg);
prepareExtensions();
prepareExtensionLength();
}
private void prepareTicketAgeAdd(NewSessionTicketMessage msg) {
msg.getTicket().setTicketAgeAdd(chooser.getConfig().getDefaultSessionTicketAgeAdd());
}
private void prepareIdentity(NewSessionTicketMessage msg) {
msg.getTicket().setIdentity(chooser.getConfig().getDefaultSessionTicketIdentity());
msg.getTicket().setIdentityLength(msg.getTicket().getIdentity().getValue().length);
}
private void prepareNonce(NewSessionTicketMessage msg) {
msg.getTicket().setTicketNonce(chooser.getConfig().getDefaultSessionTicketNonce());
msg.getTicket().setTicketNonceLength(msg.getTicket().getTicketNonce().getValue().length);
}
}
| 2,218 |
471 | <gh_stars>100-1000
"""
List the IDs of deleted users for a given domain
"""
from django.core.management import BaseCommand
from corehq.apps.domain.dbaccessors import get_doc_ids_in_domain_by_class
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id
from corehq.apps.users.dbaccessors import (
get_mobile_user_ids,
)
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
help = __doc__.strip() # (The module's docstring)
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument(
'--usernames',
action='store_true',
dest='with_usernames',
default=False,
help="Include usernames in the list of IDs",
)
def handle(self, domain, **options):
mobile_users = get_mobile_user_ids(domain)
everyone = set(get_doc_ids_in_domain_by_class(domain, CommCareUser))
deleted = everyone - mobile_users
# See also corehq.apps.dump_reload.management.commands.print_domain_stats._get_couchdb_counts
id_ = None
for id_ in deleted:
if options['with_usernames']:
doc_info = get_doc_info_by_id(domain, id_)
print(id_, doc_info.display)
else:
print(id_)
if id_ is None:
print('Domain "{}" has no deleted users'.format(domain))
| 622 |
435 | {
"description": "The Python data ecosystem has grown beyond the confines of single\nmachines to embrace scalability. Here we describe one of our approaches\nto scaling, which is already being used in production systems. The goal\nof in-database analytics is to bring the calculations to the data,\nreducing transport costs and I/O bottlenecks. Using PL/Python we can run\nparallel queries across terabytes of data using not only pure SQL but\nalso familiar PyData packages such as scikit- learn and nltk. This\napproach can also be used with PL/R to make use of a wide variety of R\npackages. We look at examples on Postgres compatible systems such as the\nGreenplum Database and on Hadoop through Pivotal HAWQ. We will also\nintroduce MADlib, Pivotal\u2019s open source library for scalable in-database\nmachine learning, which uses Python to glue SQL queries to low level C++\nfunctions and is also usable through the PyMADlib package.\n",
"duration": 2169,
"language": "eng",
"recorded": "2014-02-22",
"related_urls": [
{
"label": "slides",
"url": "http://www.slideshare.net/ihuston/massively-parallel-processing-with-procedural-python-pydata-london-2014"
},
{
"label": "repository",
"url": "https://github.com/ihuston/plpython_examples"
}
],
"speakers": [
"<NAME>"
],
"thumbnail_url": "https://i.ytimg.com/vi/Q2qtFkEdG2Q/hqdefault.jpg",
"title": "Massively Parallel Processing with Procedural Python",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=Q2qtFkEdG2Q"
}
]
}
| 537 |
571 | /***************************************************************************
Copyright 2015 Ufora Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
#ifndef base_debug_StackTrace_H_
#define base_debug_StackTrace_H_
#include "../Platform.hpp"
#if BSA_PLATFORM_LINUX
#include <cxxabi.h>
#include <execinfo.h>
#endif
#include <vector>
#include <sstream>
#include <iostream>
#include <string>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdexcept>
namespace Ufora {
namespace debug {
class StackTrace {
private:
StackTrace();
StackTrace(const std::vector<void*> &inTrace);
static bool isalpha(const std::string &inTrace);
public:
StackTrace(const StackTrace& in);
static std::string demangleStackSymbol(std::string s);
static StackTrace getTrace(int32_t inDepth = 40);
static std::string getStringTrace(int32_t inDepth = 40);
std::string toString(void) const;
static std::string demangle(std::string s);
static std::string functionAddressToString(void* functionPtr);
private:
std::vector<void*> mTracePtrs;
std::vector<std::string> mDemangled;
}; //class StackTrace
} //namespace debug
} //namespace Ufora
void throwLogicErrorWithStacktrace(const std::string& inMessage = "");
std::logic_error standardLogicErrorWithStacktrace(const std::string& inMessage = "");
#endif
| 613 |
310 | /*
* Copyright [2019] [恒宇少年 - 于起宇]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.minbox.framework.api.boot.sample.mybatis.enhance.dsl;
import com.gitee.hengboy.mybatis.enhance.dsl.expression.ColumnExpression;
import com.gitee.hengboy.mybatis.enhance.dsl.expression.TableExpression;
import org.minbox.framework.api.boot.sample.mybatis.enhance.entity.SystemUser;
/**
 * System user information table
*
* @author ApiBoot Mybatis Enhance Codegen
*/
public class DSystemUser extends TableExpression<SystemUser> {
public DSystemUser(String root) {
super(root);
}
public static DSystemUser DSL() {
return new DSystemUser("iot_system_user");
}
/**
     * Primary key
*/
public ColumnExpression id = new ColumnExpression("SU_ID", this);
/**
     * User name
*/
public ColumnExpression userName = new ColumnExpression("SU_USER_NAME", this);
/**
     * User nickname
*/
public ColumnExpression nickName = new ColumnExpression("SU_NICK_NAME", this);
/**
     * Age
*/
public ColumnExpression age = new ColumnExpression("SU_AGE", this);
/**
     * User password
*/
public ColumnExpression password = new ColumnExpression("SU_PASSWORD", this);
/**
     * User status: 1 = normal, 0 = frozen, -1 = deleted
*/
public ColumnExpression status = new ColumnExpression("SU_STATUS", this);
/**
     * Creation time
*/
public ColumnExpression createTime = new ColumnExpression("SU_CREATE_TIME", this);
/**
     * Remarks
*/
public ColumnExpression mark = new ColumnExpression("SU_MARK", this);
@Override
public ColumnExpression[] getColumns() {
return new ColumnExpression[]{id, userName, nickName, age, password, status, createTime, mark};
}
}
| 935 |
5,766 | //
// TestFailure.h
//
#ifndef Poco_CppUnit_TestFailure_INCLUDED
#define Poco_CppUnit_TestFailure_INCLUDED
#include "CppUnit/TestFailure.h"
#endif // Poco_CppUnit_TestFailure_INCLUDED
| 78 |
735 | <reponame>1443213244/small-package
/*
* Copyright (C) AlexWoo(<NAME>) <EMAIL>
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
#include "ngx_map.h"
static ngx_pool_t *ngx_rbuf_pool;
static ngx_map_t ngx_rbuf_map;
static ngx_chain_t *ngx_rbuf_free_chain;
static ngx_uint_t ngx_rbuf_nalloc_node;
static ngx_uint_t ngx_rbuf_nalloc_buf;
static ngx_uint_t ngx_rbuf_nfree_buf;
static ngx_uint_t ngx_rbuf_nalloc_chain;
static ngx_uint_t ngx_rbuf_nfree_chain;
static ngx_map_t ngx_rbuf_using;
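/* Map a pointer to the flexible buf[] member back to its owning ngx_rbuf_t. */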
#define ngx_rbuf_buf(b) \
(ngx_rbuf_t *) ((u_char *) (b) - offsetof(ngx_rbuf_t, buf))
typedef struct ngx_rbuf_s ngx_rbuf_t;
struct ngx_rbuf_s {
size_t size;
ngx_rbuf_t *next;
u_char buf[];
};
typedef struct {
ngx_map_node_t node;
ngx_rbuf_t *rbuf;
} ngx_rbuf_node_t;
typedef struct {
ngx_chain_t cl;
ngx_buf_t buf;
unsigned alloc;
ngx_map_node_t node;
char *file;
int line;
} ngx_chainbuf_t;
static ngx_int_t
ngx_rbuf_init()
{
ngx_rbuf_pool = ngx_create_pool(4096, ngx_cycle->log);
if (ngx_rbuf_pool == NULL) {
return NGX_ERROR;
}
ngx_map_init(&ngx_rbuf_map, ngx_map_hash_uint, ngx_cmp_uint);
ngx_map_init(&ngx_rbuf_using, ngx_map_hash_uint, ngx_cmp_uint);
ngx_rbuf_nalloc_node = 0;
ngx_rbuf_nalloc_buf = 0;
ngx_rbuf_nfree_buf = 0;
ngx_rbuf_nalloc_chain = 0;
ngx_rbuf_nfree_chain = 0;
return NGX_OK;
}
static ngx_rbuf_t *
ngx_rbuf_get_buf(size_t key)
{
ngx_rbuf_node_t *rn;
ngx_map_node_t *node;
ngx_rbuf_t *rb;
node = ngx_map_find(&ngx_rbuf_map, key);
if (node == NULL) { /* new key */
rn = ngx_pcalloc(ngx_rbuf_pool, sizeof(ngx_rbuf_node_t));
if (rn == NULL) {
return NULL;
}
node = &rn->node;
node->raw_key = key;
ngx_map_insert(&ngx_rbuf_map, node, 0);
++ngx_rbuf_nalloc_node;
} else {
rn = (ngx_rbuf_node_t *) node;
}
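    /* Take a buffer from this size class's free list, or allocate a new one below. */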
rb = rn->rbuf;
if (rb == NULL) {
rb = ngx_pcalloc(ngx_rbuf_pool, sizeof(ngx_rbuf_t) + key);
if (rb == NULL) {
return NULL;
}
rb->size = key;
++ngx_rbuf_nalloc_buf;
} else {
rn->rbuf = rb->next;
rb->next = NULL;
--ngx_rbuf_nfree_buf;
}
return rb;
}
static void
ngx_rbuf_put_buf(ngx_rbuf_t *rb)
{
ngx_rbuf_node_t *rn;
ngx_map_node_t *node;
node = ngx_map_find(&ngx_rbuf_map, rb->size);
if (node == NULL) {
return;
}
rn = (ngx_rbuf_node_t *) node;
rb->next = rn->rbuf;
rn->rbuf = rb;
++ngx_rbuf_nfree_buf;
}
static u_char *
ngx_rbuf_alloc(size_t size)
{
ngx_rbuf_t *rb;
    rb = ngx_rbuf_get_buf(size);
    if (rb == NULL) {
        return NULL;
    }
    return rb->buf;
}
static void
ngx_rbuf_free(u_char *rb)
{
ngx_rbuf_t *rbuf;
rbuf = ngx_rbuf_buf(rb);
ngx_rbuf_put_buf(rbuf);
}
ngx_chain_t *
ngx_get_chainbuf_debug(size_t size, ngx_flag_t alloc_rbuf, char *file, int line)
{
ngx_chainbuf_t *cb;
ngx_chain_t *cl;
if (ngx_rbuf_pool == NULL) {
ngx_rbuf_init();
}
cl = ngx_rbuf_free_chain;
if (cl) {
ngx_rbuf_free_chain = cl->next;
cl->next = NULL;
cb = (ngx_chainbuf_t *) cl;
--ngx_rbuf_nfree_chain;
} else {
cb = ngx_pcalloc(ngx_rbuf_pool, sizeof(ngx_chainbuf_t));
if (cb == NULL) {
return NULL;
}
cl = &cb->cl;
cl->buf = &cb->buf;
++ngx_rbuf_nalloc_chain;
}
if (alloc_rbuf) {
cl->buf->last = cl->buf->pos = cl->buf->start = ngx_rbuf_alloc(size);
cl->buf->end = cl->buf->start + size;
cb->alloc = 1;
} else {
cl->buf->pos = cl->buf->last = cl->buf->start = cl->buf->end = NULL;
cb->alloc = 0;
}
cl->buf->memory = 1;
// record chainbuf in using map
cb->file = file;
cb->line = line;
cb->node.raw_key = (intptr_t) cl;
ngx_map_insert(&ngx_rbuf_using, &cb->node, 0);
return cl;
}
void
ngx_put_chainbuf_debug(ngx_chain_t *cl, char *file, int line)
{
ngx_chainbuf_t *cb;
if (ngx_rbuf_pool == NULL) {
return;
}
if (cl == NULL) {
return;
}
cb = (ngx_chainbuf_t *) cl;
if (cb->alloc) {
ngx_rbuf_free(cl->buf->start);
}
cl->next = ngx_rbuf_free_chain;
ngx_rbuf_free_chain = cl;
++ngx_rbuf_nfree_chain;
// delete chainbuf from using map
if (ngx_map_find(&ngx_rbuf_using, (intptr_t) cl) == NULL) {
ngx_log_error(NGX_LOG_EMERG, ngx_cycle->log, 0,
"destroy chainbuf twice: %s:%d", file, line);
return;
}
ngx_map_delete(&ngx_rbuf_using, (intptr_t) cl);
}
ngx_chain_t *
ngx_rbuf_state(ngx_http_request_t *r, unsigned detail)
{
ngx_chain_t *cl;
ngx_buf_t *b;
size_t len, len1;
ngx_uint_t n;
ngx_chainbuf_t *cb;
ngx_map_node_t *node;
len = sizeof("##########ngx rbuf state##########\n") - 1
+ sizeof("ngx_rbuf nalloc node: \n") - 1 + NGX_OFF_T_LEN
+ sizeof("ngx_rbuf nalloc buf: \n") - 1 + NGX_OFF_T_LEN
+ sizeof("ngx_rbuf nfree buf: \n") - 1 + NGX_OFF_T_LEN
+ sizeof("ngx_rbuf nalloc chain: \n") - 1 + NGX_OFF_T_LEN
+ sizeof("ngx_rbuf nfree chain: \n") - 1 + NGX_OFF_T_LEN;
len1 = 0;
if (detail) {
n = ngx_rbuf_nalloc_chain - ngx_rbuf_nfree_chain;
/* " file:line\n" */
len1 = 4 + 256 + 1 + NGX_OFF_T_LEN + 1;
len += len1 * n;
}
cl = ngx_alloc_chain_link(r->pool);
if (cl == NULL) {
return NULL;
}
cl->next = NULL;
b = ngx_create_temp_buf(r->pool, len);
if (b == NULL) {
return NULL;
}
cl->buf = b;
b->last = ngx_snprintf(b->last, len,
"##########ngx rbuf state##########\nngx_rbuf nalloc node: %ui\n"
"ngx_rbuf nalloc buf: %ui\nngx_rbuf nfree buf: %ui\n"
"ngx_rbuf nalloc chain: %ui\nngx_rbuf nfree chain: %ui\n",
ngx_rbuf_nalloc_node, ngx_rbuf_nalloc_buf, ngx_rbuf_nfree_buf,
ngx_rbuf_nalloc_chain, ngx_rbuf_nfree_chain);
if (detail) {
for (node = ngx_map_begin(&ngx_rbuf_using); node;
node = ngx_map_next(node))
{
cb = (ngx_chainbuf_t *) ((char *) node
- offsetof(ngx_chainbuf_t, node));
b->last = ngx_snprintf(b->last, len1, " %s:%d\n",
cb->file, cb->line);
}
}
return cl;
}
| 4,124 |
318 | <reponame>adam11grafik/maven-surefire
package org.apache.maven.surefire.api.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* @author <NAME>
*/
@Deprecated
public interface DirectoryScanner
{
/**
* Locates tests based on scanning directories
*
* @param classLoader The classloader to use when loading classes
* @param scannerFilter The filter to include/exclude test classes
* @return The found classes that match the filter
*/
TestsToRun locateTestClasses( ClassLoader classLoader, ScannerFilter scannerFilter );
}
| 366 |
310 | <filename>gear/hardware/p/ps4-pro.json<gh_stars>100-1000
{
"name": "PS4 Pro",
"description": "A gaming console.",
"url": "https://www.playstation.com/en-us/explore/ps4-pro/"
}
| 76 |
1,062 | /**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.metadata;
import java.util.Arrays;
import org.junit.*;
import static org.junit.Assert.*;
public class MetadataListTest {
@Test public void testEqual() {
MetadataList list1 = buildList1();
MetadataList list2 = buildList1();
assertEquals("should be equal", list1,list2);
}
@Test public void testNotEqual() {
MetadataList list1 = buildList1();
MetadataList list2 = buildList2();
assertFalse("should not be equal", list1.equals(list2));
}
private MetadataList buildList1() {
return new MetadataList(
Arrays.asList(
new MetadataField(new Integer(234), PrimitiveType.INTEGER),
new MetadataField(new Double(88.77), PrimitiveType.DOUBLE),
new MetadataField("yo yo yo", PrimitiveType.STRING),
new MetadataArray(Arrays.asList(33L,44L,55L), PrimitiveType.SIZE_T)
)
);
}
private MetadataList buildList2() {
// swapped order of 2nd and 3rd elements
return new MetadataList(
Arrays.asList(
new MetadataField(new Integer(234), PrimitiveType.INTEGER),
new MetadataField("yo yo yo", PrimitiveType.STRING),
new MetadataField(new Double(88.77), PrimitiveType.DOUBLE),
new MetadataArray(Arrays.asList(33L,44L,55L), PrimitiveType.SIZE_T)
)
);
}
}
| 648 |
301 | <filename>test/src/tc/re/junit/src/org/iotivity/test/re/tc/stc/REResourceBrokerTest.java<gh_stars>100-1000
//******************************************************************
//
// Copyright 2016 Samsung Electronics All Rights Reserved.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
package org.iotivity.test.re.tc.stc;
import org.iotivity.base.ModeType;
import org.iotivity.base.OcPlatform;
import org.iotivity.base.PlatformConfig;
import org.iotivity.base.QualityOfService;
import org.iotivity.base.ServiceType;
import org.iotivity.service.RcsException;
import org.iotivity.service.client.RcsRemoteResourceObject;
import org.iotivity.service.client.RcsRemoteResourceObject.ResourceState;
import org.iotivity.test.re.tc.helper.REAPIHelper;
import static org.iotivity.test.re.tc.helper.ResourceUtil.*;
import android.test.InstrumentationTestCase;
import android.util.Log;
public class REResourceBrokerTest extends InstrumentationTestCase {
private static final String LOG_TAG = "RETest";
private REAPIHelper m_REHelper;
private static RcsRemoteResourceObject m_Resource;
private StringBuilder m_ErrorMsg = new StringBuilder();
int m_count = 10;
protected void setUp() throws Exception {
super.setUp();
m_REHelper = new REAPIHelper();
m_ErrorMsg.setLength(0);
PlatformConfig platformConfigObj = new PlatformConfig(getInstrumentation()
.getTargetContext(), ServiceType.IN_PROC, ModeType.CLIENT_SERVER,
"0.0.0.0", 0, QualityOfService.LOW);
OcPlatform.Configure(platformConfigObj);
Log.i(LOG_TAG, "Configuration done Successfully");
if (!m_REHelper.disocverResources(m_ErrorMsg)) {
assertTrue(
"Precondition Failed, No Resource Found!! " + m_ErrorMsg.toString(),
false);
} else {
m_Resource = m_REHelper.getFoundResourceList().get(0);
Log.i(LOG_TAG, m_ErrorMsg.toString());
}
m_ErrorMsg.setLength(0);
}
protected void tearDown() throws Exception {
super.tearDown();
m_REHelper.distroyResources();
if (m_Resource != null) {
m_Resource.destroy();
m_Resource = null;
}
Log.i(LOG_TAG, "tearDown called for REResourceBrokerTest");
}
/**
* @since 2015-11-03
* @see none
* @objective Test 'startMonitoring' function with positive basic way
* @target void startMonitoring(OnStateChangedListener listener)
* @test_data Callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform startMonitoring() API 2. Check callback
* @post_condition None
* @expected No crash occurs
*/
public void testStartMonitoring_CV_P() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
m_REHelper.waitInSecond(CALLBACK_WAIT_MAX);
if (REAPIHelper.g_IsStateChanged == false) {
fail("Callback is not received.");
}
} catch (RcsException e) {
fail("Unable to start monitoring. " + e.getLocalizedMessage());
}
}
/**
* @since 2016-03-01
* @see void stopMonitoring()
* @see boolean isMonitoring()
* @objective Test 'startMonitoring' function with Validation Loop Condition
* Check
* @target void startMonitoring(OnStateChangedListener listener)
* @test_data Callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform startMonitoring() API 2. Wait for 5 seconds 3. Again
* perform startMonitoring() API
* @post_condition None
* @expected Should throw exception with message "Monitoring already started."
*/
public void testStartMonitoring_VLCC_N() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
} catch (RcsException e) {
}
m_REHelper.waitInSecond(CALLBACK_WAIT_MAX);
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
fail("Successfully called startMonitoring API twice.");
} catch (RcsException e) {
if (!e.getLocalizedMessage().contains("Monitoring already started")) {
fail("Can't get proper exception. Expected: Monitoring already started, Actual: "
+ e.getLocalizedMessage());
}
}
}
/**
* @since 2015-09-20
* @see void startMonitoring()
* @objective Test 'stopMonitoring' function with Sequential Validation
* @target void stopMonitoring()
* @test_data Callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform startMonitoring() API 2. Perform stopMonitoring() API
* @post_condition None
* @expected No crash occurs
*/
public void testStopMonitoring_SQV_P() {
if (!m_REHelper.startMonitoring(m_ErrorMsg)) {
fail(m_ErrorMsg.toString());
}
if (!m_REHelper.stopMonitoring(m_ErrorMsg)) {
fail(m_ErrorMsg.toString());
}
}
/**
* @since 2015-11-03
* @see None
* @objective Test 'startMonitoring' function with Sequential Validation Check
* @target void startMonitoring(OnStateChangedListener listener)
* @target void stopMonitoring()
* @test_data callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform startMonitoring() API with valid callback 2. Perform
* stopMonitoring() API
* @post_condition None
* @expected No crash occurs
**/
public void testStartMonitoring_SCV_P() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
m_REHelper.waitInSecond(CALLBACK_WAIT_MIN);
m_Resource.stopMonitoring();
} catch (RcsException e) {
fail("Exception occurred inside testStartMonitoring_SCV_P: "
+ e.getLocalizedMessage());
}
}
/**
* @since 2016-03-01
* @see None
* @objective Test 'startMonitoring' function with Sequential Validation Check
* @target void startMonitoring(OnStateChangedListener listener)
* @target boolean isMonitoring()
* @target void stopMonitoring()
* @test_data callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform startMonitoring() API with valid callback 2. Check
* Monitoring status with isMonitoring() API 3. Perform
* stopMonitoring() API 4. Check Monitoring status with
* isMonitoring() API again.
* @post_condition None
* @expected 1. Should return valid status of monitoring 2. No crash occurs
**/
public void testStartMonitoring_SQV_P() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
m_REHelper.waitInSecond(CALLBACK_WAIT_MIN);
boolean isMonitoring = m_Resource.isMonitoring();
assertEquals("isMonitoring should true after startMonitoring.", true,
isMonitoring);
m_Resource.stopMonitoring();
isMonitoring = m_Resource.isMonitoring();
assertEquals("isMonitoring should false after stopMonitoring.", false,
isMonitoring);
} catch (RcsException e) {
fail("Exception occurred inside testStartMonitoring_SQV_P: "
+ e.getLocalizedMessage());
}
}
/**
* @since 2015-11-03
* @see void startMonitoring(ResourceStateChangedCallback callback)
* @objective Test 'stopMonitoring' function with terminate loop check
* @target void stopMonitoring()
* @test_data callback function for receiving changed state
   * @pre_condition 1. Remote Resource Object should be instantiated 2.
* Perform startMonitoring() API 3. Perform stopMonitoring()
* API
* @procedure Perform stopMonitoring() API
* @post_condition None
* @expected No crash occurs
**/
public void testStopMonitoringTwice_DSCC_ITLC_P() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
m_REHelper.waitInSecond(CALLBACK_WAIT_MIN);
m_Resource.stopMonitoring();
m_REHelper.waitInSecond(CALLBACK_WAIT_ONE);
m_Resource.stopMonitoring();
} catch (RcsException e) {
fail("Exception occurred inside StopMonitoringTwice_P: "
+ e.getLocalizedMessage());
}
}
/**
* @since 2015-11-03
* @see void startMonitoring(ResourceStateChangedCallback callback)
* @objective Test 'stopMonitoring' function with Initial/Terminate Loop Check
* @target void stopMonitoring()
* @test_data callback function for receiving changed state
   * @pre_condition 1. Remote Resource Object should be instantiated 2.
* Perform startMonitoring() API 3. Perform stopMonitoring()
* API
* @procedure Perform stopMonitoring() API 10 times
* @post_condition None
* @expected No crash occurs
**/
public void testStopMonitoring_ITLC_P() {
try {
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
m_Resource.stopMonitoring();
} catch (RcsException e) {
assertTrue(
"Throws exception when calling startMonitoring() and stopMonitoring() API",
false);
}
for (int i = 0; i < m_count; i++) {
try {
m_Resource.stopMonitoring();
} catch (RcsException e) {
assertTrue(
"Fail to stop monitoring when calling stopMonitoring() in a loop.",
false);
}
}
}
/**
* @since 2015-12-31
* @see None
* @objective Test 'stopMonitoring' function without performing
* startMonitoring()
* @target void stopMonitoring()
* @test_data None
   * @pre_condition Remote Resource Object should be instantiated
* @procedure Perform stopMonitoring() API
* @post_condition None
* @expected No crash occurs
**/
public void testStopMonitoring_DSCC_N() {
try {
m_Resource.stopMonitoring();
} catch (RcsException e) {
fail("Exception occurred inside testStopMonitoring_DSCC_P: "
+ e.getLocalizedMessage());
}
}
/**
* @since 2015-12-31
* @see void startMonitoring(ResourceStateChangedCallback callback)
* @see void stopMonitoring()
* @objective Test 'getState' function with Sequential validation
* @target ResourceState getState()
* @test_data callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform getState() API. 2. Check the ResourceState. 3.
* Perform startMonitoring() API. 4. Perform getState() API. 5.
* Check the ResourceState.
* @post_condition None
* @expected 1. No exception occurs 2. getState should return NONE before
* startMonitoring 3. getState should return REQUESTED after
* startMonitoring
**/
public void testGetState_SQV_P() {
try {
ResourceState state = m_Resource.getState();
if (state != ResourceState.NONE) {
assertTrue("Resource should return NONE, but actual is " + state, false);
}
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
state = m_Resource.getState();
if (state != ResourceState.REQUESTED) {
assertTrue("Resource should return REQUESTED, but actual is " + state,
false);
}
} catch (RcsException e) {
fail("Exception occurs at testGetState_STCC_P: "
+ e.getLocalizedMessage());
}
}
/**
* @since 2015-12-31
* @see None
* @objective Test 'getState' function with State Check Validation
* @target ResourceState getState()
* @test_data callback function for receiving changed state
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform getState() API. 2. Check the ResourceState. 3.
* Perform startMonitoring() API. 4. Check the ResourceState. 5.
* Wait for 5 seconds. 6. Perform getState() API. 7. Check the
* ResourceState.
* @post_condition None
* @expected 1. No exception occurs 2. getState should return ALIVE after
* startMonitoring
**/
public void testGetState_SCV_P() {
try {
ResourceState state = m_Resource.getState();
if (state != ResourceState.NONE) {
assertTrue("Resource should return NONE, but actual is " + state, false);
}
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
state = m_Resource.getState();
if (state != ResourceState.REQUESTED) {
assertTrue("Resource should return REQUESTED, but actual is " + state,
false);
}
m_REHelper.waitInSecond(CALLBACK_WAIT_MAX);
state = m_Resource.getState();
if (state != ResourceState.ALIVE) {
assertTrue("Resource should return ALIVE, but actual is " + state,
false);
}
} catch (RcsException e) {
fail("Exception occurs at testGetState_SCV_P: " + e.getLocalizedMessage());
}
}
/**
* @since 2017-03-02
* @see void stopMonitoring()
* @see boolean startMonitoring()
* @objective Test 'isObservable' function with positive basic way
* @target boolean isObservable()
* @test_data None
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Perform stopMonitoring() API
* 2. Perform startMonitoring() API
* 3. Check the value of isObservable()
* @post_condition None
* @expected No crash should occur
*/
public void testIsObseravable_SCV_P() {
try {
m_Resource.stopMonitoring();
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
} catch (RcsException e) {
fail("Exception occurs at testIsObseravable_SCV_P: " + e.getLocalizedMessage());
}
m_REHelper.waitInSecond(CALLBACK_WAIT_MAX);
boolean isObservable = true;
try {
isObservable= m_Resource.isObservable();
} catch (RcsException e) {
fail("Should not throw exception when calling isObservable API.");
}
if (!isObservable) {
fail("isObservable() is false after startMonitoring.");
}
}
/**
* @since 2017-03-02
* @see void stopMonitoring()
* @see boolean startMonitoring()
* @objective Test 'isObservable' function with negative basic way
* @target boolean isObservable()
* @test_data None
   * @pre_condition Remote Resource Object should be instantiated
* @procedure 1. Check the value of isMonitoring()
* 2. Perform startMonitoring()
* 3. Check the value of isObservable()
* @post_condition Perform stopMonitoring()
* @expected 1. isObservable() should return True
* 2. No crash occurs
*/
public void testIsObseravable_USTC_N() {
boolean isObservable = false;
try {
isObservable = m_Resource.isMonitoring();
if (isObservable) {
fail("isMonitoring() is true before startMonitoring.");
}
m_Resource.startMonitoring(m_REHelper.mOnStateChangedListener);
} catch (RcsException e) {
fail("Exception occurs at testIsObseravable_USTC_N: " + e.getLocalizedMessage());
}
m_REHelper.waitInSecond(CALLBACK_WAIT_MAX);
try {
isObservable = m_Resource.isObservable();
} catch (RcsException e) {
fail("Should not throw exception when calling isObservable API.");
}
if (!isObservable) {
fail("isObservable() is false after startMonitoring.");
}
try {
m_Resource.stopMonitoring();
} catch (RcsException e) {
fail("Should not throw exception when calling stopMonitoring API.");
}
}
}
| 5,839 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-6r8v-ww3c-grvh",
"modified": "2022-04-23T00:03:22Z",
"published": "2022-04-16T00:00:53Z",
"aliases": [
"CVE-2022-26034"
],
"details": "Improper authentication vulnerability in the communication protocol provided by AD (Automation Design) server of CENTUM VP R6.01.10 to R6.09.00, CENTUM VP Small R6.01.10 to R6.09.00, CENTUM VP Basic R6.01.10 to R6.09.00, and B/M9000 VP R8.01.01 to R8.03.01 allows an attacker to use the functions provided by AD server. This may lead to leakage or tampering of data managed by AD server.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2022-26034"
},
{
"type": "WEB",
"url": "https://jvn.jp/vu/JVNVU99204686/index.html"
},
{
"type": "WEB",
"url": "https://www.yokogawa.com/library/resources/white-papers/yokogawa-security-advisory-report-list/"
}
],
"database_specific": {
"cwe_ids": [
"CWE-287"
],
"severity": "CRITICAL",
"github_reviewed": false
}
} | 570 |
405 | <reponame>macharmi/workalendar
from datetime import date
from ..core import MON, WesternCalendar
from ..registry_tools import iso_register
@iso_register('GG')
class Guernsey(WesternCalendar):
'Guernsey'
include_easter_monday = True
include_boxing_day = True
shift_new_years_day = True
include_good_friday = True
def get_spring_bank_holiday(self, year):
        spring_bank_holiday = Guernsey.get_last_weekday_in_month(year, 5, MON)
return (
spring_bank_holiday,
"Spring Bank Holiday"
)
def get_early_may_bank_holiday(self, year):
"""
Return Early May bank holiday
"""
# Special case in 2020, for the 75th anniversary of the end of WWII.
if year == 2020:
return (
date(year, 5, 8),
"Early May bank holiday (VE day)"
)
return (
Guernsey.get_nth_weekday_in_month(year, 5, MON),
"Early May Bank Holiday"
)
def get_summer_bank_holiday(self, year):
return (
Guernsey.get_last_weekday_in_month(year, 8, MON),
"Summer Bank Holiday"
)
def get_liberation_day(self, year):
return (date(year, 5, 9), "Liberation Day")
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_early_may_bank_holiday(year))
days.append(self.get_spring_bank_holiday(year))
days.append(self.get_summer_bank_holiday(year))
days.append(self.get_liberation_day(year))
# Boxing day & XMas shift
shifts = self.shift_christmas_boxing_days(year=year)
days.extend(shifts)
return days
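# --- Illustrative usage sketch (added example, not part of the original module).
# Assumes workalendar's standard Calendar API (`holidays(year)` returning
# (date, label) pairs); run as a script to list Guernsey's 2020 holidays.
if __name__ == "__main__":
    cal = Guernsey()
    for day, label in sorted(cal.holidays(2020)):
        print(day, label)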
| 798 |
407 | package com.alibaba.tesla.tkgone.server.common;
import lombok.Data;
import java.util.ArrayList;
import java.util.List;
@Data
public class Range {
int start = 0;
int stop = 1;
int step = 1;
public Range(int start, int stop, int step) {
this.start = start;
this.stop = stop;
this.step = step;
}
public Range(int start, int stop) {
this(start, stop, 1);
}
public Range(int stop) {
this(0, stop);
}
public List<Integer> toList() {
List<Integer> list = new ArrayList<>();
for (int index = start; index < stop; index+=step) {
list.add(index);
}
return list;
}
}
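// Illustrative usage sketch (added example, not part of the original class):
// new Range(5).toList()        -> [0, 1, 2, 3, 4]
// new Range(1, 10, 3).toList() -> [1, 4, 7]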
| 312 |
1,425 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.process.computer.clustering.peerpressure;
import org.apache.tinkerpop.gremlin.process.computer.KeyValue;
import org.apache.tinkerpop.gremlin.process.computer.MapReduce;
import org.apache.tinkerpop.gremlin.process.computer.util.StaticMapReduce;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.Property;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.util.StringFactory;
import org.apache.commons.configuration2.Configuration;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
/**
* @author <NAME> (http://markorodriguez.com)
*/
public class ClusterCountMapReduce extends StaticMapReduce<MapReduce.NullObject, Serializable, MapReduce.NullObject, Integer, Integer> {
public static final String CLUSTER_COUNT_MEMORY_KEY = "gremlin.clusterCountMapReduce.memoryKey";
public static final String DEFAULT_MEMORY_KEY = "clusterCount";
private String memoryKey = DEFAULT_MEMORY_KEY;
private ClusterCountMapReduce() {
}
private ClusterCountMapReduce(final String memoryKey) {
this.memoryKey = memoryKey;
}
@Override
public void storeState(final Configuration configuration) {
super.storeState(configuration);
configuration.setProperty(CLUSTER_COUNT_MEMORY_KEY, this.memoryKey);
}
@Override
public void loadState(final Graph graph, final Configuration configuration) {
this.memoryKey = configuration.getString(CLUSTER_COUNT_MEMORY_KEY, DEFAULT_MEMORY_KEY);
}
@Override
public boolean doStage(final Stage stage) {
return !stage.equals(Stage.COMBINE);
}
@Override
public void map(final Vertex vertex, final MapEmitter<NullObject, Serializable> emitter) {
final Property<Serializable> cluster = vertex.property(PeerPressureVertexProgram.CLUSTER);
if (cluster.isPresent()) {
emitter.emit(NullObject.instance(), cluster.value());
}
}
@Override
public void reduce(final NullObject key, final Iterator<Serializable> values, final ReduceEmitter<NullObject, Integer> emitter) {
final Set<Serializable> set = new HashSet<>();
values.forEachRemaining(set::add);
emitter.emit(NullObject.instance(), set.size());
}
@Override
public Integer generateFinalResult(final Iterator<KeyValue<NullObject, Integer>> keyValues) {
return keyValues.next().getValue();
}
@Override
public String getMemoryKey() {
return this.memoryKey;
}
@Override
public String toString() {
return StringFactory.mapReduceString(this, this.memoryKey);
}
//////////////////////////////
public static Builder build() {
return new Builder();
}
public final static class Builder {
private String memoryKey = DEFAULT_MEMORY_KEY;
private Builder() {
}
public Builder memoryKey(final String memoryKey) {
this.memoryKey = memoryKey;
return this;
}
public ClusterCountMapReduce create() {
return new ClusterCountMapReduce(this.memoryKey);
}
}
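    // Illustrative usage sketch (added example, not part of the original class):
    // ClusterCountMapReduce mapReduce = ClusterCountMapReduce.build()
    //         .memoryKey("myClusterCount")
    //         .create();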
} | 1,365 |
536 | <gh_stars>100-1000
package com.ctrip.ops.sysdev.cmd;
import com.ctrip.ops.sysdev.baseplugin.BaseFilter;
import com.ctrip.ops.sysdev.baseplugin.BaseInput;
import com.ctrip.ops.sysdev.baseplugin.BaseOutput;
import lombok.extern.log4j.Log4j2;
import java.lang.reflect.Constructor;
import java.util.*;
@Log4j2
public class TopologyBuilder {
private final List<HashMap<String, Map>> inputConfigs;
private final List<HashMap<String, Map>> filterConfigs;
private final List<HashMap<String, Map>> outputConfigs;
public TopologyBuilder(List<HashMap<String, Map>> inputConfigs,
List<HashMap<String, Map>> filterConfigs,
List<HashMap<String, Map>> outputConfigs) {
this.inputConfigs = inputConfigs;
this.filterConfigs = filterConfigs;
this.outputConfigs = outputConfigs;
}
private List<BaseInput> buildInputs() {
List<BaseInput> inputs = new ArrayList<BaseInput>(inputConfigs.size());
inputConfigs.forEach(
input -> {
input.forEach((inputType, inputConfig) -> {
log.info("begin to build input " + inputType);
Class<?> inputClass = null;
List<String> classNames = Arrays.asList("com.ctrip.ops.sysdev.inputs." + inputType, inputType);
boolean tryCtrip = true;
for (String className : classNames) {
try {
inputClass = Class.forName(className);
Constructor<?> ctor = inputClass.getConstructor(
Map.class,
ArrayList.class,
ArrayList.class);
BaseInput inputInstance = (BaseInput) ctor.newInstance(
inputConfig,
filterConfigs,
outputConfigs);
log.info("build input " + inputType + " done");
inputs.add(inputInstance);
break;
} catch (ClassNotFoundException e) {
                        if (tryCtrip) {
log.info("maybe a third party input plugin. try to build " + inputType);
tryCtrip = false;
continue;
} else {
log.error(e);
System.exit(1);
}
} catch (Exception e) {
log.error(e);
System.exit(1);
}
}
});
});
return inputs;
}
private List<BaseFilter> buildFilters() {
if (filterConfigs == null) {
return new ArrayList<>(0);
}
final List<BaseFilter> filterProcessors = new ArrayList(filterConfigs.size());
filterConfigs.stream().forEach((Map filterMap) -> {
filterMap.entrySet().stream().forEach(entry -> {
Map.Entry<String, Map> filter = (Map.Entry<String, Map>) entry;
String filterType = filter.getKey();
Map filterConfig = filter.getValue();
log.info("begin to build filter " + filterType);
Class<?> filterClass;
Constructor<?> ctor = null;
List<String> classNames = Arrays.asList("com.ctrip.ops.sysdev.filters." + filterType, filterType);
boolean tryCtrip = true;
for (String className : classNames) {
try {
filterClass = Class.forName(className);
ctor = filterClass.getConstructor(Map.class);
log.info("build filter " + filterType + " done");
filterProcessors.add((BaseFilter) ctor.newInstance(filterConfig));
break;
} catch (ClassNotFoundException e) {
                    if (tryCtrip) {
                        log.info("maybe a third party filter plugin. try to build " + filterType);
tryCtrip = false;
continue;
} else {
log.error(e);
System.exit(1);
}
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
});
});
return filterProcessors;
}
private List<BaseOutput> buildOutputs() {
if (outputConfigs == null) {
log.error("Error: At least One output should be set.");
System.exit(1);
}
final List<BaseOutput> outputProcessors = new ArrayList<BaseOutput>(outputConfigs.size());
outputConfigs.stream().forEach((Map outputMap) -> {
outputMap.entrySet().stream().forEach(entry -> {
Map.Entry<String, Map> output = (Map.Entry<String, Map>) entry;
String outputType = output.getKey();
Map outputConfig = output.getValue();
log.info("begin to build output " + outputType);
Class<?> outputClass;
Constructor<?> ctor = null;
List<String> classNames = Arrays.asList("com.ctrip.ops.sysdev.outputs." + outputType, outputType);
boolean tryCtrip = true;
for (String className : classNames) {
try {
outputClass = Class.forName(className);
ctor = outputClass.getConstructor(Map.class);
log.info("build output " + outputType + " done");
outputProcessors.add((BaseOutput) ctor.newInstance(outputConfig));
break;
} catch (ClassNotFoundException e) {
if (tryCtrip == true) {
log.info("maybe a third party output plugin. try to build " + outputType);
tryCtrip = false;
continue;
} else {
log.error(e);
System.exit(1);
}
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
});
});
return outputProcessors;
}
private void setDestToInput(BaseInput input, List<BaseFilter> filters, List<BaseOutput> outputs) {
if (filters.size() != 0) {
input.nextFilter = filters.get(0);
} else {
input.outputs.addAll(outputs);
}
}
private void setDestToFilter(BaseFilter filter, int i, List<BaseFilter> filters, List<BaseOutput> outputs) {
if (filters.size() == i + 1) {
filter.outputs.addAll(outputs);
} else {
filter.nextFilter = filters.get(i + 1);
}
}
public List<BaseInput> buildTopology() {
List<BaseInput> inputs = this.buildInputs();
List<BaseFilter> filters = this.buildFilters();
List<BaseOutput> outputs = this.buildOutputs();
for (BaseInput input :
inputs
) {
setDestToInput(input, filters, outputs);
}
for (int i = 0, size = filters.size(); i < size; i++) {
setDestToFilter(filters.get(i), i, filters, outputs);
}
return inputs;
}
}
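// Illustrative usage sketch (added example, not part of the original class).
// The three config lists are assumed to be parsed elsewhere (e.g. from YAML):
// TopologyBuilder builder = new TopologyBuilder(inputConfigs, filterConfigs, outputConfigs);
// List<BaseInput> inputs = builder.buildTopology();
// // each returned BaseInput is then started by the caller's run loop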
| 4,416 |
348 | <gh_stars>100-1000
{"nom":"<NAME>","circ":"2ème circonscription","dpt":"Var","inscrits":16399,"abs":9021,"votants":7378,"blancs":498,"nuls":144,"exp":6736,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":3731},{"nuance":"FN","nom":"<NAME>","voix":3005}]} | 104 |
474 | package org.javacord.api.listener.server;
import org.javacord.api.event.server.ServerChangeMultiFactorAuthenticationLevelEvent;
import org.javacord.api.listener.GloballyAttachableListener;
import org.javacord.api.listener.ObjectAttachableListener;
/**
* This listener listens to server multi factor authentication level changes.
*/
@FunctionalInterface
public interface ServerChangeMultiFactorAuthenticationLevelListener extends ServerAttachableListener,
GloballyAttachableListener,
ObjectAttachableListener {
/**
* This method is called every time a server's multi factor authentication level changed.
*
* @param event The event.
*/
void onServerChangeMultiFactorAuthenticationLevel(ServerChangeMultiFactorAuthenticationLevelEvent event);
}
| 224 |
1,435 | <reponame>dupontz/libcloud
{"status":"ACTIVE","region":"SBG1","name":"test_vm","image":{"visibility":"public","status":"active","region":"SBG1","name":"Ubuntu12.04","minDisk":0,"size":2.19921875,"creationDate":"2016-06-01T07:39:56Z","minRam":0,"user":"ubuntu","id":"3031ed24-8337-4b09-94b5-e51c54bec6c8","type":"linux"},"created":"2016-10-05T19:17:14Z","sshKey":{"fingerPrint":"7d:25:ec:f9:53:91:95:13:45:f9:73:a1:33:f0:00:00","regions":["SBG1"],"name":"testkey","id":"<KEY>","publicKey":"<KEY>},"monthlyBilling":null,"id":"cc350b4a-b04b-41d2-959d-1f8f388877a2","ipAddresses":[{"networkId":"","version":4,"ip":"192.168.127.12","type":"public"}],"flavor":{"outboundBandwidth":102,"disk":10,"region":"SBG1","name":"vps-ssd-1","inboundBandwidth":102,"id":"98c1e679-5f2c-4069-b4da-4a4f7179b758","vcpus":1,"type":"ovh.vps-ssd","osType":"linux","ram":2000}}
| 363 |
653 | <gh_stars>100-1000
package se.michaelthelin.spotify.enums;
import java.util.HashMap;
import java.util.Map;
/**
* An enumeration of all possible Spotify product types.
*/
public enum ProductType {
BASIC_DESKTOP("basic-desktop"),
DAYPASS("daypass"),
FREE("free"),
OPEN("open"),
PREMIUM("premium");
private static final Map<String, ProductType> map = new HashMap<>();
static {
for (ProductType productType : ProductType.values()) {
map.put(productType.type, productType);
}
}
public final String type;
ProductType(final String type) {
this.type = type;
}
public static ProductType keyOf(String type) {
return map.get(type);
}
/**
* Get the Spotify product type as a string.
*
* @return The Spotify product type as a string.
*/
public String getType() {
return type;
}
}
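// Illustrative usage sketch (added example, not part of the original enum):
// ProductType type = ProductType.keyOf("premium"); // -> ProductType.PREMIUM
// String raw = type.getType();                     // -> "premium"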
| 290 |
14,668 | <filename>chrome/test/data/extensions/js_injection_background/manifest.json
{
"name": "js_injection_background",
"description": "Tests JS injection into an extension's background page.
The name of a DOM node in the background page is returned and verified.",
"version": "0.1",
"manifest_version": 2,
"background": {
"page": "bg.html"
},
"browser_action": {
"default_title": "Browser Action"
}
}
| 142 |
778 | /*
* Copyright (C) 2021 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "shared/source/helpers/engine_node_helper.h"
#include "test.h"
using namespace NEO;
TEST(EngineNodeHelperTests, givenValidEngineUsageWhenGettingStringRepresentationThenItIsCorrect) {
EXPECT_EQ(std::string{"Regular"}, EngineHelpers::engineUsageToString(EngineUsage::Regular));
EXPECT_EQ(std::string{"Internal"}, EngineHelpers::engineUsageToString(EngineUsage::Internal));
EXPECT_EQ(std::string{"LowPriority"}, EngineHelpers::engineUsageToString(EngineUsage::LowPriority));
EXPECT_EQ(std::string{"Cooperative"}, EngineHelpers::engineUsageToString(EngineUsage::Cooperative));
}
TEST(EngineNodeHelperTests, givenInValidEngineUsageWhenGettingStringRepresentationThenReturnUnknown) {
EXPECT_EQ(std::string{"Unknown"}, EngineHelpers::engineUsageToString(EngineUsage::EngineUsageCount));
EXPECT_EQ(std::string{"Unknown"}, EngineHelpers::engineUsageToString(static_cast<EngineUsage>(0xcc)));
}
TEST(EngineNodeHelperTests, givenValidEngineTypeWhenGettingStringRepresentationThenItIsCorrect) {
#define CHECK_ENGINE(type) EXPECT_EQ(std::string{#type}, EngineHelpers::engineTypeToString(aub_stream::EngineType::ENGINE_##type))
CHECK_ENGINE(RCS);
CHECK_ENGINE(BCS);
CHECK_ENGINE(VCS);
CHECK_ENGINE(VECS);
CHECK_ENGINE(CCS);
CHECK_ENGINE(CCS1);
CHECK_ENGINE(CCS2);
CHECK_ENGINE(CCS3);
#undef CHECK_ENGINE
}
TEST(EngineNodeHelperTests, givenCcsEngineWhenHelperIsUsedThenReturnTrue) {
EXPECT_TRUE(EngineHelpers::isCcs(aub_stream::EngineType::ENGINE_CCS));
EXPECT_TRUE(EngineHelpers::isCcs(aub_stream::EngineType::ENGINE_CCS1));
EXPECT_TRUE(EngineHelpers::isCcs(aub_stream::EngineType::ENGINE_CCS2));
EXPECT_TRUE(EngineHelpers::isCcs(aub_stream::EngineType::ENGINE_CCS3));
EXPECT_FALSE(EngineHelpers::isCcs(aub_stream::EngineType::ENGINE_RCS));
EXPECT_FALSE(EngineHelpers::isCcs(aub_stream::EngineType::NUM_ENGINES));
}
TEST(EngineNodeHelperTests, givenInvalidEngineTypeWhenGettingStringRepresentationThenItIsCorrect) {
EXPECT_EQ(std::string{"Unknown"}, EngineHelpers::engineTypeToString(aub_stream::EngineType::NUM_ENGINES));
EXPECT_EQ(std::string{"Unknown"}, EngineHelpers::engineTypeToString(static_cast<aub_stream::EngineType>(0xcc)));
}
| 837 |
1,127 | // Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp"
namespace SubgraphTestsDefinitions {
TEST_P(MatMulMultiplyFusion, CompareWithRefs) {
Run();
}
TEST_P(QuantizedMatMulMultiplyFusion, CompareWithRefs) {
Run();
}
} // namespace SubgraphTestsDefinitions
| 147 |
5,169 | {
"name": "IRCrypto",
"version": "0.9.1",
"summary": "iOS Crypto library: Provides Symmetric and Asymmetric Encryption as well as AEAD using RNCryptor Data Format v3",
"description": "IRCrypto aims to provide the following:\n\nCryptographic Key Derivation (using PBKDF2 with 10,000 rounds) from a String\nSymmetric AES Key generation (128, 256, 512 bits)\nAsymmetric RSA Key generation (1024, 2048 and 4096 bits)\nAsymmetric EC Key generation (256, 384 and 521 bits) (Read More)\nData hashing (using SHA256)\nData signing (using HMAC - SHA256 + 256 bit key)\nSymmetric Encryption (AES in CBC mode)\nAsymmetric Encryption (RSA)\nAuthenticated Encryption using the Encrypt-then-MAC scheme (AES in CBC mode and HMAC) [RNCryptor Data Format v4.0]\nPublic Key Encryption with Authenticated Encryption (RSA + AES in CBC mode and HMAC) [RNCryptor Data Format v4.0]\nYou can also:\n\nSave the generated keys (AES, RSA, EC) in the Keychain and protect them with TouchID and/or a user generated password\nGenerate an asymmetric key pair (RSA) for signing data, where the private key will never be returned and just used for singning directly from the Secure Enclave",
"homepage": "https://github.com/ivRodriguezCA/IRCrypto",
"license": {
"type": "MIT",
"file": "MIT-LICENSE.txt"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"social_media_url": "http://twitter.com/ivRodriguezCA",
"platforms": {
"ios": "9.0"
},
"source": {
"git": "https://github.com/ivRodriguezCA/IRCrypto.git",
"tag": "v0.9.1"
},
"source_files": "IRCrypto/**/*.{h,m}",
"public_header_files": [
"IRCrypto/IRCryptoHeader.h",
"IRCrypto/IRCrypto.h",
"IRCrypto/IRPublicConstants.h"
]
}
| 595 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.j2ee.weblogic9.dd.model;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.regex.Pattern;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.netbeans.api.annotations.common.NullAllowed;
import org.netbeans.modules.j2ee.deployment.common.api.Version;
import org.netbeans.modules.schema2beans.AttrProp;
import org.netbeans.modules.schema2beans.NullEntityResolver;
import org.openide.util.NbBundle;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
/**
*
* @author <NAME>
*/
public final class EarApplicationModel extends BaseDescriptorModel {
private static final Pattern SCHEMA_1031 = Pattern.compile("http://xmlns\\.oracle\\.com/weblogic/weblogic-application/1\\.[0-3]/weblogic-application\\.xsd"); // NOI18N
private static final Pattern SCHEMA_1211 = Pattern.compile("http://xmlns\\.oracle\\.com/weblogic/weblogic-application/1\\.[4-6]/weblogic-application\\.xsd"); // NOI18N
private static final Pattern SCHEMA_1221 = Pattern.compile("http://xmlns\\.oracle\\.com/weblogic/weblogic-application/1\\.[7]/weblogic-application\\.xsd"); // NOI18N
private final WeblogicApplication bean;
private EarApplicationModel(WeblogicApplication bean) {
super(bean);
this.bean = bean;
}
public static EarApplicationModel forFile(File file) throws IOException {
InputStream is = new BufferedInputStream(new FileInputStream(file));
try {
return forInputStream(is);
} finally {
is.close();
}
}
public static EarApplicationModel forInputStream(InputStream is) throws IOException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(true);
factory.setValidating(false);
Document doc;
try {
DocumentBuilder builder = factory.newDocumentBuilder();
builder.setEntityResolver(NullEntityResolver.newInstance());
doc = builder.parse(is);
        } catch (SAXException | ParserConfigurationException ex) {
            throw new RuntimeException(NbBundle.getMessage(EarApplicationModel.class, "MSG_CantCreateXMLDOMDocument"), ex);
        }
String ns = doc.getDocumentElement().getNamespaceURI();
if ("http://xmlns.oracle.com/weblogic/weblogic-application".equals(ns)) { // NOI18N
String value = doc.getDocumentElement().getAttributeNS("http://www.w3.org/2001/XMLSchema-instance", "schemaLocation"); // NOI18N
if (SCHEMA_1031.matcher(value).matches()) {
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear1031.WeblogicApplication.createGraph(doc));
} else if (SCHEMA_1211.matcher(value).matches()) {
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear1211.WeblogicApplication.createGraph(doc));
} else if (SCHEMA_1221.matcher(value).matches()) {
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear1221.WeblogicApplication.createGraph(doc));
} else {
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear1221.WeblogicApplication.createGraph(doc));
}
} else if ("http://www.bea.com/ns/weblogic/weblogic-application".equals(ns)) { // NOI18N
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear1030.WeblogicApplication.createGraph(doc));
}
return new EarApplicationModel(org.netbeans.modules.j2ee.weblogic9.dd.ear90.WeblogicApplication.createGraph(doc));
}
public static EarApplicationModel generate(@NullAllowed Version serverVersion) {
if (serverVersion != null) {
if (serverVersion.isAboveOrEqual(VERSION_12_2_1)) {
return generate1221();
} else if (serverVersion.isAboveOrEqual(VERSION_12_1_1)) {
return generate1211();
} else if (serverVersion.isAboveOrEqual(VERSION_10_3_1)) {
return generate1031();
} else if (serverVersion.isAboveOrEqual(VERSION_10_3_0)) {
return generate1030();
}
}
return generate90();
}
private static EarApplicationModel generate90() {
org.netbeans.modules.j2ee.weblogic9.dd.ear90.WeblogicApplication webLogicApplication = new org.netbeans.modules.j2ee.weblogic9.dd.ear90.WeblogicApplication();
webLogicApplication.createAttribute("xmlns:j2ee", "xmlns:j2ee", AttrProp.CDATA | AttrProp.IMPLIED, null, null); // NOI18N
webLogicApplication.setAttributeValue("xmlns:j2ee", "http://java.sun.com/xml/ns/j2ee"); // NOI18N
webLogicApplication.setAttributeValue("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); // NOI18N
webLogicApplication.setAttributeValue("xsi:schemaLocation", "http://www.bea.com/ns/weblogic/90 http://www.bea.com/ns/weblogic/90/weblogic-application.xsd"); // NOI18N
return new EarApplicationModel(webLogicApplication);
}
private static EarApplicationModel generate1030() {
org.netbeans.modules.j2ee.weblogic9.dd.ear1030.WeblogicApplication webLogicApplication = new org.netbeans.modules.j2ee.weblogic9.dd.ear1030.WeblogicApplication();
webLogicApplication.setAttributeValue("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); // NOI18N
webLogicApplication.setAttributeValue("xsi:schemaLocation", "http://www.bea.com/ns/weblogic/weblogic-application http://www.bea.com/ns/weblogic/weblogic-application/1.0/weblogic-application.xsd"); // NOI18N
return new EarApplicationModel(webLogicApplication);
}
private static EarApplicationModel generate1031() {
org.netbeans.modules.j2ee.weblogic9.dd.ear1031.WeblogicApplication webLogicApplication = new org.netbeans.modules.j2ee.weblogic9.dd.ear1031.WeblogicApplication();
webLogicApplication.setAttributeValue("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); // NOI18N
webLogicApplication.setAttributeValue("xsi:schemaLocation", "http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/javaee_5.xsd http://xmlns.oracle.com/weblogic/weblogic-application http://xmlns.oracle.com/weblogic/weblogic-application/1.0/weblogic-application.xsd"); // NOI18N
return new EarApplicationModel(webLogicApplication);
}
private static EarApplicationModel generate1211() {
org.netbeans.modules.j2ee.weblogic9.dd.ear1211.WeblogicApplication webLogicApplication = new org.netbeans.modules.j2ee.weblogic9.dd.ear1211.WeblogicApplication();
webLogicApplication.setAttributeValue("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); // NOI18N
webLogicApplication.setAttributeValue("xsi:schemaLocation", "http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/javaee_5.xsd http://xmlns.oracle.com/weblogic/weblogic-application http://xmlns.oracle.com/weblogic/weblogic-application/1.4/weblogic-application.xsd"); // NOI18N
return new EarApplicationModel(webLogicApplication);
}
private static EarApplicationModel generate1221() {
org.netbeans.modules.j2ee.weblogic9.dd.ear1221.WeblogicApplication webLogicApplication = new org.netbeans.modules.j2ee.weblogic9.dd.ear1221.WeblogicApplication();
webLogicApplication.setAttributeValue("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); // NOI18N
webLogicApplication.setAttributeValue("xsi:schemaLocation", "http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/javaee_5.xsd http://xmlns.oracle.com/weblogic/weblogic-application http://xmlns.oracle.com/weblogic/weblogic-application/1.7/weblogic-application.xsd"); // NOI18N
return new EarApplicationModel(webLogicApplication);
}
}
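// Illustrative usage sketch (added example, not part of the original class):
// EarApplicationModel model = EarApplicationModel.forFile(new File("weblogic-application.xml"));
// EarApplicationModel fresh = EarApplicationModel.generate(serverVersion); // serverVersion may be null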
| 3,439 |
2,151 | <filename>extensions/browser/extension_icon_placeholder.h<gh_stars>1000+
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef EXTENSIONS_BROWSER_EXTENSION_ICON_PLACEHOLDER_H_
#define EXTENSIONS_BROWSER_EXTENSION_ICON_PLACEHOLDER_H_
#include <string>
#include "base/macros.h"
#include "base/strings/string16.h"
#include "extensions/common/constants.h"
#include "ui/gfx/image/canvas_image_source.h"
#include "ui/gfx/image/image.h"
namespace gfx {
class Canvas;
}
namespace extensions {
// An extension icon image with a gray background and the first letter of the
// extension name, so that not all extensions without an icon look the same.
class ExtensionIconPlaceholder : public gfx::CanvasImageSource {
public:
ExtensionIconPlaceholder(extension_misc::ExtensionIcons size,
const std::string& name);
~ExtensionIconPlaceholder() override;
// Creates an image backed by an ImageSkia with the ExtensionIconPlaceholder
// as its image source.
static gfx::Image CreateImage(extension_misc::ExtensionIcons size,
const std::string& name);
private:
// gfx::CanvasImageSource:
void Draw(gfx::Canvas* canvas) override;
// The size this placeholder is filling.
extension_misc::ExtensionIcons icon_size_;
// The first letter of the extension's name.
base::string16 letter_;
// The gray background image, on top of which the letter is drawn.
gfx::Image base_image_;
DISALLOW_COPY_AND_ASSIGN(ExtensionIconPlaceholder);
};
} // namespace extensions
#endif // EXTENSIONS_BROWSER_EXTENSION_ICON_PLACEHOLDER_H_
| 587 |
920 | <reponame>slikos/espresso
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import espresso.criterions # noqa
import espresso.models # noqa
import espresso.modules # noqa
import espresso.optim # noqa
import espresso.optim.lr_scheduler # noqa
import espresso.tasks # noqa
| 115 |
516 | # Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Colors to be used with a light or Vector's screen.
"""
class Color:
"""A Color to be used with a Light or Vector's screen.
Either int_color or rgb may be used to specify the actual color.
Any alpha components (from int_color) are ignored - all colors are fully opaque.
:param int_color: A 32 bit value holding the binary RGBA value (where A
is ignored and forced to be fully opaque).
    :param rgb: A tuple holding the integer values from 0-255 for (red, green, blue)
:param name: A name to assign to this color.
"""
def __init__(self, int_color: int = None, rgb: tuple = None, name: str = None):
self.name = name
self._int_color = 0
if int_color is not None:
self._int_color = int_color | 0xff
elif rgb is not None:
self._int_color = (rgb[0] << 24) | (rgb[1] << 16) | (rgb[2] << 8) | 0xff
@property
def int_color(self) -> int:
"""The encoded integer value of the color."""
return self._int_color
@property
def rgb565_bytepair(self):
"""bytes[]: Two bytes representing an int16 color with rgb565 encoding.
This format reflects the robot's Screen color range, and performing this
conversion will reduce network traffic when sending Screen data.
"""
red5 = ((self._int_color >> 24) & 0xff) >> 3
green6 = ((self._int_color >> 16) & 0xff) >> 2
blue5 = ((self._int_color >> 8) & 0xff) >> 3
green3_hi = green6 >> 3
green3_low = green6 & 0x07
int_565_color_lowbyte = (green3_low << 5) | blue5
int_565_color_highbyte = (red5 << 3) | green3_hi
return [int_565_color_highbyte, int_565_color_lowbyte]
#: :class:`Color`: Green color instance.
green = Color(name="green", int_color=0x00ff00ff)
#: :class:`Color`: Red color instance.
red = Color(name="red", int_color=0xff0000ff)
#: :class:`Color`: Blue color instance.
blue = Color(name="blue", int_color=0x0000ffff)
#: :class:`Color`: Cyan color instance.
cyan = Color(name="cyan", int_color=0x00ffffff)
#: :class:`Color`: Magenta color instance.
magenta = Color(name="magenta", int_color=0xff00ffff)
#: :class:`Color`: Yellow color instance.
yellow = Color(name="yellow", int_color=0xffff00ff)
#: :class:`Color`: White color instance.
white = Color(name="white", int_color=0xffffffff)
#: :class:`Color`: Instance representing no color (i.e., lights off).
off = Color(name="off")
| 1,116 |
2,329 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shenyu.plugin.cache.handler;
import org.apache.shenyu.common.dto.PluginData;
import org.apache.shenyu.common.dto.RuleData;
import org.apache.shenyu.common.dto.convert.rule.impl.CacheRuleHandle;
import org.apache.shenyu.common.enums.PluginEnum;
import org.apache.shenyu.common.utils.GsonUtils;
import org.apache.shenyu.common.utils.Singleton;
import org.apache.shenyu.plugin.base.cache.CommonHandleCache;
import org.apache.shenyu.plugin.base.handler.PluginDataHandler;
import org.apache.shenyu.plugin.base.utils.BeanHolder;
import org.apache.shenyu.plugin.base.utils.CacheKeyUtils;
import org.apache.shenyu.plugin.cache.ICache;
import org.apache.shenyu.plugin.cache.ICacheBuilder;
import org.apache.shenyu.plugin.cache.config.CacheConfig;
import org.apache.shenyu.plugin.cache.utils.CacheUtils;
import org.apache.shenyu.spi.ExtensionLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Supplier;
/**
* The type Cache plugin data handler.
*/
public class CachePluginDataHandler implements PluginDataHandler {
public static final Supplier<CommonHandleCache<String, CacheRuleHandle>> CACHED_HANDLE = new BeanHolder<>(CommonHandleCache::new);
/**
* the log.
*/
private static final Logger LOG = LoggerFactory.getLogger(CachePluginDataHandler.class);
@Override
public void handlerPlugin(final PluginData pluginData) {
if (Objects.isNull(pluginData) || Boolean.FALSE.equals(pluginData.getEnabled())) {
LOG.info("the plugin {} is disabled", this.pluginNamed());
this.closeCacheIfNeed();
return;
}
final String config = pluginData.getConfig();
CacheConfig cacheConfig = GsonUtils.getInstance().fromJson(config, CacheConfig.class);
if (Objects.isNull(cacheConfig)) {
LOG.info("invalid cacheConfig.");
return;
}
LOG.info("use the {} cache.", cacheConfig.getCacheType());
// set the config to compare with lastConfig.
cacheConfig.setConfig(config);
final CacheConfig lastCacheConfig = Singleton.INST.get(CacheConfig.class);
if (cacheConfig.equals(lastCacheConfig)) {
LOG.info("cache plugin initialized.");
return;
}
Singleton.INST.single(CacheConfig.class, cacheConfig);
this.closeCacheIfNeed();
final ICacheBuilder cacheBuilder = ExtensionLoader.getExtensionLoader(ICacheBuilder.class).getJoin(cacheConfig.getCacheType());
Singleton.INST.single(ICache.class, cacheBuilder.builderCache(config));
}
@Override
public void handlerRule(final RuleData ruleData) {
Optional.ofNullable(ruleData.getHandle()).ifPresent(json -> {
final CacheRuleHandle cacheRuleHandle = GsonUtils.getInstance().fromJson(json, CacheRuleHandle.class);
CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), cacheRuleHandle);
});
}
@Override
public void removeRule(final RuleData ruleData) {
Optional.ofNullable(ruleData.getHandle()).ifPresent(json -> CACHED_HANDLE.get().removeHandle(CacheKeyUtils.INST.getKey(ruleData)));
}
@Override
public String pluginNamed() {
return PluginEnum.CACHE.getName();
}
/**
     * Close the cache if needed.
*/
private void closeCacheIfNeed() {
ICache lastCache = CacheUtils.getCache();
if (Objects.nonNull(lastCache)) {
// close last cache.
LOG.info("close the last cache {}", lastCache);
lastCache.close();
}
}
}
| 1,588 |
456 | <reponame>767214481/Summer
package com.swingfrog.summer.test.ecsgameserver.infrastructure;
public interface SessionHandlerPriority {
int LOGIN = 1000;
int PLAYER = 900;
}
| 63 |
4,054 | <reponame>Thendont/lwjgl<filename>modules/lwjgl/opencl/src/generated/java/org/lwjgl/opencl/KHRMipmapImage.java
/*
* Copyright LWJGL. All rights reserved.
* License terms: https://www.lwjgl.org/license
* MACHINE GENERATED FILE, DO NOT EDIT
*/
package org.lwjgl.opencl;
/**
* Native bindings to the <strong>khr_mipmap_image</strong> extension.
*
* <p>This extension adds support to create a mip-mapped image, enqueue commands to read/write/copy/map a region of a mipmapped image and built-in functions
* that can be used to read a mip-mapped image in an OpenCL C program.</p>
*/
public final class KHRMipmapImage {
/** cl_sampler_info */
public static final int
CL_SAMPLER_MIP_FILTER_MODE_KHR = 0x1155,
CL_SAMPLER_LOD_MIN_KHR = 0x1156,
CL_SAMPLER_LOD_MAX_KHR = 0x1157;
private KHRMipmapImage() {}
} | 350 |
2,542 | <gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Naming
{
namespace UserServiceState
{
enum Enum
{
Invalid = 0,
None = 1,
Creating = 2,
Deleting = 3,
Created = 4,
Updating = 5,
ForceDeleting = 6,
};
bool IsDeleting(Enum const & value);
void WriteToTextWriter(Common::TextWriter & w, Enum const & e);
}
}
| 282 |
401 | <reponame>mariuslindegaard/6.867_MARL_project
import torch as th
from torch.distributions import Categorical
from torch.distributions.one_hot_categorical import OneHotCategorical
from .epsilon_schedules import DecayThenFlatSchedule
class GumbelSoftmax(OneHotCategorical):
def __init__(self, logits, probs=None, temperature=1):
super(GumbelSoftmax, self).__init__(logits=logits, probs=probs)
self.eps = 1e-20
self.temperature = temperature
def sample_gumbel(self):
U = self.logits.clone()
U.uniform_(0, 1)
return -th.log( -th.log( U + self.eps))
def gumbel_softmax_sample(self):
y = self.logits + self.sample_gumbel()
return th.softmax( y / self.temperature, dim=-1)
def hard_gumbel_softmax_sample(self):
y = self.gumbel_softmax_sample()
return (th.max(y, dim=-1, keepdim=True)[0] == y).float()
def rsample(self):
return self.gumbel_softmax_sample()
def sample(self):
return self.rsample().detach()
def hard_sample(self):
return self.hard_gumbel_softmax_sample()
def multinomial_entropy(logits):
assert logits.size(-1) > 1
return GumbelSoftmax(logits=logits).entropy()
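# --- Illustrative usage sketch (added example, not part of the original module):
# logits = th.randn(4, 5)                       # batch of 4 states, 5 actions
# dist = GumbelSoftmax(logits=logits, temperature=1)
# soft = dist.rsample()                         # differentiable relaxed sample
# hard = dist.hard_sample()                     # one-hot argmax of a relaxed sample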
REGISTRY = {}
class GumbelSoftmaxMultinomialActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
self.test_greedy = getattr(args, "test_greedy", True)
self.save_probs = getattr(self.args, 'save_probs', False)
def select_action(self, agent_logits, avail_actions, t_env, test_mode=False):
masked_policies = agent_logits.clone()
self.epsilon = self.schedule.eval(t_env)
if test_mode and self.test_greedy:
picked_actions = masked_policies.max(dim=2)[1]
else:
picked_actions = GumbelSoftmax(logits=masked_policies).sample()
picked_actions = th.argmax(picked_actions, dim=-1).long()
if self.save_probs:
return picked_actions, masked_policies
else:
return picked_actions
REGISTRY["gumbel"] = GumbelSoftmaxMultinomialActionSelector
class MultinomialActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
self.test_greedy = getattr(args, "test_greedy", True)
self.save_probs = getattr(self.args, 'save_probs', False)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
masked_policies = agent_inputs.clone()
masked_policies[avail_actions == 0] = 0
masked_policies = masked_policies / (masked_policies.sum(-1, keepdim=True) + 1e-8)
if test_mode and self.test_greedy:
picked_actions = masked_policies.max(dim=2)[1]
else:
self.epsilon = self.schedule.eval(t_env)
epsilon_action_num = (avail_actions.sum(-1, keepdim=True) + 1e-8)
masked_policies = ((1 - self.epsilon) * masked_policies
+ avail_actions * self.epsilon/epsilon_action_num)
masked_policies[avail_actions == 0] = 0
picked_actions = Categorical(masked_policies).sample().long()
if self.save_probs:
return picked_actions, masked_policies
else:
return picked_actions
REGISTRY["multinomial"] = MultinomialActionSelector
def categorical_entropy(probs):
assert probs.size(-1) > 1
return Categorical(probs=probs).entropy()
class EpsilonGreedyActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
# Assuming agent_inputs is a batch of Q-Values for each agent bav
self.epsilon = self.schedule.eval(t_env)
if test_mode:
# Greedy action selection only
self.epsilon = getattr(self.args, "test_noise", 0.0)
# mask actions that are excluded from selection
masked_q_values = agent_inputs.clone()
masked_q_values[avail_actions == 0] = -float("inf") # should never be selected!
random_numbers = th.rand_like(agent_inputs[:, :, 0])
pick_random = (random_numbers < self.epsilon).long()
random_actions = Categorical(avail_actions.float()).sample().long()
picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
return picked_actions
REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
class GaussianActionSelector():
def __init__(self, args):
self.args = args
self.test_greedy = getattr(args, "test_greedy", True)
def select_action(self, mu, sigma, test_mode=False):
# Expects the following input dimensions:
# mu: [b x a x u]
        # sigma: [b x a x u*u] (flattened; reshaped to [b x a x u x u] below)
assert mu.dim() == 3, "incorrect input dim: mu"
assert sigma.dim() == 3, "incorrect input dim: sigma"
sigma = sigma.view(-1, self.args.n_agents, self.args.n_actions, self.args.n_actions)
if test_mode and self.test_greedy:
picked_actions = mu
else:
dst = th.distributions.MultivariateNormal(mu.view(-1,
mu.shape[-1]),
sigma.view(-1,
mu.shape[-1],
mu.shape[-1]))
            picked_actions = dst.sample().view(*mu.shape)
return picked_actions
REGISTRY["gaussian"] = GaussianActionSelector | 3,041 |
5,169 | <reponame>Gantios/Specs
{
"name": "SwiftLocation",
"version": "5.0.1",
"summary": "Location Manager Made Easy",
"description": "Efficient location tracking for iOS with support for oneshot/continuous/background tracking, reverse geocoding, autocomplete, geofencing, beacon monitoring & broadcasting",
"homepage": "https://github.com/malcommac/SwiftLocation.git",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"social_media_url": "https://twitter.com/danielemargutti",
"platforms": {
"ios": "11.0",
"osx": "11.0"
},
"source": {
"git": "https://github.com/malcommac/SwiftLocation.git",
"tag": "5.0.1"
},
"source_files": "Sources/**/*.swift",
"frameworks": [
"Foundation",
"CoreLocation",
"MapKit"
],
"swift_versions": [
"5.0",
"5.1",
"5.3"
],
"swift_version": "5.3"
}
| 372 |
372 | <reponame>arithmetic1728/google-api-java-client-services
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.monitoring.v3.model;
/**
* A condition type that allows alert policies to be defined using Monitoring Query Language
* (https://cloud.google.com/monitoring/mql).
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Monitoring API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class MonitoringQueryLanguageCondition extends com.google.api.client.json.GenericJson {
/**
* The amount of time that a time series must violate the threshold to be considered failing.
* Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are
* supported. If an invalid value is given, an error will be returned. When choosing a duration,
* it is useful to keep in mind the frequency of the underlying time series data (which may also
* be affected by any alignments specified in the aggregations field); a good duration is long
* enough so that a single outlier does not generate spurious alerts, but short enough that
* unhealthy states are detected and alerted on quickly.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private String duration;
/**
* Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a
* boolean stream.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String query;
/**
* The number/percent of time series for which the comparison must hold in order for the condition
* to trigger. If unspecified, then the condition will trigger if the comparison is true for any
* of the time series that have been identified by filter and aggregations, or by the ratio, if
* denominator_filter and denominator_aggregations are specified.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private Trigger trigger;
/**
* The amount of time that a time series must violate the threshold to be considered failing.
* Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are
* supported. If an invalid value is given, an error will be returned. When choosing a duration,
* it is useful to keep in mind the frequency of the underlying time series data (which may also
* be affected by any alignments specified in the aggregations field); a good duration is long
* enough so that a single outlier does not generate spurious alerts, but short enough that
* unhealthy states are detected and alerted on quickly.
* @return value or {@code null} for none
*/
public String getDuration() {
return duration;
}
/**
* The amount of time that a time series must violate the threshold to be considered failing.
* Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are
* supported. If an invalid value is given, an error will be returned. When choosing a duration,
* it is useful to keep in mind the frequency of the underlying time series data (which may also
* be affected by any alignments specified in the aggregations field); a good duration is long
* enough so that a single outlier does not generate spurious alerts, but short enough that
* unhealthy states are detected and alerted on quickly.
* @param duration duration or {@code null} for none
*/
public MonitoringQueryLanguageCondition setDuration(String duration) {
this.duration = duration;
return this;
}
/**
* Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a
* boolean stream.
* @return value or {@code null} for none
*/
public java.lang.String getQuery() {
return query;
}
/**
* Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a
* boolean stream.
* @param query query or {@code null} for none
*/
public MonitoringQueryLanguageCondition setQuery(java.lang.String query) {
this.query = query;
return this;
}
/**
* The number/percent of time series for which the comparison must hold in order for the condition
* to trigger. If unspecified, then the condition will trigger if the comparison is true for any
* of the time series that have been identified by filter and aggregations, or by the ratio, if
* denominator_filter and denominator_aggregations are specified.
* @return value or {@code null} for none
*/
public Trigger getTrigger() {
return trigger;
}
/**
* The number/percent of time series for which the comparison must hold in order for the condition
* to trigger. If unspecified, then the condition will trigger if the comparison is true for any
* of the time series that have been identified by filter and aggregations, or by the ratio, if
* denominator_filter and denominator_aggregations are specified.
* @param trigger trigger or {@code null} for none
*/
public MonitoringQueryLanguageCondition setTrigger(Trigger trigger) {
this.trigger = trigger;
return this;
}
@Override
public MonitoringQueryLanguageCondition set(String fieldName, Object value) {
return (MonitoringQueryLanguageCondition) super.set(fieldName, value);
}
@Override
public MonitoringQueryLanguageCondition clone() {
return (MonitoringQueryLanguageCondition) super.clone();
}
}
| 1,738 |
930 | package com.zone.weixin4j.interceptor;
import com.zone.weixin4j.exception.WeixinException;
import com.zone.weixin4j.handler.WeixinMessageHandler;
import com.zone.weixin4j.request.WeixinMessage;
import com.zone.weixin4j.request.WeixinRequest;
import com.zone.weixin4j.response.WeixinResponse;
/**
* 消息拦截适配
*
* @author jinyu(<EMAIL>)
* @className MessageInterceptorAdapter
* @date 2015年5月14日
* @see
* @since JDK 1.6
*/
public abstract class MessageInterceptorAdapter implements
WeixinMessageInterceptor {
@Override
public boolean preHandle(
WeixinRequest request, WeixinMessage message, WeixinMessageHandler handler)
throws WeixinException {
return true;
}
@Override
public void postHandle(
WeixinRequest request, WeixinResponse response, WeixinMessage message,
WeixinMessageHandler handler) throws WeixinException {
}
@Override
public void afterCompletion(
WeixinRequest request, WeixinResponse response, WeixinMessage message,
WeixinMessageHandler handler, Exception exception)
throws WeixinException {
}
@Override
public int weight() {
return 0;
}
}
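// Illustrative usage sketch (added example, not part of the original class):
// public class LoggingInterceptor extends MessageInterceptorAdapter {
//     @Override
//     public boolean preHandle(WeixinRequest request, WeixinMessage message,
//             WeixinMessageHandler handler) throws WeixinException {
//         // inspect the message; return false to stop the handler chain
//         return true;
//     }
// }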
| 488 |
1,010 | <filename>src/lib/shim/shim_api_addrinfo.c
/*
* The Shadow Simulator
* See LICENSE for licensing information
*/
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <glib.h>
#include <netdb.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include "lib/logger/logger.h"
#include "lib/shim/shim.h"
#include "lib/shim/shim_syscall.h"
#include "main/host/syscall_numbers.h" // For SYS_shadow_hostname_to_addr_ipv4
// Sets `port` to the port specified by `service`, according to the criteria in
// getaddrinfo(3). Returns 0 on success or the appropriate getaddrinfo error on
// failure.
static int _getaddrinfo_service(in_port_t* port, const char* service,
const struct addrinfo* hints) {
char* endptr;
*port = htons(strtol(service, &endptr, 10));
if (*service != '\0' && *endptr == '\0') {
return 0;
}
// getaddrinfo(3): "EAI_NONAME: ... or AI_NUMERICSERV was specified in
// hints.ai_flags and service was not a numeric port-number string."
if (hints->ai_flags & AI_NUMERICSERV) {
return EAI_NONAME;
}
// `buf` will be used for strings pointed to in `result`.
// 1024 is the recommended size in getservbyname_r(3).
char* buf = malloc(1024);
struct servent servent;
struct servent* result;
int rv = getservbyname_r(service, NULL, &servent, buf, 1024, &result);
if (rv != 0) {
// According to getservbyname_r(3): "On error, they return one of the
// positive error numbers listed in errors." The only one documented as
// possibly being returned by getserbyname_r is ERANGE, indicating that
// the buffer was too small. We *could* retry with a bigger buffer, but
// that really shouldn't be needed.
//
// getaddrinfo(3): "EAI_SYSTEM: Other system error, check errno for
// details."
        free(buf);
        errno = rv;
        return EAI_SYSTEM;
}
if (result == NULL) {
// getaddrinfo(3): "The requested service is not available for the
// requested socket type."
        free(buf);
        return EAI_SERVICE;
}
// While getaddrinfo(3) seems to indicate that we should restrict which
// protocols we return based on the specific service, and fail if the
// service we found was incompatible with the requested socket type or
// protocol, experimentally glibc doesn't do this. e.g., for "80" or "http"
// it will return UDP and RAW in addition to TCP, despite /etc/services
// only containing a TCP entry for that protocol.
*port = result->s_port;
free(buf);
return rv;
}
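// Worked example (added illustration, not part of the original source):
// for service "80" the strtol fast path above sets *port = htons(80);
// for service "http" the getservbyname_r lookup resolves the same port
// from /etc/services (servent.s_port is already in network byte order).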
// Creates an `addrinfo` pointing to `addr`, and adds it to the linked list
// specified by `head` and `tail`. An empty list can be passed in by setting
// `*head` and `*tail` to NULL.
static void _getaddrinfo_append(struct addrinfo** head, struct addrinfo** tail, int socktype,
struct sockaddr* addr, socklen_t addrlen) {
int protocol = 0;
if (socktype == SOCK_DGRAM) {
protocol = IPPROTO_UDP;
}
if (socktype == SOCK_STREAM) {
protocol = IPPROTO_TCP;
}
if (socktype == SOCK_RAW) {
protocol = 0;
}
struct addrinfo* new_tail = malloc(sizeof(*new_tail));
*new_tail = (struct addrinfo){.ai_flags = 0,
.ai_family = AF_INET,
.ai_socktype = socktype,
.ai_protocol = protocol,
.ai_addrlen = addrlen,
.ai_addr = addr,
.ai_canonname = NULL,
.ai_next = NULL};
if (*tail != NULL) {
(*tail)->ai_next = new_tail;
}
*tail = new_tail;
if (*head == NULL) {
*head = new_tail;
}
}
// IPv4 wrapper for _getaddrinfo_append. Appends an entry for the address and
// port for each requested socket type.
static void _getaddrinfo_appendv4(struct addrinfo** head, struct addrinfo** tail, bool add_tcp,
bool add_udp, bool add_raw, uint32_t s_addr, in_port_t port) {
if (add_tcp) {
struct sockaddr_in* sai = malloc(sizeof(*sai));
*sai = (struct sockaddr_in){.sin_family = AF_INET, .sin_port = port, .sin_addr = {s_addr}};
_getaddrinfo_append(head, tail, SOCK_STREAM, (struct sockaddr*)sai, sizeof(*sai));
}
if (add_udp) {
struct sockaddr_in* sai = malloc(sizeof(*sai));
*sai = (struct sockaddr_in){.sin_family = AF_INET, .sin_port = port, .sin_addr = {s_addr}};
_getaddrinfo_append(head, tail, SOCK_DGRAM, (struct sockaddr*)sai, sizeof(*sai));
}
if (add_raw) {
struct sockaddr_in* sai = malloc(sizeof(*sai));
*sai = (struct sockaddr_in){.sin_family = AF_INET, .sin_port = port, .sin_addr = {s_addr}};
_getaddrinfo_append(head, tail, SOCK_RAW, (struct sockaddr*)sai, sizeof(*sai));
}
}
// Looks for matching IPv4 addresses in /etc/hosts and them to the list
// specified by `head` and `tail`.
static void _getaddrinfo_add_matching_hosts_ipv4(struct addrinfo** head, struct addrinfo** tail,
const char* node, bool add_tcp, bool add_udp,
bool add_raw, in_port_t port) {
// TODO: Parse hosts file once and keep it in an efficiently-searchable
// in-memory format.
GError* error = NULL;
gchar* hosts = NULL;
char* pattern = NULL;
GMatchInfo* match_info = NULL;
GRegex* regex = NULL;
trace("Reading /etc/hosts file");
g_file_get_contents("/etc/hosts", &hosts, NULL, &error);
if (error != NULL) {
panic("Reading /etc/hosts: %s", error->message);
goto out;
}
assert(hosts != NULL);
trace("Scanning /etc/hosts contents for name %s", node);
{
gchar* escaped_node = g_regex_escape_string(node, -1);
// Build a regex to match an IPv4 address entry for the given `node` in
// /etc/hosts. See HOSTS(5) for format specification.
int rv = asprintf(&pattern, "^(\\d+\\.\\d+\\.\\d+\\.\\d+)[^#\n]*\\b%s\\b", escaped_node);
g_free(escaped_node);
if (rv < 0) {
panic("asprintf failed: %d", rv);
goto out;
}
}
trace("Node:%s -> regex:%s", node, pattern);
regex = g_regex_new(pattern, G_REGEX_MULTILINE, 0, &error);
if (error != NULL) {
panic("g_regex_new: %s", error->message);
goto out;
}
assert(regex != NULL);
g_regex_match(regex, hosts, 0, &match_info);
// /etc/host.conf specifies whether to return all matching addresses or only
// the first. The recommended configuration is to only return the first. For
// now we hard-code that behavior.
if (g_match_info_matches(match_info)) {
#ifdef DEBUG
{
gchar* matched_string = g_match_info_fetch(match_info, 0);
trace("Node:%s -> match:%s", node, matched_string);
g_free(matched_string);
}
#endif
gchar* address_string = g_match_info_fetch(match_info, 1);
trace("Node:%s -> address string:%s", node, address_string);
assert(address_string != NULL);
uint32_t addr;
int rv = inet_pton(AF_INET, address_string, &addr);
if (rv != 1) {
panic("Bad address in /etc/hosts: %s\n", address_string);
} else {
_getaddrinfo_appendv4(head, tail, add_tcp, add_udp, add_raw, addr, port);
}
g_free(address_string);
}
out:
if (match_info != NULL)
g_match_info_free(match_info);
if (regex != NULL)
g_regex_unref(regex);
if (pattern != NULL)
free(pattern);
if (hosts != NULL)
g_free(hosts);
}
// Ask shadow to provide an ipv4 addr for a node using a custom syscall.
// Returns true if we got a valid address from shadow, false otherwise.
static bool _shim_api_hostname_to_addr_ipv4(const char* node, uint32_t* addr) {
if (!node || !addr) {
return false;
}
// Skip the Shadow syscall for localhost lookups.
if (strcasecmp(node, "localhost") == 0) {
// Loopback address in network order.
*addr = htonl(INADDR_LOOPBACK);
trace("handled localhost getaddrinfo() lookup locally");
return true;
}
// Resolve the hostname (find the ipv4 `addr` associated with hostname `name`) using a custom
// syscall that Shadow handles internally. We want to execute natively in ptrace mode so ptrace
// can intercept it, but we want to send to Shadow through shmem in preload mode. Let
// shim_syscall figure it out.
trace("Performing custom shadow syscall SYS_shadow_hostname_to_addr_ipv4 for name %s", node);
int rv =
shim_syscall(SYS_shadow_hostname_to_addr_ipv4, node, strlen(node), addr, sizeof(*addr));
if (rv == 0) {
#ifdef DEBUG
char addr_str_buf[INET_ADDRSTRLEN] = {0};
if (inet_ntop(AF_INET, (struct in_addr*)addr, addr_str_buf, INET_ADDRSTRLEN)) {
trace("SYS_shadow_hostname_to_addr_ipv4 returned addr %s for name %s", addr_str_buf,
node);
} else {
trace("SYS_shadow_hostname_to_addr_ipv4 succeeded for name %s", node);
}
#endif
return true;
} else {
trace("SYS_shadow_hostname_to_addr_ipv4 failed for name %s", node);
return false;
}
}
int shim_api_getaddrinfo(const char* node, const char* service, const struct addrinfo* hints,
struct addrinfo** res) {
// Quoted text is from the man page.
// "Either node or service, but not both, may be NULL."
// "EAI_NONAME...both node and service are NULL"
if (node == NULL && service == NULL) {
return EAI_NONAME;
}
// "Specifying hints as NULL is equivalent to setting ai_socktype and
// ai_protocol to 0; ai_family to AF_UNSPEC; and ai_flags to (AI_V4MAPPED |
    // AI_ADDRCONFIG)."
static const struct addrinfo default_hints = {.ai_socktype = 0,
.ai_protocol = 0,
.ai_family = AF_UNSPEC,
.ai_flags = AI_V4MAPPED | AI_ADDRCONFIG};
if (hints == NULL) {
hints = &default_hints;
}
// "`service` sets the port in each returned address structure."
in_port_t port = 0;
if (service != NULL) {
int rv = _getaddrinfo_service(&port, service, hints);
if (rv != 0) {
return rv;
}
}
// "There are several reasons why the linked list may have more than one
// addrinfo structure, including: the network host is ... the same service
// is available from multiple socket types (one SOCK_STREAM address and
// another SOCK_DGRAM address, for example)."
//
// Experimentally, glibc doesn't pay attention to which protocols are
// specified for the given port in /etc/services; it returns all protocols
// that are compatible with `hints`. We do the same for compatibility.
bool add_tcp = (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM) &&
(hints->ai_protocol == 0 || hints->ai_protocol == IPPROTO_TCP);
bool add_udp = (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM) &&
(hints->ai_protocol == 0 || hints->ai_protocol == IPPROTO_UDP);
bool add_raw =
(hints->ai_socktype == 0 || hints->ai_socktype == SOCK_RAW) && (hints->ai_protocol == 0);
// "If hints.ai_flags includes the AI_ADDRCONFIG flag, then IPv4 addresses
// are returned in the list pointed to by res only if the local system
// has at least one IPv4 address configured, and IPv6 addresses are
// returned only if the local system has at least one IPv6 address
// configured."
//
// Determining what kind of addresses the local system has configured is
// unimplemented. For now we assume it has IPv4 and not IPv6.
const bool system_has_an_ipv4_address = true;
const bool system_has_an_ipv6_address = false;
// "There are several reasons why the linked list may have more than one
// addrinfo structure, including: the network host is ... accessible over
// multiple protocols (e.g., both AF_INET and AF_INET6)"
//
    // Here we constrain which protocols to consider up front, so that we don't
    // bother doing lookups for the other protocols.
const bool add_ipv4 = hints->ai_family == AF_UNSPEC ||
(hints->ai_family == AF_INET &&
!((hints->ai_flags & AI_ADDRCONFIG) && !system_has_an_ipv4_address));
const bool add_ipv6 = hints->ai_family == AF_UNSPEC ||
(hints->ai_family == AF_INET6 &&
!((hints->ai_flags & AI_ADDRCONFIG) && !system_has_an_ipv6_address));
// "EAI_ADDRFAMILY: The specified network host does not have any network
// addresses in the requested address family."
if (!add_ipv4 && !add_ipv6) {
return EAI_ADDRFAMILY;
}
// *res will be the head of the linked lists of results. For efficiency we
// also keep track of the tail of the list.
*res = NULL;
struct addrinfo* tail = NULL;
// No address lookups needed if `node` is NULL.
if (node == NULL) {
if (hints->ai_flags & AI_PASSIVE) {
// "If the AI_PASSIVE flag is specified in hints.ai_flags, and node
// is NULL, then the returned socket addresses will be suitable for
// bind(2)ing a socket that will accept(2) connections. The
// returned socket address will contain the "wildcard address"
// (INADDR_ANY for IPv4 addresses, IN6ADDR_ANY_INIT for IPv6
// address)."
if (add_ipv4) {
_getaddrinfo_appendv4(
res, &tail, add_tcp, add_udp, add_raw, ntohl(INADDR_ANY), port);
}
if (add_ipv6) {
// TODO: IPv6
}
} else {
// "If the AI_PASSIVE flag is not set in hints.ai_flags, then the
// returned socket addresses will be suitable for use with
// connect(2), sendto(2), or sendmsg(2). If node is NULL, then the
// network address will be set to the loopback interface address
            // (INADDR_LOOPBACK for IPv4 addresses, IN6ADDR_LOOPBACK_INIT
// for IPv6 address);"
if (add_ipv4) {
_getaddrinfo_appendv4(
res, &tail, add_tcp, add_udp, add_raw, ntohl(INADDR_LOOPBACK), port);
}
if (add_ipv6) {
// TODO: IPv6
}
}
// We've finished adding all relevant addresses.
return 0;
}
// "`node` specifies either a numerical network address..."
if (add_ipv6) {
// TODO: try parsing as IPv6
}
if (add_ipv4) {
uint32_t addr;
if (inet_pton(AF_INET, node, &addr) == 1) {
_getaddrinfo_appendv4(res, &tail, add_tcp, add_udp, add_raw, addr, port);
}
}
// If we successfully parsed as a numeric address, there's no need to
// continue on to doing name-based lookups.
if (*res != NULL) {
return 0;
}
// "If hints.ai_flags contains the AI_NUMERICHOST flag, then node
// must be a numerical network address."
if (hints->ai_flags & AI_NUMERICHOST) {
// "The node or service is not known; or both node and service are NULL;
// or AI_NUMERICSERV was specified in hints.ai_flags and service was not
// a numeric port-number string."
//
// The man page isn't 100% explicit about which error to return in this
// case, but EAI_NONAME is plausible based on the above, and it's what
// glibc returns.
return EAI_NONAME;
}
// "node specifies either a numerical network address...or a network
// hostname, whose network addresses are looked up and resolved."
//
// On to name lookups. The `hosts` line in /etc/nsswitch.conf specifies the
// order in which to try lookups. We just hard-code trying `files` first
// (and for now, only). For hosts lookups, the corresponding file is
// /etc/hosts. See NSSWITCH.CONF(5).
if (add_ipv6) {
// TODO: look for IPv6 addresses in /etc/hosts.
}
if (add_ipv4) {
        // Try the shadow syscall first to avoid scanning the /etc/hosts file.
uint32_t addr;
if (_shim_api_hostname_to_addr_ipv4(node, &addr)) {
// We got the address we needed.
_getaddrinfo_appendv4(res, &tail, add_tcp, add_udp, add_raw, addr, port);
} else {
// Fall back to scanning /etc/hosts.
warning("shadow_hostname_to_addr_ipv4 syscall failed for name %s, falling back to less "
"efficient scan of '/etc/hosts' file.",
node);
_getaddrinfo_add_matching_hosts_ipv4(res, &tail, node, add_tcp, add_udp, add_raw, port);
}
}
// TODO: maybe do DNS lookup, if we end up supporting that in Shadow.
if (*res == NULL) {
// "EAI_NONAME: The node or service is not known"
return EAI_NONAME;
}
return 0;
}
void shim_api_freeaddrinfo(struct addrinfo* res) {
while (res != NULL) {
struct addrinfo* next = res->ai_next;
assert(res->ai_addr != NULL);
free(res->ai_addr);
// We don't support canonname lookups, so shouldn't have been set.
assert(res->ai_canonname == NULL);
free(res);
res = next;
}
}
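// Minimal caller sketch (illustrative only; follows the usual GETADDRINFO(3)
// calling convention, and "example.host" is a hypothetical name):
//
//     struct addrinfo hints = {0};
//     hints.ai_family = AF_INET;
//     hints.ai_socktype = SOCK_STREAM;
//     struct addrinfo* res = NULL;
//     int rv = shim_api_getaddrinfo("example.host", "80", &hints, &res);
//     if (rv == 0) {
//         for (struct addrinfo* it = res; it != NULL; it = it->ai_next) {
//             // connect(2) using it->ai_addr / it->ai_addrlen ...
//         }
//         shim_api_freeaddrinfo(res);
//     }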
| 7,808 |
308 | package com.wanbo.werb.ui.view;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
/**
* Created by Werb on 2016/8/2.
* Werb is Wanbo.
* Contact Me : <EMAIL>
*/
public interface ITabView {
String getWeiBoId();
RecyclerView getRecyclerView();
LinearLayoutManager getLayoutManager();
}
| 127 |
719 | <gh_stars>100-1000
package com.googlecode.objectify;
import com.googlecode.objectify.util.Closeable;
/**
* A simple thread local namespace manager, similar to legacy GAE's {@code NamespaceManager}.
*/
public class NamespaceManager {
/** */
private static final ThreadLocal<String> NAMESPACE = new ThreadLocal<>();
/**
* <p>Sets the default namespace for this thread. Similar to legacy GAE's {@code NamespaceManager}. While a namespace
* is set, all keys which are not created with an explicit namespace and all queries without an explicit
* namespace will inherit this value.</p>
*
* <p>To exit the namespace, call {@code close()} on the return value. This should be performed in a finally block,
	 * or even better with a try-with-resources idiom:</p>
* {@code try (Closeable ignored = NamespaceManager.set("blah")) { ... }}
*
* <p>Note that this namespace affects key creation, but once a key has been created, it has an inherent namespace.</p>
*
* <pre>
* final Key<Foo> key = Key.create(Foo.class, 123);
*
* try (final Closeable ignored = NamespaceManager.set("blah")) {
* ofy().load().key(key); // The key already has the default namespace
* ofy().load().type(Foo.class).id(123); // Uses the 'blah' namespace
* ofy().load().key(Key.create(Foo.class, 123)); // Uses the 'blah' namespace
* }
* </pre>
*
* <p>You can call {@code set(null)} to clear the namespace; this is identical to calling {@code close()} on the return value.</p>
*/
public static Closeable set(final String namespaceName) {
final String previous = NAMESPACE.get();
NAMESPACE.set(namespaceName);
return () -> NAMESPACE.set(previous);
}
/**
* @return the currently set default namespace, or null if one is not set
*/
public static String get() {
return NAMESPACE.get();
}
}
| 610 |
777 | <gh_stars>100-1000
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef WebSelection_h
#define WebSelection_h
#include "../platform/WebCommon.h"
#include "../platform/WebSelectionBound.h"
namespace blink {
struct CompositedSelection;
// The active selection region, containing compositing data for the selection
// end points as well as metadata for the selection region.
class BLINK_EXPORT WebSelection {
public:
enum SelectionType { NoSelection, CaretSelection, RangeSelection };
#if INSIDE_BLINK
explicit WebSelection(const CompositedSelection&);
#endif
WebSelection(const WebSelection&);
const WebSelectionBound& start() const { return m_start; }
const WebSelectionBound& end() const { return m_end; }
bool isNone() const { return selectionType() == NoSelection; }
bool isCaret() const { return selectionType() == CaretSelection; }
bool isRange() const { return selectionType() == RangeSelection; }
bool isEditable() const { return m_isEditable; }
bool isEmptyTextFormControl() const { return m_isEmptyTextControl; }
private:
SelectionType selectionType() const { return m_selectionType; }
SelectionType m_selectionType;
WebSelectionBound m_start;
WebSelectionBound m_end;
// Whether the selection region consists of editable text.
bool m_isEditable;
// Whether the selection resides in an empty text form control. Note that
// this only applies to caret-type selections.
bool m_isEmptyTextControl;
};
} // namespace blink
#endif // WebSelection_h
| 467 |
435 | <reponame>amaajemyfren/data<filename>pydata-london-2015/videos/helena-bengtsson-keynote-how-to-find-stories-in-data.json
{
"description": "This talk is about how data is used to enrich stories at the Guardian.",
"duration": 2486,
"language": "eng",
"recorded": "2015-06-20",
"speakers": [
"<NAME>"
],
"tags": [
"keynote"
],
"thumbnail_url": "https://i.ytimg.com/vi/UqqfADo1LJg/hqdefault.jpg",
"title": "Keynote - How to Find Stories in Data",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=UqqfADo1LJg"
}
]
}
| 256 |
1,186 | # -*- coding: utf-8 -*-
from base import *
import requests
import socket
ASSERT_RESPONSE = b"Hello world!"
RESPONSE = [b"Hello ", b"world!"]
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8000
DEFAULT_METHOD = "GET"
DEFAULT_PATH = "/PATH?key=value"
DEFAULT_VERSION = "HTTP/1.0"
DEFAULT_ADDR = (DEFAULT_HOST, DEFAULT_PORT)
DEFAULT_HEADER = [
("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; ja; rv:1.9.2.7) Gecko/20100715 Ubuntu/10.04 (lucid) Firefox/3.6.7"),
("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
("Accept-Language", "ja,en-us;q=0.7,en;q=0.3"),
("Accept-Encoding", "gzip,deflate"),
("Accept-Charset", "Shift_JIS,utf-8;q=0.7,*;q=0.7"),
("Keep-Alive","115"),
("Connection", "keep-alive"),
("Cache-Control", "max-age=0"),
]
ERR_400 = b"HTTP/1.0 400 Bad Request"
def to_bytes(s):
if isinstance(s, bytes):
return s
else:
return s.encode('iso-8859-1')
def send_data(addr=DEFAULT_ADDR, method=DEFAULT_METHOD, path=DEFAULT_PATH,
version=DEFAULT_VERSION, headers=DEFAULT_HEADER, post_data=None):
try:
sock = socket.create_connection(addr)
sock.send(to_bytes("%s %s %s\r\n" % (method, path, version)))
sock.send(to_bytes("Host: %s\r\n" % addr[0]))
for h in headers:
sock.send(to_bytes("%s: %s\r\n" % h))
sock.send(b"\r\n")
if post_data:
sock.send(to_bytes(post_data))
sock.send(b"\r\n")
data = sock.recv(1024 * 2)
return data
except:
import traceback
print(traceback.format_exc())
raise
class App(BaseApp):
environ = None
def __call__(self, environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
return RESPONSE
def test_long_url1():
def client():
query = "A" * 8191
return requests.get("http://localhost:8000/" + query)
env, res = run_client(client, App)
assert(res.status_code == 200)
def test_long_url2():
def client():
query = "A" * 8192
return requests.get("http://localhost:8000/" + query)
env, res = run_client(client, App)
assert(res.status_code == 400)
def test_bad_method1():
def client():
        return send_data(method="")
env, res = run_client(client, App)
assert(res.split(b"\r\n")[0] == ERR_400)
def test_bad_method2():
def client():
        return send_data(method="GET" * 100)
env, res = run_client(client, App)
assert(res.split(b"\r\n")[0] == ERR_400)
def test_bad_path():
def client():
        return send_data(path="..")
env, res = run_client(client, App)
assert(res.split(b"\r\n")[0] == ERR_400)
| 1,406 |
3,066 | <filename>server/src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.joda;
import org.joda.time.format.DateTimeFormatter;
import java.util.Locale;
import java.util.Objects;
/**
* A simple wrapper around {@link DateTimeFormatter} that retains the
* format that was used to create it.
*/
public class FormatDateTimeFormatter {
private final String format;
private final DateTimeFormatter parser;
private final DateTimeFormatter printer;
private final Locale locale;
public FormatDateTimeFormatter(String format, DateTimeFormatter parser, Locale locale) {
this(format, parser, parser, locale);
}
public FormatDateTimeFormatter(String format, DateTimeFormatter parser, DateTimeFormatter printer, Locale locale) {
this.format = format;
this.locale = Objects.requireNonNull(locale, "A locale is required as JODA otherwise uses the default locale");
this.printer = printer.withLocale(locale).withDefaultYear(1970);
this.parser = parser.withLocale(locale).withDefaultYear(1970);
}
public String format() {
return format;
}
public DateTimeFormatter parser() {
return parser;
}
public DateTimeFormatter printer() {
return this.printer;
}
public Locale locale() {
return locale;
}
}
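// Usage sketch (illustrative, not part of the original file): the wrapper keeps
// the pattern string alongside the Joda parser/printer, e.g.
//
//   FormatDateTimeFormatter f = new FormatDateTimeFormatter(
//       "yyyy-MM-dd", DateTimeFormat.forPattern("yyyy-MM-dd"), Locale.ROOT);
//   f.parser().parseDateTime("2018-01-31");
//   f.format(); // -> "yyyy-MM-dd"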
| 653 |
1,001 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class CreateDrdsDBRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2019-01-23', 'CreateDrdsDB','drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Encode(self):
return self.get_query_params().get('Encode')
def set_Encode(self,Encode):
self.add_query_param('Encode',Encode)
def get_RdsInstances(self):
return self.get_query_params().get('RdsInstance')
def set_RdsInstances(self, RdsInstances):
for depth1 in range(len(RdsInstances)):
if RdsInstances[depth1] is not None:
self.add_query_param('RdsInstance.' + str(depth1 + 1) , RdsInstances[depth1])
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_RdsSuperAccounts(self):
return self.get_query_params().get('RdsSuperAccount')
def set_RdsSuperAccounts(self, RdsSuperAccounts):
for depth1 in range(len(RdsSuperAccounts)):
if RdsSuperAccounts[depth1].get('Password') is not None:
self.add_query_param('RdsSuperAccount.' + str(depth1 + 1) + '.Password', RdsSuperAccounts[depth1].get('Password'))
if RdsSuperAccounts[depth1].get('AccountName') is not None:
self.add_query_param('RdsSuperAccount.' + str(depth1 + 1) + '.AccountName', RdsSuperAccounts[depth1].get('AccountName'))
if RdsSuperAccounts[depth1].get('DbInstanceId') is not None:
self.add_query_param('RdsSuperAccount.' + str(depth1 + 1) + '.DbInstanceId', RdsSuperAccounts[depth1].get('DbInstanceId'))
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_DrdsInstanceId(self):
return self.get_query_params().get('DrdsInstanceId')
def set_DrdsInstanceId(self,DrdsInstanceId):
self.add_query_param('DrdsInstanceId',DrdsInstanceId)
def get_DbInstanceIsCreating(self):
return self.get_query_params().get('DbInstanceIsCreating')
def set_DbInstanceIsCreating(self,DbInstanceIsCreating):
self.add_query_param('DbInstanceIsCreating',DbInstanceIsCreating)
def get_InstDbNames(self):
return self.get_query_params().get('InstDbName')
def set_InstDbNames(self, InstDbNames):
for depth1 in range(len(InstDbNames)):
if InstDbNames[depth1].get('ShardDbName') is not None:
for depth2 in range(len(InstDbNames[depth1].get('ShardDbName'))):
if InstDbNames[depth1].get('ShardDbName')[depth2] is not None:
self.add_query_param('InstDbName.' + str(depth1 + 1) + '.ShardDbName.' + str(depth2 + 1) , InstDbNames[depth1].get('ShardDbName')[depth2])
if InstDbNames[depth1].get('DbInstanceId') is not None:
self.add_query_param('InstDbName.' + str(depth1 + 1) + '.DbInstanceId', InstDbNames[depth1].get('DbInstanceId'))
def get_DbName(self):
return self.get_query_params().get('DbName')
def set_DbName(self,DbName):
self.add_query_param('DbName',DbName)
def get_DbInstType(self):
return self.get_query_params().get('DbInstType')
def set_DbInstType(self,DbInstType):
self.add_query_param('DbInstType',DbInstType) | 1,624 |
4,695 | {
"1111001": "The upload file was not found",
"1111002": "Failed to save file %s",
"1111003": "Failed to open file %s",
"1111004": "The contents of the file cannot be empty, %s",
"1111005": "Failed to get file contents %s",
"1111006": "Failed to get host data, error: %s",
"1111007": "Failed to create EXCEL file, error: %",
"1111008": "Failed to get instance data, error: %s",
"1111009": "Failed to get add net device result, error: %s",
"1111010": "Failed to get add net property result, error: %s",
"1111011": "Failed to get net device data, error: %s",
"1111012": "Failed to get net property data, error: %s",
"1111013": "Please fill in user name and password",
"1111014": "User name or password is wrong, please try again",
"1111015": "User name and password can't be found at session.user_info in config file common.conf",
"1111016": "The format of user name and password are wrong, please check session.user_info in config file common.conf",
"1111017": "Unknown login version %s",
"1111018": "Failed to get the EN/CN-username map of instance objuser attribute, error: %s",
"1111019": "No hosts are imported, Failed to validate host %s",
"": ""
}
| 431 |
6,989 | <filename>thrust/detail/complex/ccosh.h
/*
* Copyright 2008-2013 NVIDIA Corporation
* Copyright 2013 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*-
* Copyright (c) 2005 <NAME> and <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* adapted from FreeBSD:
* lib/msun/src/s_ccosh.c
*/
#pragma once
#include <thrust/complex.h>
#include <thrust/detail/complex/math_private.h>
namespace thrust{
namespace detail{
namespace complex{
/*
* Hyperbolic cosine of a complex argument z = x + i y.
*
* cosh(z) = cosh(x+iy)
* = cosh(x) cos(y) + i sinh(x) sin(y).
*
* Exceptional values are noted in the comments within the source code.
* These values and the return value were taken from n1124.pdf.
*/
__host__ __device__ inline
thrust::complex<double> ccosh(const thrust::complex<double>& z){
const double huge = 8.98846567431157953864652595395e+307; // 0x1p1023
double x, y, h;
uint32_t hx, hy, ix, iy, lx, ly;
x = z.real();
y = z.imag();
extract_words(hx, lx, x);
extract_words(hy, ly, y);
ix = 0x7fffffff & hx;
iy = 0x7fffffff & hy;
/* Handle the nearly-non-exceptional cases where x and y are finite. */
if (ix < 0x7ff00000 && iy < 0x7ff00000) {
if ((iy | ly) == 0)
return (thrust::complex<double>(::cosh(x), x * y));
if (ix < 0x40360000) /* small x: normal case */
return (thrust::complex<double>(::cosh(x) * ::cos(y), ::sinh(x) * ::sin(y)));
/* |x| >= 22, so cosh(x) ~= exp(|x|) */
if (ix < 0x40862e42) {
/* x < 710: exp(|x|) won't overflow */
h = ::exp(::fabs(x)) * 0.5;
return (thrust::complex<double>(h * cos(y), copysign(h, x) * sin(y)));
} else if (ix < 0x4096bbaa) {
/* x < 1455: scale to avoid overflow */
thrust::complex<double> z_;
z_ = ldexp_cexp(thrust::complex<double>(fabs(x), y), -1);
return (thrust::complex<double>(z_.real(), z_.imag() * copysign(1.0, x)));
} else {
/* x >= 1455: the result always overflows */
h = huge * x;
return (thrust::complex<double>(h * h * cos(y), h * sin(y)));
}
}
/*
* cosh(+-0 +- I Inf) = dNaN + I sign(d(+-0, dNaN))0.
* The sign of 0 in the result is unspecified. Choice = normally
* the same as dNaN. Raise the invalid floating-point exception.
*
* cosh(+-0 +- I NaN) = d(NaN) + I sign(d(+-0, NaN))0.
* The sign of 0 in the result is unspecified. Choice = normally
* the same as d(NaN).
*/
if ((ix | lx) == 0 && iy >= 0x7ff00000)
return (thrust::complex<double>(y - y, copysign(0.0, x * (y - y))));
/*
* cosh(+-Inf +- I 0) = +Inf + I (+-)(+-)0.
*
* cosh(NaN +- I 0) = d(NaN) + I sign(d(NaN, +-0))0.
* The sign of 0 in the result is unspecified.
*/
if ((iy | ly) == 0 && ix >= 0x7ff00000) {
if (((hx & 0xfffff) | lx) == 0)
return (thrust::complex<double>(x * x, copysign(0.0, x) * y));
return (thrust::complex<double>(x * x, copysign(0.0, (x + x) * y)));
}
/*
* cosh(x +- I Inf) = dNaN + I dNaN.
* Raise the invalid floating-point exception for finite nonzero x.
*
* cosh(x + I NaN) = d(NaN) + I d(NaN).
* Optionally raises the invalid floating-point exception for finite
* nonzero x. Choice = don't raise (except for signaling NaNs).
*/
if (ix < 0x7ff00000 && iy >= 0x7ff00000)
return (thrust::complex<double>(y - y, x * (y - y)));
/*
* cosh(+-Inf + I NaN) = +Inf + I d(NaN).
*
* cosh(+-Inf +- I Inf) = +Inf + I dNaN.
* The sign of Inf in the result is unspecified. Choice = always +.
* Raise the invalid floating-point exception.
*
* cosh(+-Inf + I y) = +Inf cos(y) +- I Inf sin(y)
*/
if (ix >= 0x7ff00000 && ((hx & 0xfffff) | lx) == 0) {
if (iy >= 0x7ff00000)
return (thrust::complex<double>(x * x, x * (y - y)));
return (thrust::complex<double>((x * x) * cos(y), x * sin(y)));
}
/*
* cosh(NaN + I NaN) = d(NaN) + I d(NaN).
*
* cosh(NaN +- I Inf) = d(NaN) + I d(NaN).
* Optionally raises the invalid floating-point exception.
* Choice = raise.
*
* cosh(NaN + I y) = d(NaN) + I d(NaN).
* Optionally raises the invalid floating-point exception for finite
* nonzero y. Choice = don't raise (except for signaling NaNs).
*/
return (thrust::complex<double>((x * x) * (y - y), (x + x) * (y - y)));
}
__host__ __device__ inline
thrust::complex<double> ccos(const thrust::complex<double>& z){
/* ccos(z) = ccosh(I * z) */
return (ccosh(thrust::complex<double>(-z.imag(), z.real())));
}
} // namespace complex
} // namespace detail
template <typename ValueType>
__host__ __device__
inline complex<ValueType> cos(const complex<ValueType>& z){
const ValueType re = z.real();
const ValueType im = z.imag();
return complex<ValueType>(std::cos(re) * std::cosh(im),
-std::sin(re) * std::sinh(im));
}
template <typename ValueType>
__host__ __device__
inline complex<ValueType> cosh(const complex<ValueType>& z){
const ValueType re = z.real();
const ValueType im = z.imag();
return complex<ValueType>(std::cosh(re) * std::cos(im),
std::sinh(re) * std::sin(im));
}
template <>
__host__ __device__
inline thrust::complex<double> cos(const thrust::complex<double>& z){
return detail::complex::ccos(z);
}
template <>
__host__ __device__
inline thrust::complex<double> cosh(const thrust::complex<double>& z){
return detail::complex::ccosh(z);
}
} // namespace thrust
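/* Quick sanity sketch (illustrative, not part of the original header):
 *
 *   thrust::complex<double> z(1.0, 2.0);
 *   thrust::complex<double> w = thrust::cosh(z);
 *   // w.real() ~= cosh(1)*cos(2), w.imag() ~= sinh(1)*sin(2),
 *   // matching the identity quoted in the comment above.
 */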
| 2,776 |
307 | <gh_stars>100-1000
/**
* Copyright 2010 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.jolbox.bonecp;
import java.sql.Connection;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.util.Properties;
// #ifdef JDK7
import java.sql.SQLFeatureNotSupportedException;
import java.util.logging.Logger;
/** A fake JDBC driver for mocking purposes.
* @author Wallace
*
*/
public class MockJDBCDriver implements Driver {
/** Connection handle to return. */
private volatile Connection connection = null;
/** called to return. */
private volatile MockJDBCAnswer mockJDBCAnswer;
/** on/off setting. */
private volatile static boolean active;
/**
* Default constructor
* @throws SQLException
*/
public MockJDBCDriver() throws SQLException{
// default constructor
DriverManager.registerDriver(this);
active = true;
}
/** Stop intercepting requests.
* @throws SQLException
*/
public void unregister() throws SQLException{
this.connection = null;
this.mockJDBCAnswer = null;
active = false;
DriverManager.deregisterDriver(this);
}
	/** Sets the mock answer used to produce connections.
* @param mockJDBCAnswer answer class
* @throws SQLException
*/
public MockJDBCDriver(MockJDBCAnswer mockJDBCAnswer) throws SQLException{
this();
this.mockJDBCAnswer = mockJDBCAnswer;
}
/** Return the connection when requested.
* @param connection
* @throws SQLException
*/
public MockJDBCDriver(Connection connection) throws SQLException{
this();
this.connection = connection;
}
/** {@inheritDoc}
* @see java.sql.Driver#acceptsURL(java.lang.String)
*/
// @Override
public synchronized boolean acceptsURL(String url) throws SQLException {
return active && url.startsWith("jdbc:mock"); // accept anything
}
/** {@inheritDoc}
* @see java.sql.Driver#connect(java.lang.String, java.util.Properties)
*/
// @Override
public synchronized Connection connect(String url, Properties info) throws SQLException {
if (url.startsWith("invalid") || url.equals("")){
throw new SQLException("Mock Driver rejecting invalid URL");
}
if (this.connection != null){
return this.connection;
}
if (this.mockJDBCAnswer == null){
return new MockConnection();
}
return this.mockJDBCAnswer.answer();
}
/** {@inheritDoc}
* @see java.sql.Driver#getMajorVersion()
*/
// @Override
public int getMajorVersion() {
return 1;
}
/** {@inheritDoc}
* @see java.sql.Driver#getMinorVersion()
*/
// @Override
public int getMinorVersion() {
return 0;
}
/** {@inheritDoc}
* @see java.sql.Driver#getPropertyInfo(java.lang.String, java.util.Properties)
*/
// @Override
public synchronized DriverPropertyInfo[] getPropertyInfo(String url, Properties info)
throws SQLException {
return new DriverPropertyInfo[0];
}
/** {@inheritDoc}
* @see java.sql.Driver#jdbcCompliant()
*/
// @Override
public boolean jdbcCompliant() {
return true;
}
// #ifdef JDK7
// @Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return null;
}
// #endif JDK7
/**
* Disable everything.
* @throws SQLException
* @throws SQLException
*/
public synchronized void disable() throws SQLException{
this.connection = null;
this.mockJDBCAnswer = null;
DriverManager.deregisterDriver(this);
}
/**
* @return the connection
* @throws SQLException
*/
public synchronized Connection getConnection() throws SQLException {
if (this.mockJDBCAnswer == null){
return new MockConnection();
}
return this.mockJDBCAnswer.answer();
}
/**
* @param connection the connection to set
*/
public synchronized void setConnection(Connection connection) {
this.connection = connection;
}
/** Return the jdbc answer class
* @return the mockJDBCAnswer
*/
public synchronized MockJDBCAnswer getMockJDBCAnswer() {
return this.mockJDBCAnswer;
}
/** Sets the jdbc mock answer.
* @param mockJDBCAnswer the mockJDBCAnswer to set
*/
public synchronized void setMockJDBCAnswer(MockJDBCAnswer mockJDBCAnswer) {
this.mockJDBCAnswer = mockJDBCAnswer;
}
} | 1,778 |
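// Usage sketch (illustrative only; assumes the mock classes in this package):
//
//   MockJDBCDriver driver = new MockJDBCDriver(); // registers itself
//   Connection c = DriverManager.getConnection("jdbc:mock:test");
//   // ... exercise pooling code against the returned MockConnection ...
//   driver.unregister();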
1,442 | <gh_stars>1000+
#include <escher/message_text_view.h>
#include <assert.h>
namespace Escher {
MessageTextView::MessageTextView(const KDFont * font, I18n::Message message, float horizontalAlignment, float verticalAlignment,
KDColor textColor, KDColor backgroundColor) :
TextView(font, horizontalAlignment, verticalAlignment, textColor, backgroundColor),
m_message(message)
{
}
const char * MessageTextView::text() const {
return I18n::translate(m_message);
}
void MessageTextView::setText(const char * text) {
assert(false);
}
void MessageTextView::setMessage(I18n::Message message) {
if (message != m_message) {
m_message = message;
markRectAsDirty(bounds());
}
}
KDSize MessageTextView::minimalSizeForOptimalDisplay() const {
return m_font->stringSize(text());
}
}
| 267 |
3,102 | <gh_stars>1000+
// RUN: %clang -fsyntax-only %s
// rdar://6757323
// foo \
#define blork 32
| 43 |
8,514 | <reponame>Jimexist/thrift
package org.apache.thrift.transport;
import junit.framework.TestCase;
import java.nio.charset.StandardCharsets;
import org.apache.thrift.TException;
import java.nio.ByteBuffer;
public class TestTByteBuffer extends TestCase {
public void testReadWrite() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(16));
byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8));
assertEquals("Hello World", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
}
public void testReuseReadWrite() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(16));
byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8));
assertEquals("Hello World", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
byteBuffer.clear();
byteBuffer.write("Goodbye Horses".getBytes(StandardCharsets.UTF_8));
assertEquals("Goodbye Horses", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
}
public void testOverflow() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(4));
try {
byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8));
fail("Expected write operation to fail with TTransportException");
} catch (TTransportException e) {
assertEquals("Not enough room in output buffer", e.getMessage());
}
}
}
| 480 |
385 | import open3d as o3d
import numpy as np
import config
import constants
from config import args
from utils.temporal_optimization import OneEuroFilter
from visualization.create_meshes import create_body_mesh, create_body_model
def convert_trans_scale(trans):
    # Note: scales `trans` in place (per-axis) and also returns it.
    trans *= np.array([0.4, 0.6, 0.7])
    return trans
class Open3d_visualizer(object):
def __init__(self, multi_mode=False):
self.window_size = np.array([1280,1080])
#self.window_size = np.array([720,720])
self._init_viewer_()
if not multi_mode:
self.prepare_single_person_scene()
else:
self.prepare_multi_person_scene()
def _init_viewer_(self):
self.viewer = o3d.visualization.Visualizer()
self.viewer.create_window(width=self.window_size[0], height=self.window_size[1], window_name='ROMP - output')
def _set_view_configs_(self, cam_location, focal_length=1000):
view_control = self.viewer.get_view_control()
cam_params = view_control.convert_to_pinhole_camera_parameters()
intrinsics = cam_params.intrinsic.intrinsic_matrix.copy()
focal_length = max(self.window_size)/2. / np.tan(np.radians(args().FOV/2.))
intrinsics[0,0], intrinsics[1,1] = focal_length, focal_length
cam_params.intrinsic.intrinsic_matrix = intrinsics
#print('Open3d_visualizer Camera intrinsic matrix: ', intrinsics)
extrinsics = np.eye(4)
extrinsics[0:3, 3] = cam_location
#print('Open3d_visualizer Camera extrinsic matrix: ', extrinsics)
cam_params.extrinsic = extrinsics
view_control.convert_from_pinhole_camera_parameters(cam_params)
view_control.set_constant_z_far(100)
render_option = self.viewer.get_render_option()
render_option.load_from_json('romp/lib/visualization/vis_cfgs/render_option.json')
self.viewer.update_renderer()
def update_viewer(self):
self.viewer.poll_events()
self.viewer.update_renderer()
def prepare_single_person_scene(self):
self.mesh = create_body_mesh()
self.viewer.add_geometry(self.mesh)
        cam_location = np.array([0., 0., 0.])  # float, so the z-offset below is not truncated
if not args().add_trans:
cam_location[2] = 2.6/np.tan(np.radians(args().FOV/2.))
self._set_view_configs_(cam_location=cam_location)
self.smoother = self.create_filter()
self.update_viewer()
def create_filter(self):
return {'verts':OneEuroFilter(4.,0), 'trans':OneEuroFilter(3.,0.)}
def process_single_mesh(self, verts, trans, smoother, mesh_ob):
verts = smoother['verts'].process(verts)
#verts = np.matmul(self.view_mat, verts.T).T
if trans is not None:
trans_converted = convert_trans_scale(trans)
verts += smoother['trans'].process(trans_converted)[None]
mesh_ob.vertices = o3d.utility.Vector3dVector(verts)
mesh_ob.compute_triangle_normals()
mesh_ob.compute_vertex_normals()
self.viewer.update_geometry(mesh_ob)
def run(self, verts, trans=None):
self.process_single_mesh(verts, trans, self.smoother, self.mesh)
self.update_viewer()
def prepare_multi_person_scene(self, start_person_num=6):
self.pid2mid_dict = {}
self.mesh_usage_change_cacher = []
self.mesh_num = start_person_num
self.mesh_ids_available = list(range(start_person_num))
self.meshes = {mid:create_body_mesh() for mid in self.mesh_ids_available}
self.zero_mesh = o3d.utility.Vector3dVector(np.zeros((len(self.meshes[0].vertices),3)))
self.filter_dict = {}
for mid in self.mesh_ids_available:
self.viewer.add_geometry(self.meshes[mid])
self.update_viewer()
for mid in self.mesh_ids_available:
self.reset_mesh(mid)
self.update_viewer()
cam_location = np.array([0,0,0])
self._set_view_configs_(cam_location=cam_location)
def add_mesh(self):
print('Adding new Mesh {}'.format(self.mesh_num))
self.mesh_ids_available.append(self.mesh_num)
new_mesh = create_body_mesh()
self.filter_dict[self.mesh_num] = self.create_filter()
new_mesh.vertices=self.zero_mesh
new_mesh.compute_triangle_normals()
new_mesh.compute_vertex_normals()
self.meshes[self.mesh_num] = new_mesh
self.viewer.add_geometry(self.meshes[self.mesh_num])
#self.update_viewer()
self.mesh_num += 1
def reset_mesh(self, mesh_id):
print('Reseting Mesh {}'.format(mesh_id))
self.meshes[mesh_id].vertices = self.zero_mesh
self.meshes[mesh_id].compute_triangle_normals()
self.meshes[mesh_id].compute_vertex_normals()
self.filter_dict[mesh_id] = self.create_filter()
if mesh_id not in self.mesh_ids_available:
self.mesh_ids_available.append(mesh_id)
self.viewer.update_geometry(self.meshes[mesh_id])
#self.update_viewer()
def run_multiperson(self, verts, trans=None, tracked_ids=None):
        #print('receiving {} people'.format(len(verts)))
        assert len(verts) == len(trans) == len(tracked_ids), 'length is not equal~'
for vert, tran, pid in zip(verts, trans, tracked_ids):
if pid not in self.pid2mid_dict:
if len(self.mesh_ids_available)==0:
self.add_mesh()
self.pid2mid_dict[pid] = self.mesh_ids_available.pop()
mesh_id = self.pid2mid_dict[pid]
self.process_single_mesh(vert, tran, self.filter_dict[mesh_id], self.meshes[mesh_id])
# reset the disappeared people
for pid in self.mesh_usage_change_cacher:
if pid in self.pid2mid_dict and pid not in tracked_ids:
self.reset_mesh(self.pid2mid_dict[pid])
self.pid2mid_dict.pop(pid, None)
self.mesh_usage_change_cacher = tracked_ids
self.update_viewer() | 2,743 |
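
# Rough usage sketch (illustrative; shapes are assumptions based on the calls above):
#
#   vis = Open3d_visualizer(multi_mode=False)
#   for verts, trans in result_stream:  # verts: (N, 3) array, trans: (3,) array
#       vis.run(verts, trans)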
1,515 | <reponame>canwdev/PptxGenJS<gh_stars>1000+
{
"rules": {
"array-type": false,
"arrow-return-shorthand": [true, "multiline"],
"no-duplicate-switch-case": true,
"no-duplicate-variable": true,
"no-empty": true,
"no-eval": true,
"no-string-literal": false,
"no-string-throw": true,
"no-use-before-declare": true,
"no-var-keyword": true,
"prefer-template": false,
"switch-default": true,
"triple-equals": {
"options": ["allow-null-check"]
}
}
}
| 234 |
310 | package org.seasar.doma.internal.apt.processor.entity;
import org.seasar.doma.Entity;
import org.seasar.doma.Id;
import org.seasar.doma.Version;
@Entity
public class VersionDuplicatedEntity {
@Id Integer id;
@Version Integer version;
@Version Integer version2;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Integer getVersion() {
return version;
}
public void setVersion(Integer version) {
this.version = version;
}
public Integer getVersion2() {
return version2;
}
public void setVersion2(Integer version2) {
this.version2 = version2;
}
}
| 224 |
2,777 | <filename>launcher/ui/themes/FusionTheme.h<gh_stars>1000+
#pragma once
#include "ITheme.h"
class FusionTheme: public ITheme
{
public:
virtual ~FusionTheme() {}
QString qtTheme() override;
};
| 79 |
363 | <reponame>LarsP8/service-proxy<filename>core/src/main/java/com/predic8/membrane/core/jmx/JmxExporter.java<gh_stars>100-1000
/*
* Copyright 2016 predic8 GmbH, www.predic8.com
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.predic8.membrane.core.jmx;
import com.predic8.membrane.annot.MCElement;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.Lifecycle;
import org.springframework.jmx.export.MBeanExporter;
import org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource;
import org.springframework.jmx.export.assembler.MetadataMBeanInfoAssembler;
import org.springframework.jmx.support.RegistrationPolicy;
import java.util.HashMap;
@MCElement(name=JmxExporter.JMX_EXPORTER_NAME)
public class JmxExporter extends MBeanExporter implements Lifecycle, ApplicationContextAware, DisposableBean {
public static final String JMX_EXPORTER_NAME = "jmxExporter";
HashMap<String, Object> jmxBeans = new HashMap<String, Object>();
ApplicationContext context;
MBeanExporter exporter;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.context = applicationContext;
}
@Override
public void start() {
exporter = new MBeanExporter();
exporter.setRegistrationPolicy(RegistrationPolicy.IGNORE_EXISTING);
MetadataMBeanInfoAssembler assembler = new MetadataMBeanInfoAssembler();
assembler.setAttributeSource(new AnnotationJmxAttributeSource());
assembler.afterPropertiesSet();
exporter.setAssembler(assembler);
}
@Override
public void stop() {
}
@Override
public boolean isRunning() {
return false;
}
@Override
public void destroy() {
jmxBeans.clear();
exporter.destroy();
}
public void addBean(String fullyQualifiedMBeanName, Object bean ) {
jmxBeans.put(fullyQualifiedMBeanName,bean);
}
public void removeBean(String fullyQualifiedMBeanName){
jmxBeans.remove(fullyQualifiedMBeanName);
}
public void initAfterBeansAdded(){
exporter.setBeans(jmxBeans);
exporter.afterPropertiesSet();
exporter.afterSingletonsInstantiated();
}
}
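// Intended call order (illustrative sketch, inferred from the methods above;
// the MBean name is a made-up example):
//
//   JmxExporter exporter = ...; // obtained from the Spring context
//   exporter.start();
//   exporter.addBean("com.predic8:type=Router", routerBean);
//   exporter.initAfterBeansAdded(); // registers all collected MBeans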
| 1,021 |
1,069 | <gh_stars>1000+
# coding: utf-8
from django.db import models
from django_th.models import TriggerService
from django_th.models.services import Services
from django.utils.translation import ugettext_lazy as _
SCOPES = (
('home', _('Home')),
('public', _('Public'))
)
class Mastodon(Services):
"""
Model for Mastodon Service
"""
timeline = models.CharField(max_length=10, default="home", choices=SCOPES)
tooter = models.CharField(max_length=80, null=True, blank=True)
fav = models.BooleanField(default=False)
tag = models.CharField(max_length=80, null=True, blank=True)
since_id = models.BigIntegerField(null=True, blank=True)
max_id = models.BigIntegerField(null=True, blank=True)
count = models.IntegerField(null=True, blank=True)
trigger = models.ForeignKey(TriggerService, on_delete=models.CASCADE)
class Meta:
app_label = 'th_mastodon'
db_table = 'django_th_mastodon'
def show(self):
"""
:return: string representing object
"""
return "My Mastodon %s %s" % (self.timeline, self.trigger)
def __str__(self):
return "%s" % self.timeline
| 455 |
3,702 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package app.metatron.discovery.extension.dataconnection.jdbc;
import java.util.Map;
/**
* The interface Jdbc connect information.
*/
public interface JdbcConnectInformation {
/**
* Gets authentication type.
*
* @return the authentication type
*/
AuthenticationType getAuthenticationType();
/**
* Gets implementor.
*
* @return the implementor
*/
String getImplementor();
/**
* Gets url.
*
* @return the url
*/
String getUrl();
/**
* Gets options.
*
* @return the options
*/
String getOptions();
/**
* Gets hostname.
*
* @return the hostname
*/
String getHostname();
/**
* Gets port.
*
* @return the port
*/
Integer getPort();
/**
* Gets database.
*
* @return the database
*/
String getDatabase();
/**
* Gets sid.
*
* @return the sid
*/
String getSid();
/**
* Gets catalog.
*
* @return the catalog
*/
String getCatalog();
/**
* Gets properties.
*
* @return the properties
*/
String getProperties();
/**
* Gets username.
*
* @return the username
*/
String getUsername();
/**
* Gets password.
*
* @return the password
*/
String getPassword();
/**
* Gets properties map.
*
* @return the properties map
*/
Map<String, String> getPropertiesMap();
/**
* The enum Authentication type.
*/
enum AuthenticationType {
/**
* Manual authentication type.
*/
MANUAL,
/**
* Userinfo authentication type.
*/
USERINFO,
/**
* Dialog authentication type.
*/
DIALOG
}
}
| 756 |
7,353 | <gh_stars>1000+
/**
* @file list.c
* @author <NAME> <<EMAIL>>
*
* @section LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
*
* List construction module.
*
* Synopsis:
* list(elem1, ..., elemN)
* list listfrom(list l1, ..., list lN)
*
* Description:
* The first form creates a list with the given elements.
* The second form creates a list by concatenating the given
* lists.
*
* Variables:
* (empty) - list containing elem1, ..., elemN
* length - number of elements in list
*
* Synopsis: list::append(arg)
*
* Synopsis: list::appendv(list arg)
* Description: Appends the elements of arg to the list.
*
* Synopsis: list::length()
* Variables:
* (empty) - number of elements in list at the time of initialization
* of this method
*
* Synopsis: list::get(string index)
* Variables:
* (empty) - element of list at position index (starting from zero) at the time of initialization
*
* Synopsis: list::shift()
*
* Synopsis: list::contains(value)
* Variables:
* (empty) - "true" if list contains value, "false" if not
*
* Synopsis:
* list::find(start_pos, value)
* Description:
* finds the first occurrence of 'value' in the list at position >='start_pos'.
* Variables:
* pos - position of element, or "none" if not found
* found - "true" if found, "false" if not
*
 * Synopsis:
* list::remove_at(remove_pos)
* Description:
* Removes the element at position 'remove_pos', which must refer to an existing element.
*
* Synopsis:
* list::remove(value)
* Description:
* Removes the first occurrence of value in the list, which must be in the list.
*
* Synopsis:
* list::set(list l1, ..., list lN)
* Description:
* Replaces the list with the concatenation of given lists.
*/
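/* Illustrative NCD usage (sketch only; consult the NCD documentation for the
 * exact statement syntax):
 *
 *   list("a", "b") l;
 *   l->append("c");
 *   l->length() len;      # len is "3"
 *   l->get("1") second;   # second is "b"
 */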
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <misc/offset.h>
#include <misc/parse_number.h>
#include <structure/IndexedList.h>
#include <ncd/NCDModule.h>
#include <ncd/extra/value_utils.h>
#include <generated/blog_channel_ncd_list.h>
#define ModuleLog(i, ...) NCDModuleInst_Backend_Log((i), BLOG_CURRENT_CHANNEL, __VA_ARGS__)
struct elem {
IndexedListNode il_node;
NCDValMem mem;
NCDValRef val;
};
struct instance {
NCDModuleInst *i;
IndexedList il;
};
struct length_instance {
NCDModuleInst *i;
uint64_t length;
};
struct get_instance {
NCDModuleInst *i;
NCDValMem mem;
NCDValRef val;
};
struct contains_instance {
NCDModuleInst *i;
int contains;
};
struct find_instance {
NCDModuleInst *i;
int is_found;
uint64_t found_pos;
};
static uint64_t list_count (struct instance *o)
{
return IndexedList_Count(&o->il);
}
static struct elem * insert_value (NCDModuleInst *i, struct instance *o, NCDValRef val, uint64_t idx)
{
ASSERT(idx <= list_count(o))
ASSERT(!NCDVal_IsInvalid(val))
struct elem *e = malloc(sizeof(*e));
if (!e) {
ModuleLog(i, BLOG_ERROR, "malloc failed");
goto fail0;
}
NCDValMem_Init(&e->mem);
e->val = NCDVal_NewCopy(&e->mem, val);
if (NCDVal_IsInvalid(e->val)) {
goto fail1;
}
IndexedList_InsertAt(&o->il, &e->il_node, idx);
return e;
fail1:
NCDValMem_Free(&e->mem);
free(e);
fail0:
return NULL;
}
static void remove_elem (struct instance *o, struct elem *e)
{
IndexedList_Remove(&o->il, &e->il_node);
NCDValMem_Free(&e->mem);
free(e);
}
static struct elem * get_elem_at (struct instance *o, uint64_t idx)
{
ASSERT(idx < list_count(o))
IndexedListNode *iln = IndexedList_GetAt(&o->il, idx);
struct elem *e = UPPER_OBJECT(iln, struct elem, il_node);
return e;
}
static struct elem * get_first_elem (struct instance *o)
{
ASSERT(list_count(o) > 0)
IndexedListNode *iln = IndexedList_GetFirst(&o->il);
struct elem *e = UPPER_OBJECT(iln, struct elem, il_node);
return e;
}
static struct elem * get_last_elem (struct instance *o)
{
ASSERT(list_count(o) > 0)
IndexedListNode *iln = IndexedList_GetLast(&o->il);
struct elem *e = UPPER_OBJECT(iln, struct elem, il_node);
return e;
}
static void cut_list_front (struct instance *o, uint64_t count)
{
while (list_count(o) > count) {
remove_elem(o, get_first_elem(o));
}
}
static void cut_list_back (struct instance *o, uint64_t count)
{
while (list_count(o) > count) {
remove_elem(o, get_last_elem(o));
}
}
static int append_list_contents (NCDModuleInst *i, struct instance *o, NCDValRef args)
{
ASSERT(NCDVal_IsList(args))
uint64_t orig_count = list_count(o);
size_t append_count = NCDVal_ListCount(args);
for (size_t j = 0; j < append_count; j++) {
NCDValRef elem = NCDVal_ListGet(args, j);
if (!insert_value(i, o, elem, list_count(o))) {
goto fail;
}
}
return 1;
fail:
cut_list_back(o, orig_count);
return 0;
}
static int append_list_contents_contents (NCDModuleInst *i, struct instance *o, NCDValRef args)
{
ASSERT(NCDVal_IsList(args))
uint64_t orig_count = list_count(o);
size_t append_count = NCDVal_ListCount(args);
for (size_t j = 0; j < append_count; j++) {
NCDValRef elem = NCDVal_ListGet(args, j);
if (!NCDVal_IsList(elem)) {
ModuleLog(i, BLOG_ERROR, "wrong type");
goto fail;
}
if (!append_list_contents(i, o, elem)) {
goto fail;
}
}
return 1;
fail:
cut_list_back(o, orig_count);
return 0;
}
static struct elem * find_elem (struct instance *o, NCDValRef val, uint64_t start_idx, uint64_t *out_idx)
{
if (start_idx >= list_count(o)) {
return NULL;
}
for (IndexedListNode *iln = IndexedList_GetAt(&o->il, start_idx); iln; iln = IndexedList_GetNext(&o->il, iln)) {
struct elem *e = UPPER_OBJECT(iln, struct elem, il_node);
if (NCDVal_Compare(e->val, val) == 0) {
if (out_idx) {
*out_idx = start_idx;
}
return e;
}
start_idx++;
}
return NULL;
}
static int list_to_value (NCDModuleInst *i, struct instance *o, NCDValMem *mem, NCDValRef *out_val)
{
*out_val = NCDVal_NewList(mem, IndexedList_Count(&o->il));
if (NCDVal_IsInvalid(*out_val)) {
goto fail;
}
for (IndexedListNode *iln = IndexedList_GetFirst(&o->il); iln; iln = IndexedList_GetNext(&o->il, iln)) {
struct elem *e = UPPER_OBJECT(iln, struct elem, il_node);
NCDValRef copy = NCDVal_NewCopy(mem, e->val);
if (NCDVal_IsInvalid(copy)) {
goto fail;
}
if (!NCDVal_ListAppend(*out_val, copy)) {
goto fail;
}
}
return 1;
fail:
return 0;
}
static void func_new_list (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct instance *o = vo;
o->i = i;
// init list
IndexedList_Init(&o->il);
// append contents
if (!append_list_contents(i, o, params->args)) {
goto fail1;
}
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail1:
cut_list_front(o, 0);
NCDModuleInst_Backend_DeadError(i);
}
static void func_new_listfrom (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct instance *o = vo;
o->i = i;
// init list
IndexedList_Init(&o->il);
// append contents contents
if (!append_list_contents_contents(i, o, params->args)) {
goto fail1;
}
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail1:
cut_list_front(o, 0);
NCDModuleInst_Backend_DeadError(i);
}
static void func_die (void *vo)
{
struct instance *o = vo;
// free list elements
cut_list_front(o, 0);
NCDModuleInst_Backend_Dead(o->i);
}
static int func_getvar (void *vo, const char *name, NCDValMem *mem, NCDValRef *out)
{
struct instance *o = vo;
if (!strcmp(name, "")) {
if (!list_to_value(o->i, o, mem, out)) {
return 0;
}
return 1;
}
if (!strcmp(name, "length")) {
*out = ncd_make_uintmax(mem, list_count(o));
return 1;
}
return 0;
}
static void append_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// check arguments
NCDValRef arg;
if (!NCDVal_ListRead(params->args, 1, &arg)) {
ModuleLog(i, BLOG_ERROR, "wrong arity");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// append
if (!insert_value(i, mo, arg, list_count(mo))) {
goto fail0;
}
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void appendv_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// check arguments
NCDValRef arg;
if (!NCDVal_ListRead(params->args, 1, &arg)) {
ModuleLog(i, BLOG_ERROR, "wrong arity");
goto fail0;
}
if (!NCDVal_IsList(arg)) {
ModuleLog(i, BLOG_ERROR, "wrong type");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// append
if (!append_list_contents(i, mo, arg)) {
goto fail0;
}
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void length_func_new (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct length_instance *o = vo;
o->i = i;
// check arguments
if (!NCDVal_ListRead(params->args, 0)) {
ModuleLog(o->i, BLOG_ERROR, "wrong arity");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// remember length
o->length = list_count(mo);
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void length_func_die (void *vo)
{
struct length_instance *o = vo;
NCDModuleInst_Backend_Dead(o->i);
}
static int length_func_getvar (void *vo, const char *name, NCDValMem *mem, NCDValRef *out)
{
struct length_instance *o = vo;
if (!strcmp(name, "")) {
*out = ncd_make_uintmax(mem, o->length);
return 1;
}
return 0;
}
static void get_func_new (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct get_instance *o = vo;
o->i = i;
// check arguments
NCDValRef index_arg;
if (!NCDVal_ListRead(params->args, 1, &index_arg)) {
ModuleLog(o->i, BLOG_ERROR, "wrong arity");
goto fail0;
}
if (!NCDVal_IsString(index_arg)) {
ModuleLog(o->i, BLOG_ERROR, "wrong type");
goto fail0;
}
uintmax_t index;
if (!ncd_read_uintmax(index_arg, &index)) {
ModuleLog(o->i, BLOG_ERROR, "wrong value");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// check index
if (index >= list_count(mo)) {
ModuleLog(o->i, BLOG_ERROR, "no element at index %"PRIuMAX, index);
goto fail0;
}
// get element
struct elem *e = get_elem_at(mo, index);
// init mem
NCDValMem_Init(&o->mem);
// copy value
o->val = NCDVal_NewCopy(&o->mem, e->val);
if (NCDVal_IsInvalid(o->val)) {
goto fail1;
}
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail1:
NCDValMem_Free(&o->mem);
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void get_func_die (void *vo)
{
struct get_instance *o = vo;
// free mem
NCDValMem_Free(&o->mem);
NCDModuleInst_Backend_Dead(o->i);
}
static int get_func_getvar (void *vo, const char *name, NCDValMem *mem, NCDValRef *out)
{
struct get_instance *o = vo;
if (!strcmp(name, "")) {
*out = NCDVal_NewCopy(mem, o->val);
return 1;
}
return 0;
}
static void shift_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// check arguments
if (!NCDVal_ListRead(params->args, 0)) {
ModuleLog(i, BLOG_ERROR, "wrong arity");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// check first
if (list_count(mo) == 0) {
ModuleLog(i, BLOG_ERROR, "list has no elements");
goto fail0;
}
// remove first
remove_elem(mo, get_first_elem(mo));
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void contains_func_new (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct contains_instance *o = vo;
o->i = i;
// read arguments
NCDValRef value_arg;
if (!NCDVal_ListRead(params->args, 1, &value_arg)) {
ModuleLog(o->i, BLOG_ERROR, "wrong arity");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// search
o->contains = !!find_elem(mo, value_arg, 0, NULL);
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void contains_func_die (void *vo)
{
struct contains_instance *o = vo;
NCDModuleInst_Backend_Dead(o->i);
}
static int contains_func_getvar (void *vo, const char *name, NCDValMem *mem, NCDValRef *out)
{
struct contains_instance *o = vo;
if (!strcmp(name, "")) {
*out = ncd_make_boolean(mem, o->contains, o->i->params->iparams->string_index);
return 1;
}
return 0;
}
static void find_func_new (void *vo, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
struct find_instance *o = vo;
o->i = i;
// read arguments
NCDValRef start_pos_arg;
NCDValRef value_arg;
if (!NCDVal_ListRead(params->args, 2, &start_pos_arg, &value_arg)) {
ModuleLog(o->i, BLOG_ERROR, "wrong arity");
goto fail0;
}
if (!NCDVal_IsString(start_pos_arg)) {
ModuleLog(o->i, BLOG_ERROR, "wrong type");
goto fail0;
}
// read start position
uintmax_t start_pos;
if (!ncd_read_uintmax(start_pos_arg, &start_pos) || start_pos > UINT64_MAX) {
ModuleLog(o->i, BLOG_ERROR, "wrong start pos");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// find
o->is_found = !!find_elem(mo, value_arg, start_pos, &o->found_pos);
// signal up
NCDModuleInst_Backend_Up(o->i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void find_func_die (void *vo)
{
struct find_instance *o = vo;
NCDModuleInst_Backend_Dead(o->i);
}
static int find_func_getvar (void *vo, const char *name, NCDValMem *mem, NCDValRef *out)
{
struct find_instance *o = vo;
if (!strcmp(name, "pos")) {
char value[64] = "none";
if (o->is_found) {
generate_decimal_repr_string(o->found_pos, value);
}
*out = NCDVal_NewString(mem, value);
return 1;
}
if (!strcmp(name, "found")) {
*out = ncd_make_boolean(mem, o->is_found, o->i->params->iparams->string_index);
return 1;
}
return 0;
}
static void removeat_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// read arguments
NCDValRef remove_pos_arg;
if (!NCDVal_ListRead(params->args, 1, &remove_pos_arg)) {
ModuleLog(i, BLOG_ERROR, "wrong arity");
goto fail0;
}
if (!NCDVal_IsString(remove_pos_arg)) {
ModuleLog(i, BLOG_ERROR, "wrong type");
goto fail0;
}
// read position
uintmax_t remove_pos;
if (!ncd_read_uintmax(remove_pos_arg, &remove_pos)) {
ModuleLog(i, BLOG_ERROR, "wrong pos");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// check position
if (remove_pos >= list_count(mo)) {
ModuleLog(i, BLOG_ERROR, "pos out of range");
goto fail0;
}
// remove
remove_elem(mo, get_elem_at(mo, remove_pos));
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void remove_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// read arguments
NCDValRef value_arg;
if (!NCDVal_ListRead(params->args, 1, &value_arg)) {
ModuleLog(i, BLOG_ERROR, "wrong arity");
goto fail0;
}
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// find element
struct elem *e = find_elem(mo, value_arg, 0, NULL);
if (!e) {
ModuleLog(i, BLOG_ERROR, "value does not exist");
goto fail0;
}
// remove element
remove_elem(mo, e);
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static void set_func_new (void *unused, NCDModuleInst *i, const struct NCDModuleInst_new_params *params)
{
// get method object
struct instance *mo = NCDModuleInst_Backend_GetUser((NCDModuleInst *)params->method_user);
// remember old count
uint64_t old_count = list_count(mo);
// append contents of our lists
if (!append_list_contents_contents(i, mo, params->args)) {
goto fail0;
}
// remove old elements
cut_list_front(mo, list_count(mo) - old_count);
// signal up
NCDModuleInst_Backend_Up(i);
return;
fail0:
NCDModuleInst_Backend_DeadError(i);
}
static struct NCDModule modules[] = {
{
.type = "list",
.func_new2 = func_new_list,
.func_die = func_die,
.func_getvar = func_getvar,
.alloc_size = sizeof(struct instance)
}, {
.type = "listfrom",
.base_type = "list",
.func_new2 = func_new_listfrom,
.func_die = func_die,
.func_getvar = func_getvar,
.alloc_size = sizeof(struct instance)
}, {
.type = "concatlist", // alias for listfrom
.base_type = "list",
.func_new2 = func_new_listfrom,
.func_die = func_die,
.func_getvar = func_getvar,
.alloc_size = sizeof(struct instance)
}, {
.type = "list::append",
.func_new2 = append_func_new
}, {
.type = "list::appendv",
.func_new2 = appendv_func_new
}, {
.type = "list::length",
.func_new2 = length_func_new,
.func_die = length_func_die,
.func_getvar = length_func_getvar,
.alloc_size = sizeof(struct length_instance)
}, {
.type = "list::get",
.func_new2 = get_func_new,
.func_die = get_func_die,
.func_getvar = get_func_getvar,
.alloc_size = sizeof(struct get_instance)
}, {
.type = "list::shift",
.func_new2 = shift_func_new
}, {
.type = "list::contains",
.func_new2 = contains_func_new,
.func_die = contains_func_die,
.func_getvar = contains_func_getvar,
.alloc_size = sizeof(struct contains_instance)
}, {
.type = "list::find",
.func_new2 = find_func_new,
.func_die = find_func_die,
.func_getvar = find_func_getvar,
.alloc_size = sizeof(struct find_instance)
}, {
.type = "list::remove_at",
.func_new2 = removeat_func_new
}, {
.type = "list::remove",
.func_new2 = remove_func_new
}, {
.type = "list::set",
.func_new2 = set_func_new
}, {
.type = NULL
}
};
const struct NCDModuleGroup ncdmodule_list = {
.modules = modules
};
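/*
 * Usage sketch in the NCD language (illustrative only; statement and method
 * names follow the module types registered above):
 *
 *   list("a", "b") l;      # l.length is "2"
 *   l->append("c");        # l is now ("a", "b", "c")
 *   l->remove_at("0");     # l is now ("b", "c")
 */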
| 9,862 |
469 | {
"continue": "Продолжить",
"copy": "Copy",
"copySeed": "Copy Seed phrase",
"createAccount": "Создать аккаунт",
"goBack": "go back",
"notHaveAccounts": "У вас нет сохранённых аккаунтов",
"nothingHere": "Пока ничего нет…",
"password": {
"error": "<PASSWORD>!"
},
"userList": {
"address": "Адрес / Название аккаунта",
"buttons": {
"login": "Войти"
},
"exportUser": "Экспортируйте ваш аккаунт",
"notHaveAccount": "Чтобы продолжить, пожалуйста, войдите",
"password": "<PASSWORD>",
"placeholders": {
"password": "<PASSWORD>"
},
"title": "С возвращением"
},
"warn": "Since only you control your money, you’ll need to save your Seed phrase in case this app is deleted or",
"yourSeed": "Your Seed phrase"
} | 523 |
2,805 | <gh_stars>1000+
# encoding: utf-8
"""041 Resource new fields
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2018-09-04 18:49:03.042528
"""
from alembic import op
import sqlalchemy as sa
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
if skip_based_on_legacy_engine_version(op, __name__):
return
for table in ('resource', 'resource_revision'):
op.add_column(table, sa.Column('name', sa.UnicodeText))
op.add_column(table, sa.Column('resource_type', sa.UnicodeText))
op.add_column(table, sa.Column('mimetype', sa.UnicodeText))
op.add_column(table, sa.Column('mimetype_inner', sa.UnicodeText))
op.add_column(table, sa.Column('size', sa.BigInteger))
op.add_column(table, sa.Column('last_modified', sa.TIMESTAMP))
op.add_column(table, sa.Column('cache_url', sa.UnicodeText))
op.add_column(table, sa.Column('cache_last_updated', sa.TIMESTAMP))
op.add_column(table, sa.Column('webstore_url', sa.UnicodeText))
op.add_column(table, sa.Column('webstore_last_updated', sa.TIMESTAMP))
def downgrade():
for table in ('resource', 'resource_revision'):
op.drop_column(table, 'name')
op.drop_column(table, 'resource_type')
op.drop_column(table, 'mimetype')
op.drop_column(table, 'mimetype_inner')
op.drop_column(table, 'size')
op.drop_column(table, 'last_modified')
op.drop_column(table, 'cache_url')
op.drop_column(table, 'cache_last_updated')
op.drop_column(table, 'webstore_url')
op.drop_column(table, 'webstore_last_updated')
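# Example (sketch, assuming a configured Alembic environment): apply this
# revision with `alembic upgrade head`; revert it with `alembic downgrade -1`.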
| 750 |
666 | # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBenchmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
    except AssertionError:
      if not download_and_prepare:
        raise
      else:
        logging.info(
            "Data not found, `DiabeticRetinopathyDiagnosisBenchmark.download_and_prepare()`"
            " is now running...")
        self.download_and_prepare()
        # Reload the prepared datasets so that `self.datasets` is usable.
        self.__ds = self.load(level=level,
                              batch_size=batch_size,
                              data_dir=data_dir or DATA_DIR)
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
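  # Example (sketch): `estimator` can be any callable returning a (mean,
  # uncertainty) pair per batch, e.g. MC dropout with T stochastic passes
  # (`model` and T are assumed here, not part of this module):
  #   def estimator(x):
  #     samples = np.stack([model(x, training=True).numpy() for _ in range(T)])
  #     return samples.mean(axis=0), samples.std(axis=0)
  #   scores = DiabeticRetinopathyDiagnosisBenchmark.evaluate(estimator, ds.test)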
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
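  # Note (sketch): these weights are intended to be passed to Keras as
  # `class_weight={0: 1.0, 1: 4.0}` in `tf.keras.Model.fit`, up-weighting the
  # positive (diseased) class roughly 4x to counter class imbalance.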
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset tranformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
    # NOTE: extraction and TFRecord preparation are currently disabled here;
    # run cls._extract() and cls._prepare(levels) manually when needed.
    # cls._extract()
    # cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
    # Handle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
    https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
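# Example usage (sketch; assumes the Kaggle data was already downloaded and
# prepared, and that `model` is a compiled Keras binary classifier):
#   benchmark = DiabeticRetinopathyDiagnosisBenchmark(level="medium")
#   ds = benchmark.datasets
#   model.fit(ds.train, validation_data=ds.validation,
#             class_weight=dict(enumerate(benchmark.class_weight())))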
| 7,111 |
396 | {"routes":[{"geometry":"mklyJf{ph@g@aHh@m@LeBD_BDuNAqC_@_R","legs":[{"summary":"Velbastaðarvegur, Gamli Velbastaðvegur","weight":74.2,"duration":65.3,"steps":[{"intersections":[{"out":0,"entry":[true],"bearings":[74],"location":[-6.808995,62.000073]}],"driving_side":"right","geometry":"mklyJf{ph@g@aH","mode":"driving","maneuver":{"bearing_after":74,"bearing_before":0,"location":[-6.808995,62.000073],"modifier":"left","type":"depart","instruction":"Head east on Velbastaðarvegur"},"weight":15.2,"duration":6.6,"name":"Velbastaðarvegur","distance":78.4},{"intersections":[{"out":1,"in":2,"entry":[true,true,false],"bearings":[75,150,255],"location":[-6.807551,62.000267]},{"out":0,"in":2,"entry":[true,true,false],"bearings":[90,195,270],"location":[-6.806054,61.999953]},{"out":0,"in":2,"entry":[true,true,false],"bearings":[90,180,270],"location":[-6.804462,61.999935]},{"out":0,"in":2,"entry":[true,true,false],"bearings":[90,180,270],"location":[-6.802712,61.999962]}],"driving_side":"right","geometry":"ullyJdrph@VUPWBY@KF_AD_B@w@?{@@eB?{A?_A@[?cBAqCCw@?SGiDIwCG_EAQ","mode":"driving","maneuver":{"bearing_after":156,"bearing_before":73,"location":[-6.807551,62.000267],"modifier":"right","type":"turn","instruction":"Turn right onto <NAME>ðvegur"},"weight":58.99999999999999,"duration":58.7,"name":"<NAME>","distance":408.1},{"intersections":[{"in":0,"entry":[true],"bearings":[257],"location":[-6.800053,62.000099]}],"driving_side":"right","geometry":"sklyJhcoh@","mode":"driving","maneuver":{"bearing_after":0,"bearing_before":77,"location":[-6.800053,62.000099],"type":"arrive","modifier":"right","instruction":"You have arrived at your destination, on the right"},"weight":0,"duration":0,"name":"<NAME>","distance":0}],"distance":486.6}],"weight_name":"routability","weight":74.2,"duration":65.3,"distance":486.6}],"waypoints":[{"name":"Velbastaðarvegur","location":[-6.808995,62.000073]},{"name":"<NAME>","location":[-6.800053,62.000099]}],"code":"Ok","uuid":"cjpois8j900098anrxqn5kdc5"} | 741 |
1,350 | <filename>sdk/peering/azure-resourcemanager-peering/src/main/java/com/azure/resourcemanager/peering/implementation/PeeringServicesClientImpl.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.peering.implementation;
import com.azure.core.annotation.BodyParam;
import com.azure.core.annotation.Delete;
import com.azure.core.annotation.ExpectedResponses;
import com.azure.core.annotation.Get;
import com.azure.core.annotation.HeaderParam;
import com.azure.core.annotation.Headers;
import com.azure.core.annotation.Host;
import com.azure.core.annotation.HostParam;
import com.azure.core.annotation.Patch;
import com.azure.core.annotation.PathParam;
import com.azure.core.annotation.Put;
import com.azure.core.annotation.QueryParam;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceInterface;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.annotation.UnexpectedResponseExceptionType;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.PagedResponse;
import com.azure.core.http.rest.PagedResponseBase;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.RestProxy;
import com.azure.core.management.exception.ManagementException;
import com.azure.core.util.Context;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.logging.ClientLogger;
import com.azure.resourcemanager.peering.fluent.PeeringServicesClient;
import com.azure.resourcemanager.peering.fluent.models.PeeringServiceInner;
import com.azure.resourcemanager.peering.models.PeeringServiceListResult;
import com.azure.resourcemanager.peering.models.ResourceTags;
import reactor.core.publisher.Mono;
/** An instance of this class provides access to all the operations defined in PeeringServicesClient. */
public final class PeeringServicesClientImpl implements PeeringServicesClient {
private final ClientLogger logger = new ClientLogger(PeeringServicesClientImpl.class);
/** The proxy service used to perform REST calls. */
private final PeeringServicesService service;
/** The service client containing this operation class. */
private final PeeringManagementClientImpl client;
/**
* Initializes an instance of PeeringServicesClientImpl.
*
* @param client the instance of the service client containing this operation class.
*/
PeeringServicesClientImpl(PeeringManagementClientImpl client) {
this.service =
RestProxy.create(PeeringServicesService.class, client.getHttpPipeline(), client.getSerializerAdapter());
this.client = client;
}
/**
* The interface defining all the services for PeeringManagementClientPeeringServices to be used by the proxy
* service to perform REST calls.
*/
@Host("{$host}")
@ServiceInterface(name = "PeeringManagementCli")
private interface PeeringServicesService {
@Headers({"Content-Type: application/json"})
@Get(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering"
+ "/peeringServices/{peeringServiceName}")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceInner>> getByResourceGroup(
@HostParam("$host") String endpoint,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("peeringServiceName") String peeringServiceName,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Put(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering"
+ "/peeringServices/{peeringServiceName}")
@ExpectedResponses({200, 201})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceInner>> createOrUpdate(
@HostParam("$host") String endpoint,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("peeringServiceName") String peeringServiceName,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@BodyParam("application/json") PeeringServiceInner peeringService,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Delete(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering"
+ "/peeringServices/{peeringServiceName}")
@ExpectedResponses({200, 204})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<Void>> delete(
@HostParam("$host") String endpoint,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("peeringServiceName") String peeringServiceName,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Patch(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering"
+ "/peeringServices/{peeringServiceName}")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceInner>> update(
@HostParam("$host") String endpoint,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("peeringServiceName") String peeringServiceName,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@BodyParam("application/json") ResourceTags tags,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Get(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering"
+ "/peeringServices")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceListResult>> listByResourceGroup(
@HostParam("$host") String endpoint,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Get("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringServices")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceListResult>> list(
@HostParam("$host") String endpoint,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Get("{nextLink}")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceListResult>> listByResourceGroupNext(
@PathParam(value = "nextLink", encoded = true) String nextLink,
@HostParam("$host") String endpoint,
@HeaderParam("Accept") String accept,
Context context);
@Headers({"Content-Type: application/json"})
@Get("{nextLink}")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(ManagementException.class)
Mono<Response<PeeringServiceListResult>> listBySubscriptionNext(
@PathParam(value = "nextLink", encoded = true) String nextLink,
@HostParam("$host") String endpoint,
@HeaderParam("Accept") String accept,
Context context);
}
/**
* Gets an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
     * @param peeringServiceName The name of the peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return an existing peering service with the specified name under the given subscription and resource group.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> getByResourceGroupWithResponseAsync(
String resourceGroupName, String peeringServiceName) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.getByResourceGroup(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Gets an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
     * @param peeringServiceName The name of the peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return an existing peering service with the specified name under the given subscription and resource group.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> getByResourceGroupWithResponseAsync(
String resourceGroupName, String peeringServiceName, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.getByResourceGroup(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context);
}
/**
* Gets an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return an existing peering service with the specified name under the given subscription and resource group.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PeeringServiceInner> getByResourceGroupAsync(String resourceGroupName, String peeringServiceName) {
return getByResourceGroupWithResponseAsync(resourceGroupName, peeringServiceName)
.flatMap(
(Response<PeeringServiceInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Gets an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
     * @param peeringServiceName The name of the peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return an existing peering service with the specified name under the given subscription and resource group.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PeeringServiceInner getByResourceGroup(String resourceGroupName, String peeringServiceName) {
return getByResourceGroupAsync(resourceGroupName, peeringServiceName).block();
}
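    // Usage sketch (hypothetical resource names; the client is assumed to be
    // obtained from an authenticated PeeringManagementClientImpl):
    //   PeeringServiceInner svc =
    //       peeringServicesClient.getByResourceGroup("myResourceGroup", "myPeeringService");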
/**
* Gets an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
     * @param peeringServiceName The name of the peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return an existing peering service with the specified name under the given subscription and resource group.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PeeringServiceInner> getByResourceGroupWithResponse(
String resourceGroupName, String peeringServiceName, Context context) {
return getByResourceGroupWithResponseAsync(resourceGroupName, peeringServiceName, context).block();
}
/**
     * Creates a new peering service or updates an existing peering service with the specified name under the given
     * subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param peeringService The properties needed to create or update a peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> createOrUpdateWithResponseAsync(
String resourceGroupName, String peeringServiceName, PeeringServiceInner peeringService) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (peeringService == null) {
return Mono.error(new IllegalArgumentException("Parameter peeringService is required and cannot be null."));
} else {
peeringService.validate();
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.createOrUpdate(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
peeringService,
accept,
context))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
     * Creates a new peering service or updates an existing peering service with the specified name under the given
     * subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param peeringService The properties needed to create or update a peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> createOrUpdateWithResponseAsync(
String resourceGroupName, String peeringServiceName, PeeringServiceInner peeringService, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (peeringService == null) {
return Mono.error(new IllegalArgumentException("Parameter peeringService is required and cannot be null."));
} else {
peeringService.validate();
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.createOrUpdate(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
peeringService,
accept,
context);
}
/**
     * Creates a new peering service or updates an existing peering service with the specified name under the given
     * subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param peeringService The properties needed to create or update a peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PeeringServiceInner> createOrUpdateAsync(
String resourceGroupName, String peeringServiceName, PeeringServiceInner peeringService) {
return createOrUpdateWithResponseAsync(resourceGroupName, peeringServiceName, peeringService)
.flatMap(
(Response<PeeringServiceInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
     * Creates a new peering service or updates an existing peering service with the specified name under the given
     * subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param peeringService The properties needed to create or update a peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PeeringServiceInner createOrUpdate(
String resourceGroupName, String peeringServiceName, PeeringServiceInner peeringService) {
return createOrUpdateAsync(resourceGroupName, peeringServiceName, peeringService).block();
}
/**
     * Creates a new peering service or updates an existing peering service with the specified name under the given
     * subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param peeringService The properties needed to create or update a peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PeeringServiceInner> createOrUpdateWithResponse(
String resourceGroupName, String peeringServiceName, PeeringServiceInner peeringService, Context context) {
return createOrUpdateWithResponseAsync(resourceGroupName, peeringServiceName, peeringService, context).block();
}
/**
* Deletes an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<Void>> deleteWithResponseAsync(String resourceGroupName, String peeringServiceName) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.delete(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Deletes an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<Void>> deleteWithResponseAsync(
String resourceGroupName, String peeringServiceName, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.delete(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context);
}
/**
* Deletes an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Void> deleteAsync(String resourceGroupName, String peeringServiceName) {
return deleteWithResponseAsync(resourceGroupName, peeringServiceName)
.flatMap((Response<Void> res) -> Mono.empty());
}
/**
* Deletes an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete(String resourceGroupName, String peeringServiceName) {
deleteAsync(resourceGroupName, peeringServiceName).block();
}
/**
* Deletes an existing peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(String resourceGroupName, String peeringServiceName, Context context) {
return deleteWithResponseAsync(resourceGroupName, peeringServiceName, context).block();
}
/**
* Updates tags for a peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param tags The resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> updateWithResponseAsync(
String resourceGroupName, String peeringServiceName, ResourceTags tags) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (tags == null) {
return Mono.error(new IllegalArgumentException("Parameter tags is required and cannot be null."));
} else {
tags.validate();
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.update(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
tags,
accept,
context))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Updates tags for a peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param tags The resource tags.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PeeringServiceInner>> updateWithResponseAsync(
String resourceGroupName, String peeringServiceName, ResourceTags tags, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (peeringServiceName == null) {
return Mono
.error(new IllegalArgumentException("Parameter peeringServiceName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (tags == null) {
return Mono.error(new IllegalArgumentException("Parameter tags is required and cannot be null."));
} else {
tags.validate();
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.update(
this.client.getEndpoint(),
resourceGroupName,
peeringServiceName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
tags,
accept,
context);
}
/**
* Updates tags for a peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param tags The resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PeeringServiceInner> updateAsync(
String resourceGroupName, String peeringServiceName, ResourceTags tags) {
return updateWithResponseAsync(resourceGroupName, peeringServiceName, tags)
.flatMap(
(Response<PeeringServiceInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Updates tags for a peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param tags The resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PeeringServiceInner update(String resourceGroupName, String peeringServiceName, ResourceTags tags) {
return updateAsync(resourceGroupName, peeringServiceName, tags).block();
}
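    // Illustrative synchronous call (hypothetical names and values, not part of this file):
    //   PeeringServiceInner updated = peeringServicesClient
    //       .update("myResourceGroup", "myPeeringService",
    //           new ResourceTags().withTags(Collections.singletonMap("env", "test")));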
/**
* Updates tags for a peering service with the specified name under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param peeringServiceName The name of the peering service.
* @param tags The resource tags.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return peering Service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PeeringServiceInner> updateWithResponse(
String resourceGroupName, String peeringServiceName, ResourceTags tags, Context context) {
return updateWithResponseAsync(resourceGroupName, peeringServiceName, tags, context).block();
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listByResourceGroupSinglePageAsync(String resourceGroupName) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.listByResourceGroup(
this.client.getEndpoint(),
resourceGroupName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context))
.<PagedResponse<PeeringServiceInner>>map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listByResourceGroupSinglePageAsync(
String resourceGroupName, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.listByResourceGroup(
this.client.getEndpoint(),
resourceGroupName,
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context)
.map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null));
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
private PagedFlux<PeeringServiceInner> listByResourceGroupAsync(String resourceGroupName) {
return new PagedFlux<>(
() -> listByResourceGroupSinglePageAsync(resourceGroupName),
nextLink -> listByResourceGroupNextSinglePageAsync(nextLink));
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
private PagedFlux<PeeringServiceInner> listByResourceGroupAsync(String resourceGroupName, Context context) {
return new PagedFlux<>(
() -> listByResourceGroupSinglePageAsync(resourceGroupName, context),
nextLink -> listByResourceGroupNextSinglePageAsync(nextLink, context));
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<PeeringServiceInner> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName));
}
/**
* Lists all of the peering services under the given subscription and resource group.
*
* @param resourceGroupName The name of the resource group.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<PeeringServiceInner> listByResourceGroup(String resourceGroupName, Context context) {
return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName, context));
}
/**
     * Lists all of the peering services under the given subscription.
*
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listSinglePageAsync() {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context ->
service
.list(
this.client.getEndpoint(),
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context))
.<PagedResponse<PeeringServiceInner>>map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
     * Lists all of the peering services under the given subscription.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listSinglePageAsync(Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.list(
this.client.getEndpoint(),
this.client.getSubscriptionId(),
this.client.getApiVersion(),
accept,
context)
.map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null));
}
/**
     * Lists all of the peering services under the given subscription.
*
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
private PagedFlux<PeeringServiceInner> listAsync() {
return new PagedFlux<>(
() -> listSinglePageAsync(), nextLink -> listBySubscriptionNextSinglePageAsync(nextLink));
}
/**
     * Lists all of the peering services under the given subscription.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
private PagedFlux<PeeringServiceInner> listAsync(Context context) {
return new PagedFlux<>(
() -> listSinglePageAsync(context), nextLink -> listBySubscriptionNextSinglePageAsync(nextLink, context));
}
/**
     * Lists all of the peering services under the given subscription.
*
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<PeeringServiceInner> list() {
return new PagedIterable<>(listAsync());
}
/**
     * Lists all of the peering services under the given subscription.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<PeeringServiceInner> list(Context context) {
return new PagedIterable<>(listAsync(context));
}
/**
* Get the next page of items.
*
* @param nextLink The nextLink parameter.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listByResourceGroupNextSinglePageAsync(String nextLink) {
if (nextLink == null) {
return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
}
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context -> service.listByResourceGroupNext(nextLink, this.client.getEndpoint(), accept, context))
.<PagedResponse<PeeringServiceInner>>map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Get the next page of items.
*
* @param nextLink The nextLink parameter.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listByResourceGroupNextSinglePageAsync(
String nextLink, Context context) {
if (nextLink == null) {
return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
}
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.listByResourceGroupNext(nextLink, this.client.getEndpoint(), accept, context)
.map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null));
}
/**
* Get the next page of items.
*
* @param nextLink The nextLink parameter.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listBySubscriptionNextSinglePageAsync(String nextLink) {
if (nextLink == null) {
return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
}
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
final String accept = "application/json";
return FluxUtil
.withContext(
context -> service.listBySubscriptionNext(nextLink, this.client.getEndpoint(), accept, context))
.<PagedResponse<PeeringServiceInner>>map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null))
.contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}
/**
* Get the next page of items.
*
* @param nextLink The nextLink parameter.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the paginated list of peering services.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<PagedResponse<PeeringServiceInner>> listBySubscriptionNextSinglePageAsync(
String nextLink, Context context) {
if (nextLink == null) {
return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
}
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
final String accept = "application/json";
context = this.client.mergeContext(context);
return service
.listBySubscriptionNext(nextLink, this.client.getEndpoint(), accept, context)
.map(
res ->
new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
res.getValue().value(),
res.getValue().nextLink(),
null));
}
}
| 23,833 |
1,338 | /*
Copyright 1999-2001, Be Incorporated. All Rights Reserved.
This file may be used under the terms of the Be Sample Code License.
*/
#ifndef FAT_FILE_H
#define FAT_FILE_H
mode_t make_mode(nspace *volume, vnode *node);
status_t write_vnode_entry(nspace *vol, vnode *node);
status_t dosfs_get_vnode_name(fs_volume *_vol, fs_vnode *_node,
char *buffer, size_t bufferSize);
status_t dosfs_release_vnode(fs_volume *_vol, fs_vnode *_node,
bool reenter);
status_t dosfs_rstat(fs_volume *_vol, fs_vnode *_node, struct stat *st);
status_t dosfs_open(fs_volume *_vol, fs_vnode *_node, int omode,
void **cookie);
status_t dosfs_read(fs_volume *_vol, fs_vnode *_node, void *cookie,
off_t pos, void *buf, size_t *len);
status_t dosfs_free_cookie(fs_volume *vol, fs_vnode *node, void *cookie);
status_t dosfs_close(fs_volume *vol, fs_vnode *node, void *cookie);
status_t dosfs_remove_vnode(fs_volume *vol, fs_vnode *node, bool reenter);
status_t dosfs_create(fs_volume *vol, fs_vnode *dir, const char *name,
int omode, int perms, void **cookie, ino_t *vnid);
status_t dosfs_mkdir(fs_volume *vol, fs_vnode *dir, const char *name,
int perms);
status_t dosfs_rename(fs_volume *vol, fs_vnode *olddir, const char *oldname,
fs_vnode *newdir, const char *newname);
status_t dosfs_unlink(fs_volume *vol, fs_vnode *dir, const char *name);
status_t dosfs_rmdir(fs_volume *vol, fs_vnode *dir, const char *name);
status_t dosfs_wstat(fs_volume *vol, fs_vnode *node, const struct stat *st,
uint32 mask);
status_t dosfs_write(fs_volume *vol, fs_vnode *node, void *cookie,
off_t pos, const void *buf, size_t *len);
status_t dosfs_get_file_map(fs_volume *_vol, fs_vnode *_node, off_t pos,
size_t reqLen, struct file_io_vec *vecs, size_t *_count);
bool dosfs_can_page(fs_volume *_vol, fs_vnode *_node, void *_cookie);
status_t dosfs_read_pages(fs_volume *_vol, fs_vnode *_node, void *_cookie,
off_t pos, const iovec *vecs, size_t count, size_t *_numBytes);
status_t dosfs_write_pages(fs_volume *_vol, fs_vnode *_node, void *_cookie,
off_t pos, const iovec *vecs, size_t count, size_t *_numBytes);
#endif /* FAT_FILE_H */
| 906 |
4,879 | <filename>base/cancellable.cpp
#include "base/cancellable.hpp"
#include "base/assert.hpp"
namespace base
{
void Cancellable::Reset()
{
std::lock_guard<std::mutex> lock(m_mutex);
m_status = Status::Active;
m_deadline = {};
}
void Cancellable::Cancel()
{
std::lock_guard<std::mutex> lock(m_mutex);
m_status = Status::CancelCalled;
}
void Cancellable::SetDeadline(std::chrono::steady_clock::time_point const & deadline)
{
std::lock_guard<std::mutex> lock(m_mutex);
m_deadline = deadline;
CheckDeadline();
}
bool Cancellable::IsCancelled() const
{
return CancellationStatus() != Status::Active;
}
Cancellable::Status Cancellable::CancellationStatus() const
{
std::lock_guard<std::mutex> lock(m_mutex);
CheckDeadline();
return m_status;
}
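// Note: callers invoke this only while holding m_mutex; m_status and m_deadline
// are presumably declared mutable in the header so this const method can update
// them when the deadline has passed.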
void Cancellable::CheckDeadline() const
{
if (m_status == Status::Active &&
m_deadline && *m_deadline < std::chrono::steady_clock::now())
{
m_status = Status::DeadlineExceeded;
}
}
std::string DebugPrint(Cancellable::Status status)
{
switch (status)
{
case Cancellable::Status::Active: return "Active";
case Cancellable::Status::CancelCalled: return "CancelCalled";
case Cancellable::Status::DeadlineExceeded: return "DeadlineExceeded";
}
UNREACHABLE();
}
} // namespace base
| 480 |
415 | /*
* bug-overflow - program causing a buffer overflow when fuzzed
*
* Copyright © 2002—2015 <NAME> <<EMAIL>>
*
* This program is free software. It comes without any warranty, to
* the extent permitted by applicable law. You can redistribute it
* and/or modify it under the terms of the Do What the Fuck You Want
* to Public License, Version 2, as published by the WTFPL Task Force.
* See http://www.wtfpl.net/ for more details.
*/
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
volatile char buf[1];
int main(void)
{
int ch;
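    /* buf holds a single byte, so for any nonzero input byte the store below
     * lands megabytes past the end of the array -- the intentional
     * out-of-bounds write a fuzzer is expected to trigger. */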
while ((ch = getc(stdin)) != EOF)
buf[ch * 1024 * 1024] = ch;
return EXIT_SUCCESS;
}
| 231 |
691 | import sys
from circuits.models import Provider
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
providers = load_yaml("/opt/netbox/initializers/providers.yml")
if providers is None:
sys.exit()
for params in providers:
custom_field_data = pop_custom_fields(params)
provider, created = Provider.objects.get_or_create(**params)
if created:
set_custom_fields_values(provider, custom_field_data)
print("📡 Created provider", provider.name)
| 175 |
892 | <reponame>westonsteimel/advisory-database-github<filename>advisories/unreviewed/2022/05/GHSA-vwm7-fg3f-ffjr/GHSA-vwm7-fg3f-ffjr.json
{
"schema_version": "1.2.0",
"id": "GHSA-vwm7-fg3f-ffjr",
"modified": "2022-05-01T18:09:00Z",
"published": "2022-05-01T18:09:00Z",
"aliases": [
"CVE-2007-2956"
],
"details": "Stack-based buffer overflow in the readRadianceHeader function in (1) src/fileformat/rgbeio.cpp in pfstools 1.6.2 and (2) src/Fileformat/rgbeio.cpp in Qtpfsgui 1.8.11 allows remote attackers to execute arbitrary code via a crafted Radiance RGBE (.hdr) file.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2007-2956"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/35948"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/35949"
},
{
"type": "WEB",
"url": "http://pfstools.cvs.sourceforge.net/pfstools/pfstools/src/fileformat/rgbeio.cpp?r1=1.8&r2=1.9"
},
{
"type": "WEB",
"url": "http://pfstools.cvs.sourceforge.net/pfstools/pfstools/src/fileformat/rgbeio.cpp?revision=1.9&view=markup"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/26387"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/26388"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/26674"
},
{
"type": "WEB",
"url": "http://secunia.com/secunia_research/2007-67/advisory/"
},
{
"type": "WEB",
"url": "http://secunia.com/secunia_research/2007-68/advisory/"
},
{
"type": "WEB",
"url": "http://umn.dl.sourceforge.net/sourceforge/qtpfsgui/qtpfsgui-1.8.12.tar.gz"
},
{
"type": "WEB",
"url": "http://www.novell.com/linux/security/advisories/2007_18_sr.html"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2007/2855"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2007/2856"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 1,138 |
1,131 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.network.contrail.model;
import java.io.IOException;
import java.io.Serializable;
import java.lang.ref.WeakReference;
import java.util.TreeSet;
import org.apache.log4j.Logger;
import com.cloud.exception.InternalErrorException;
/**
* ModelObject
*
* A model object represents the desired state of the system.
*
* The object constructor should set the uuid and the internal id of the cloudstack objects.
*
* The build method reads the primary database (typically cloudstack mysql) and derives the state that
* we wish to reflect in the contrail API. This method should not modify the Contrail API state.
*
* The verify method reads the API server state and compares with cached properties.
*
* The update method pushes updates to the contrail API server.
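 *
 * A typical reconciliation pass (illustrative only): build the desired state,
 * call verify(controller) to compare it with the API server, and invoke
 * update(controller) only when verify reports a mismatch.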
*/
public interface ModelObject {
public static class ModelReference implements Comparable<ModelReference>, Serializable {
private static final long serialVersionUID = -2019113974956703526L;
private static final Logger s_logger = Logger.getLogger(ModelReference.class);
/*
* WeakReference class is not serializable by definition. So, we cannot enforce its serialization unless we write the implementation of
* methods writeObject() and readObject(). Since the code was already not serializing it, it's been marked as transient.
*/
transient WeakReference<ModelObject> reference;
ModelReference(ModelObject obj) {
reference = new WeakReference<ModelObject>(obj);
}
@Override
public int compareTo(ModelReference other) {
ModelObject lhs = reference.get();
ModelObject rhs = other.reference.get();
if (lhs == null) {
if (rhs == null) {
return 0;
}
return -1;
}
return lhs.compareTo(rhs);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((reference == null) ? 0 : reference.hashCode());
return result;
}
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
try {
ModelReference rhs = (ModelReference)other;
return compareTo(rhs) == 0;
} catch (ClassCastException ex) {
                // other is not a ModelReference, so it cannot be equal
return false;
}
}
public ModelObject get() {
return reference.get();
}
};
public void addSuccessor(ModelObject child);
public TreeSet<ModelReference> ancestors();
public void clearSuccessors();
public int compareTo(ModelObject o);
/**
* Delete the object from the API server.
* @param controller
* @throws IOException
*/
public void delete(ModelController controller) throws IOException;
/**
* Deletes the object from the data model graph.
*
* @param controller
* @throws IOException
*/
public void destroy(ModelController controller) throws IOException;
public void removeSuccessor(ModelObject child);
public TreeSet<ModelObject> successors();
/**
* Push updates to Contrail API server. This API is only valid for objects in the database.
* @param controller
* @throws IOException
* @throws InternalErrorException
*/
public void update(ModelController controller) throws InternalErrorException, IOException;
/**
* Check that the state of the current object matches the state of the API server.
* @param controller
     * @return true if the cached state matches the API server state
*/
public boolean verify(ModelController controller);
/*
* Compare the state of existing model object with latest model object
*/
public boolean compare(ModelController controller, ModelObject current);
}
| 1,670 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package org.apache.openoffice.ooxml.schema.model.optimize;
import org.apache.openoffice.ooxml.schema.model.attribute.Attribute;
import org.apache.openoffice.ooxml.schema.model.attribute.AttributeGroupReference;
import org.apache.openoffice.ooxml.schema.model.attribute.AttributeReference;
import org.apache.openoffice.ooxml.schema.model.base.INode;
import org.apache.openoffice.ooxml.schema.model.base.NodeVisitorAdapter;
import org.apache.openoffice.ooxml.schema.model.complex.Element;
import org.apache.openoffice.ooxml.schema.model.complex.Extension;
import org.apache.openoffice.ooxml.schema.model.complex.GroupReference;
import org.apache.openoffice.ooxml.schema.model.schema.SchemaBase;
import org.apache.openoffice.ooxml.schema.model.simple.List;
import org.apache.openoffice.ooxml.schema.model.simple.Restriction;
import org.apache.openoffice.ooxml.schema.model.simple.SimpleTypeReference;
/** A visitor that is called for all nodes of a complex or simple type to mark
* the referenced types as being used.
*/
public class RequestVisitor
extends NodeVisitorAdapter
{
RequestVisitor (
final SchemaBase aSourceSchema,
final SchemaOptimizer aOptimizer)
{
maSourceSchemaBase = aSourceSchema;
maSchemaOptimizer = aOptimizer;
}
@Override public void Visit (final Attribute aAttribute)
{
maSchemaOptimizer.RequestType(aAttribute.GetTypeName());
}
@Override public void Visit (final AttributeReference aAttributeReference)
{
maSchemaOptimizer.RequestType(aAttributeReference.GetReferencedName());
}
@Override public void Visit (final AttributeGroupReference aAttributeGroupReference)
{
maSchemaOptimizer.RequestType(aAttributeGroupReference.GetReferencedName());
}
@Override public void Visit (final Element aElement)
{
maSchemaOptimizer.RequestType(aElement.GetTypeName());
}
@Override public void Visit (final Extension aExtension)
{
maSchemaOptimizer.RequestType(aExtension.GetBaseTypeName());
}
@Override public void Visit (final GroupReference aReference)
{
maSchemaOptimizer.RequestType(aReference.GetReferencedGroup(maSourceSchemaBase));
}
@Override public void Visit (final List aList)
{
maSchemaOptimizer.RequestType(aList.GetItemType());
}
@Override public void Visit (final Restriction aRestriction)
{
maSchemaOptimizer.RequestType(aRestriction.GetBaseType());
}
@Override public void Visit (final SimpleTypeReference aReference)
{
maSchemaOptimizer.RequestType(aReference.GetReferencedSimpleType(maSourceSchemaBase));
}
@Override public void Default (final INode aNode)
{
switch (aNode.GetNodeType())
{
case All:
case Any:
case AttributeGroup:
case BuiltIn:
case Choice:
case ComplexContent:
case ComplexType:
case ElementReference:
case Group:
case List:
case OccurrenceIndicator:
case Sequence:
case SimpleContent:
case SimpleType:
case Union:
break;
default:
throw new RuntimeException(
String.format("don't know how to request %s which was defined at %s",
aNode.toString(),
aNode.GetLocation()));
}
}
private final SchemaBase maSourceSchemaBase;
private final SchemaOptimizer maSchemaOptimizer;
}
| 1,667 |
554 | package github.tornaco.xposedmoduletest.ui.adapter;
import android.content.Context;
import android.support.annotation.ColorInt;
import android.support.annotation.LayoutRes;
import android.support.v4.content.ContextCompat;
import android.support.v7.widget.RecyclerView;
import android.util.TypedValue;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Checkable;
import android.widget.ImageView;
import android.widget.Switch;
import android.widget.TextView;
import com.google.common.collect.Lists;
import java.util.Collection;
import java.util.List;
import github.tornaco.xposedmoduletest.R;
import github.tornaco.xposedmoduletest.compat.os.XAppOpsManager;
import github.tornaco.xposedmoduletest.loader.GlideApp;
import github.tornaco.xposedmoduletest.model.CommonPackageInfo;
import github.tornaco.xposedmoduletest.xposed.app.XAPMManager;
import lombok.Getter;
import lombok.Setter;
import static com.bumptech.glide.load.resource.drawable.DrawableTransitionOptions.withCrossFade;
/**
* Created by guohao4 on 2017/12/12.
* Email: <EMAIL>
*/
public class PermissionAppsAdapter extends RecyclerView.Adapter<PermissionAppsAdapter.AppsHolder> {
@Getter
private final List<CommonPackageInfo> data = Lists.newArrayList();
@Getter
private int selection = -1;
private Context context;
@ColorInt
@Setter
private int highlightColor, normalColor;
@Setter
@Getter
private int op;
public PermissionAppsAdapter(Context context) {
this.context = context;
this.highlightColor = ContextCompat.getColor(context, R.color.blue_grey);
TypedValue typedValue = new TypedValue();
context.getTheme().resolveAttribute(R.attr.torCardBackgroundColor, typedValue, true);
int resId = typedValue.resourceId;
this.normalColor = ContextCompat.getColor(context, resId);
}
public void update(Collection<CommonPackageInfo> src) {
synchronized (data) {
data.clear();
data.addAll(src);
}
notifyDataSetChanged();
}
public void setSelection(int selection) {
this.selection = selection;
notifyDataSetChanged();
}
@Override
public AppsHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View view = LayoutInflater.from(context).inflate(getTemplateLayoutRes(), parent, false);
return new AppsHolder(view);
}
@LayoutRes
int getTemplateLayoutRes() {
return R.layout.perm_list_item_apps;
}
@Override
public void onBindViewHolder(AppsHolder holder, int position) {
final CommonPackageInfo commonPackageInfo = getData().get(position);
holder.getTitleView().setText(commonPackageInfo.getAppName());
if (commonPackageInfo.isSystemApp()) {
holder.getSummaryView().setVisibility(View.VISIBLE);
} else {
holder.getSummaryView().setVisibility(View.GONE);
}
GlideApp.with(context)
.load(commonPackageInfo)
.placeholder(0)
.error(R.mipmap.ic_launcher_round)
.fallback(R.mipmap.ic_launcher_round)
.transition(withCrossFade())
.into(holder.getIconView());
boolean allowed = commonPackageInfo.getVersion() == XAppOpsManager.MODE_ALLOWED;
holder.getCompSwitch().setChecked(allowed);
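        // Clicking the switch flips the app-op between MODE_ALLOWED and
        // MODE_IGNORED, persists it via XAPMManager, and caches the new mode on
        // the list item so later rebinds stay consistent.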
holder.getCompSwitch().setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Checkable c = (Checkable) v;
int mode = c.isChecked() ? XAppOpsManager.MODE_ALLOWED : XAppOpsManager.MODE_IGNORED;
XAPMManager.get().setPermissionControlBlockModeForPkg(getOp(),
commonPackageInfo.getPkgName(), mode);
commonPackageInfo.setVersion(mode);
}
});
if (getSelection() >= 0 && position == selection) {
holder.itemView.setBackgroundColor(highlightColor);
} else {
holder.itemView.setBackgroundColor(normalColor);
}
}
@Override
public int getItemCount() {
return data.size();
}
@Getter
static final class AppsHolder extends RecyclerView.ViewHolder {
private ImageView iconView;
private TextView titleView;
private TextView summaryView;
private Switch compSwitch;
AppsHolder(View itemView) {
super(itemView);
this.iconView = itemView.findViewById(R.id.icon);
this.titleView = itemView.findViewById(R.id.title);
this.summaryView = itemView.findViewById(R.id.status);
this.compSwitch = itemView.findViewById(R.id.comp_switch);
itemView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
compSwitch.performClick();
}
});
}
}
}
| 2,254 |
985 | extern void b();
extern void bb();
extern void func(void*);
void c()
{
func(&b);
func(&bb);
} | 43 |
777 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "modules/cachestorage/CacheStorageError.h"
#include "core/dom/DOMException.h"
#include "core/dom/ExceptionCode.h"
#include "modules/cachestorage/Cache.h"
#include "public/platform/modules/serviceworker/WebServiceWorkerCacheError.h"
namespace blink {
DOMException* CacheStorageError::createException(
WebServiceWorkerCacheError webError) {
switch (webError) {
case WebServiceWorkerCacheErrorNotImplemented:
return DOMException::create(NotSupportedError,
"Method is not implemented.");
case WebServiceWorkerCacheErrorNotFound:
return DOMException::create(NotFoundError, "Entry was not found.");
case WebServiceWorkerCacheErrorExists:
return DOMException::create(InvalidAccessError, "Entry already exists.");
case WebServiceWorkerCacheErrorQuotaExceeded:
return DOMException::create(QuotaExceededError, "Quota exceeded.");
case WebServiceWorkerCacheErrorCacheNameNotFound:
return DOMException::create(NotFoundError, "Cache was not found.");
case WebServiceWorkerCacheErrorTooLarge:
return DOMException::create(AbortError, "Operation too large.");
}
NOTREACHED();
return nullptr;
}
} // namespace blink
| 453 |
1,921 | <reponame>ksvr444/daily-coding-problem<filename>solutions/problem_118.py
def merge_sorted_lists(arr1, arr2):
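    # Classic two-pointer merge: repeatedly take the smaller head element, then
    # append whichever tail remains.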
i, k = 0, 0
merged = list()
while i < len(arr1) and k < len(arr2):
if arr1[i] <= arr2[k]:
merged.append(arr1[i])
i += 1
else:
merged.append(arr2[k])
k += 1
merged += arr1[i:]
merged += arr2[k:]
return merged
def sort_squares(arr):
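    # The input is sorted, so the squares of the negative prefix (read right to
    # left) and the squares of the non-negative suffix each form a sorted list;
    # merging the two yields all squares in sorted order in O(n).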
first_pos_index = 0
for num in arr:
if num >= 0:
break
first_pos_index += 1
neg_nums = [x ** 2 for x in reversed(arr[:first_pos_index])]
pos_nums = [x ** 2 for x in arr[first_pos_index:]]
return merge_sorted_lists(pos_nums, neg_nums)
assert sort_squares([]) == []
assert sort_squares([0]) == [0]
assert sort_squares([-1, 1]) == [1, 1]
assert sort_squares([0, 2, 3]) == [0, 4, 9]
assert sort_squares([-9, -2, 0]) == [0, 4, 81]
assert sort_squares([-9, -2, 0, 2, 3]) == [0, 4, 4, 9, 81]
| 474 |
777 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_CHILD_PROCESS_LAUNCHER_HELPER_H_
#define CONTENT_BROWSER_CHILD_PROCESS_LAUNCHER_HELPER_H_
#include <memory>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/process/kill.h"
#include "base/process/process.h"
#include "build/build_config.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/common/result_codes.h"
#include "mojo/edk/embedder/embedder.h"
#include "mojo/edk/embedder/scoped_platform_handle.h"
#if defined(OS_WIN)
#include "sandbox/win/src/sandbox_types.h"
#else
#include "content/public/browser/file_descriptor_info.h"
#endif
#if defined(OS_LINUX)
#include "content/public/common/zygote_handle.h"
#endif
namespace base {
class CommandLine;
}
namespace content {
class ChildProcessLauncher;
class FileDescriptorInfo;
class SandboxedProcessLauncherDelegate;
namespace internal {
#if defined(OS_WIN)
using FileMappedForLaunch = base::HandlesToInheritVector;
#else
using FileMappedForLaunch = FileDescriptorInfo;
#endif
// ChildProcessLauncherHelper is used by ChildProcessLauncher to start a
// process. Since ChildProcessLauncher can be deleted by its client at any time,
// this class is used to keep state as the process is started asynchronously.
// It also contains the platform specific pieces.
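//
// Launch sequence, as documented on the methods below:
//   StartLaunchOnClientThread -> BeforeLaunchOnClientThread ->
//   PrepareMojoPipeHandlesOnClientThread -> GetFilesToMap ->
//   BeforeLaunchOnLauncherThread -> LaunchProcessOnLauncherThread ->
//   AfterLaunchOnLauncherThread -> PostLaunchOnLauncherThread ->
//   PostLaunchOnClientThread.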
class ChildProcessLauncherHelper :
public base::RefCountedThreadSafe<ChildProcessLauncherHelper> {
public:
  // Abstraction around a process, required to deal with Linux (which can use
  // zygotes) and the other platforms in a platform-independent way.
struct Process {
Process() {}
Process(Process&& other);
~Process() {}
Process& operator=(Process&& other);
base::Process process;
#if defined(OS_LINUX)
ZygoteHandle zygote = nullptr;
#endif
};
ChildProcessLauncherHelper(
int child_process_id,
BrowserThread::ID client_thread_id,
std::unique_ptr<base::CommandLine> command_line,
std::unique_ptr<SandboxedProcessLauncherDelegate> delegate,
const base::WeakPtr<ChildProcessLauncher>& child_process_launcher,
bool terminate_on_shutdown);
// The methods below are defined in the order they are called.
// Starts the flow of launching the process.
void StartLaunchOnClientThread();
// Platform specific.
void BeforeLaunchOnClientThread();
// Called in to give implementors a chance at creating a server pipe.
// Platform specific.
mojo::edk::ScopedPlatformHandle PrepareMojoPipeHandlesOnClientThread();
// Returns the list of files that should be mapped in the child process.
// Platform specific.
std::unique_ptr<FileMappedForLaunch> GetFilesToMap();
// Platform specific.
void BeforeLaunchOnLauncherThread(
const FileMappedForLaunch& files_to_register,
base::LaunchOptions* options);
// Does the actual starting of the process.
// |is_synchronous_launch| is set to false if the starting of the process is
  // asynchronous (this is the case on Android), in which case the returned
// Process is not valid (and PostLaunchOnLauncherThread() will provide the
// process once it is available).
// Platform specific.
ChildProcessLauncherHelper::Process LaunchProcessOnLauncherThread(
const base::LaunchOptions& options,
std::unique_ptr<FileMappedForLaunch> files_to_register,
bool* is_synchronous_launch,
int* launch_result);
// Called right after the process has been launched, whether it was created
// yet or not.
// Platform specific.
void AfterLaunchOnLauncherThread(
const ChildProcessLauncherHelper::Process& process,
const base::LaunchOptions& options);
// Called once the process has been created, successfully or not.
// If |post_launch_on_client_thread_called| is false,
// this calls PostLaunchOnClientThread on the client thread.
void PostLaunchOnLauncherThread(ChildProcessLauncherHelper::Process process,
int launch_result,
bool post_launch_on_client_thread_called);
// Note that this could be called before PostLaunchOnLauncherThread() is
// called.
void PostLaunchOnClientThread(ChildProcessLauncherHelper::Process process,
int error_code);
int client_thread_id() const { return client_thread_id_; }
// Returns the termination status and sets |exit_code| if non null.
// See ChildProcessLauncher::GetChildTerminationStatus for more info.
static base::TerminationStatus GetTerminationStatus(
const ChildProcessLauncherHelper::Process& process,
bool known_dead,
int* exit_code);
// Terminates |process|.
// Returns true if the process was stopped, false if the process had not been
// started yet or could not be stopped.
// Note that |exit_code| and |wait| are not used on Android.
static bool TerminateProcess(const base::Process& process,
int exit_code,
bool wait);
// Terminates the process with the normal exit code and ensures it has been
// stopped. By returning a normal exit code this ensures UMA won't treat this
// as a crash.
// Returns immediately and perform the work on the launcher thread.
static void ForceNormalProcessTerminationAsync(
ChildProcessLauncherHelper::Process process);
static void SetProcessBackgroundedOnLauncherThread(
base::Process process, bool background);
private:
friend class base::RefCountedThreadSafe<ChildProcessLauncherHelper>;
~ChildProcessLauncherHelper();
void LaunchOnLauncherThread();
const mojo::edk::PlatformHandle& mojo_client_handle() const {
return mojo_client_handle_.get();
}
base::CommandLine* command_line() { return command_line_.get(); }
int child_process_id() const { return child_process_id_; }
std::string GetProcessType();
static void ForceNormalProcessTerminationSync(
ChildProcessLauncherHelper::Process process);
const int child_process_id_;
const BrowserThread::ID client_thread_id_;
base::TimeTicks begin_launch_time_;
std::unique_ptr<base::CommandLine> command_line_;
std::unique_ptr<SandboxedProcessLauncherDelegate> delegate_;
base::WeakPtr<ChildProcessLauncher> child_process_launcher_;
mojo::edk::ScopedPlatformHandle mojo_client_handle_;
mojo::edk::ScopedPlatformHandle mojo_server_handle_;
bool terminate_on_shutdown_;
};
} // namespace internal
} // namespace content
#endif // CONTENT_BROWSER_CHILD_PROCESS_LAUNCHER_HELPER_H_
| 2,129 |
733 | from rx.core import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def do_while(self, condition):
"""Repeats source as long as condition holds emulating a do while loop.
Keyword arguments:
condition -- {Function} The condition which determines if the source
will be repeated.
Returns an observable {Observable} sequence which is repeated as long
as the condition holds.
"""
return Observable.concat([self, Observable.while_do(condition, self)])
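# Example usage (a sketch; assumes an RxPY 1.x setup where this extension
# method has been registered on Observable):
#
#   xs = Observable.from_([1, 2, 3])
#   xs.do_while(lambda _: should_repeat()).subscribe(print)
#
# The source runs once unconditionally and then repeats for as long as the
# condition callable returns True -- the do-while contract.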
| 152 |