settings:
fantasy:
description: "living in the kingdom of Larion. "
characters:
noble:
prompts: ["You are awakened by one of your servants who tells you that your keep is under attack. You look out the window and see"]
item1: "pouch of gold"
item2: "small dagger"
knight:
prompts: ["You are on a quest to defeat the evil dragon of Larion. You've heard he lives up at the north of the kingdom. You set on the path to defeat him and walk into a dark forest. As you enter the forest you see"]
item1: "steel longsword"
item2: "wooden shield"
wizard:
prompts: ["You finish your long journey and finally arrive at the ruin you've been looking for. You look around and see"]
item1: "staff"
item2: "spellbook"
peasant:
prompts: ["You wake up and begin working in the fields. You see"]
item1: "pitchfork"
item2: "nothing else"
rogue:
prompts: ["You walk down the city street looking for somewhere to steal from. You look around and see"]
item1: "long steel dagger"
item2: "length of rope"
apocalyptic:
description: " trying to survive in a post apocalyptic world by scavenging among the ruins of what is left. "
characters:
scavenger:
prompts: ["You walk for two hours and take a break. You've left your town in search of food. You look around and see "]
item1: "rusty knife"
item2: "canteen"
mutant:
prompts: ["In the colony you were born in, your strange condition was considered a curse, and you has been banished since you were sixteen. After a long journey, you find an abandoned bunker. You see"]
item1: "scales on your face"
item2: "third leg"
headhunter:
prompts: ["You are driving your rusty motorbike. You go past many abandoned bunkers. You arrive at a colony and stop the engine. You take a look around and see"]
item1: "binoculars"
item2: "crappy shotgun"
mystery:
description: "living in Chicago. "
characters:
patient:
prompts: ["You wake up in an old rundown hospital with no memory of how you got there. You take a look around the room and see"]
item1: "hospital bracelet"
item2: "pack of bandages"
detective:
prompts: ["You enter the forest where you believe the criminal you're searching for fled to. Suddenly"]
item1: "pistol"
item2: "police badge"
spy:
prompts: ["You listen to the Russian diplomats and hear them discussing"]
item1: "concealed pistol"
item2: "syringe of poison"
zombies:
description: " trying to survive in a world filled with infected zombies everywhere. "
characters:
soldier:
prompts: ["Your unit lost a lot of men when the infection broke, but you've managed to keep the small town
you're stationed near safe for now. You look over the town and think"]
item1: "automatic rifle"
item2: "grenade"
survivor:
prompts: ["You have managed to survive several months avoiding zombies and scavenging food.
You cautiously enter a rundown store and hear"]
item1: "pistol"
item2: "backpack"
scientist:
prompts: ["You pound your fist on the table, angry that you still haven't found the cure to the infection. You turn to your assistant and"]
item1: "backpack"
item2: "solar powered tablet"
| {
"pile_set_name": "Github"
} |
sha256:ec378802e40550066f501df89bfa28debfd461681274ff3b45c09369a0e421df
| {
"pile_set_name": "Github"
} |
/*! *****************************************************************************
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0
THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
MERCHANTABILITY OR NON-INFRINGEMENT.
See the Apache Version 2.0 License for specific language governing permissions
and limitations under the License.
***************************************************************************** */
/// <reference no-default-lib="true"/>
interface Map<K, V> {
clear(): void;
delete(key: K): boolean;
forEach(callbackfn: (value: V, key: K, map: Map<K, V>) => void, thisArg?: any): void;
get(key: K): V | undefined;
has(key: K): boolean;
set(key: K, value: V): this;
readonly size: number;
}
interface MapConstructor {
new(): Map<any, any>;
new<K, V>(entries?: ReadonlyArray<[K, V]> | null): Map<K, V>;
readonly prototype: Map<any, any>;
}
declare var Map: MapConstructor;
interface ReadonlyMap<K, V> {
forEach(callbackfn: (value: V, key: K, map: ReadonlyMap<K, V>) => void, thisArg?: any): void;
get(key: K): V | undefined;
has(key: K): boolean;
readonly size: number;
}
interface WeakMap<K extends object, V> {
delete(key: K): boolean;
get(key: K): V | undefined;
has(key: K): boolean;
set(key: K, value: V): this;
}
interface WeakMapConstructor {
new <K extends object = object, V = any>(entries?: ReadonlyArray<[K, V]> | null): WeakMap<K, V>;
readonly prototype: WeakMap<object, any>;
}
declare var WeakMap: WeakMapConstructor;
interface Set<T> {
add(value: T): this;
clear(): void;
delete(value: T): boolean;
forEach(callbackfn: (value: T, value2: T, set: Set<T>) => void, thisArg?: any): void;
has(value: T): boolean;
readonly size: number;
}
interface SetConstructor {
new <T = any>(values?: ReadonlyArray<T> | null): Set<T>;
readonly prototype: Set<any>;
}
declare var Set: SetConstructor;
interface ReadonlySet<T> {
forEach(callbackfn: (value: T, value2: T, set: ReadonlySet<T>) => void, thisArg?: any): void;
has(value: T): boolean;
readonly size: number;
}
interface WeakSet<T extends object> {
add(value: T): this;
delete(value: T): boolean;
has(value: T): boolean;
}
interface WeakSetConstructor {
new <T extends object = object>(values?: ReadonlyArray<T> | null): WeakSet<T>;
readonly prototype: WeakSet<object>;
}
declare var WeakSet: WeakSetConstructor;
interface Window {
WeakSet?: WeakSetConstructor;
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) AlphaSierraPapa for the SharpDevelop Team (for details please see \doc\copyright.txt)
// This code is distributed under the GNU LGPL (for details please see \doc\license.txt)
using System;
using System.Text;
namespace ICSharpCode.AvalonEdit.Xml
{
/// <summary>
/// Converts the XML tree back to text.
/// The text should exactly match the original.
/// </summary>
public class PrettyPrintAXmlVisitor: AbstractAXmlVisitor
{
StringBuilder sb = new StringBuilder();
/// <summary>
/// Gets the pretty printed text
/// </summary>
public string Output {
get {
return sb.ToString();
}
}
/// <summary> Create XML text from a document </summary>
public static string PrettyPrint(AXmlDocument doc)
{
PrettyPrintAXmlVisitor visitor = new PrettyPrintAXmlVisitor();
visitor.VisitDocument(doc);
return visitor.Output;
}
/// <summary> Visit RawDocument </summary>
public override void VisitDocument(AXmlDocument document)
{
base.VisitDocument(document);
}
/// <summary> Visit RawElement </summary>
public override void VisitElement(AXmlElement element)
{
base.VisitElement(element);
}
/// <summary> Visit RawTag </summary>
public override void VisitTag(AXmlTag tag)
{
sb.Append(tag.OpeningBracket);
sb.Append(tag.Name);
base.VisitTag(tag);
sb.Append(tag.ClosingBracket);
}
/// <summary> Visit RawAttribute </summary>
public override void VisitAttribute(AXmlAttribute attribute)
{
sb.Append(attribute.Name);
sb.Append(attribute.EqualsSign);
sb.Append(attribute.QuotedValue);
}
/// <summary> Visit RawText </summary>
public override void VisitText(AXmlText text)
{
sb.Append(text.EscapedValue);
}
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2011 Andes Technology Corporation
* Copyright (C) 2010 Shawn Lin ([email protected])
* Copyright (C) 2011 Macpaul Lin ([email protected])
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_NDS_STRING_H
#define __ASM_NDS_STRING_H
/*
* We don't do inline string functions, since the
* optimised inline asm versions are not small.
*/
#undef __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *s, int c);
#undef __HAVE_ARCH_STRCHR
extern char *strchr(const char *s, int c);
#undef __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
#undef __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
#undef __HAVE_ARCH_MEMCHR
extern void *memchr(const void *, int, __kernel_size_t);
#undef __HAVE_ARCH_MEMZERO
#undef __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
#ifdef CONFIG_MARCO_MEMSET
extern void __memzero(void *ptr, __kernel_size_t n);
#define memset(p, v, n) \
({ \
if ((n) != 0) { \
if (__builtin_constant_p((v)) && (v) == 0) \
__memzero((p), (n)); \
else \
memset((p), (v), (n)); \
} \
(p); \
})
#define memzero(p, n) ({ if ((n) != 0) __memzero((p), (n)); (p); })
#else
extern void memzero(void *ptr, __kernel_size_t n);
#endif
#endif /* __ASM_NDS_STRING_H */
| {
"pile_set_name": "Github"
} |
#container1, #container2, #container3 {
min-width: 300px;
max-width: 800px;
height: 120px;
margin: 0 auto;
} | {
"pile_set_name": "Github"
} |
--------------ofCamera parameters--------------
transformMatrix
-0.97676, -0.0130288, 0.213939, 0
0.213327, -0.155834, 0.964473, 0
0.0207731, 0.987696, 0.154992, 0
2.02831, 524.852, 83.2131, 1
fov
60
near
1
far
100000
lensOffset
0, 0
isOrtho
0
--------------ofEasyCam parameters--------------
target
0, 0, 0
bEnableMouseMiddleButton
1
bMouseInputEnabled
0
drag
0.9
doTranslationKey
m
| {
"pile_set_name": "Github"
} |
# Components
Components let you split the UI into independent, reusable pieces and design each piece in isolation.
They play an important role in single-page applications (SPAs) and bring these benefits: divide and conquer, easier management, less coupling, reuse, and better efficiency and performance.
## A minimal component: function components
A component is, at heart, a function. **The simplest component** can be defined as a function, and a component's name must start with a capital letter.
```js
var Xheader = (props) => {
return <header>微博</header>
}
ReactDOM.render(
<div>
<Xheader />
<Xheader />
</div>,
document.querySelector("#demo")
)
```
The same component written as a webpack module:
```javascript
import React from 'react'
import ReactDOM from 'react-dom'
let Component1 = () => {
return <h1>React Component</h1>
}
ReactDOM.render(
<Component1 />,
document.getElementById('app')
)
```
## Class components: ES5 syntax
```javascript
var React = require('react');
var ReactDOM = require('react-dom')
var Component1 = React.createClass({
render: function(){
return (
<div>
<h1>DK</h1>
<h1>Eno Yao</h1>
</div>
)
}
})
ReactDOM.render(
<Component1 />,
document.getElementById('app')
)
```
## Class components: ES6 syntax
Declaring components as ES6 classes is the most common approach, and the recommended one.
```javascript
class Xheader extends React.Component {
constructor(props){
super(props)
}
render() {
return <header>Eno Yao</header>;
}
}
ReactDOM.render(
<div>
<Xheader />
<Xheader />
</div>,
document.querySelector("#demo")
)
```
### Live previews
- [Defining components](https://wscats.github.io/react-tutorial/react/component/src/define/define.html)
- [Function components](https://wscats.github.io/react-tutorial/react/component/src/define/函数组件.html)
- [Class components and props](https://wscats.github.io/react-tutorial/react/component/src/define/类组件和props.html)
### Component summary
- Component names must start with a capital letter
- A function component returns a virtual DOM node
- A class component must have a render method
- render must return a virtual DOM node
- In real-world work, class components are the most common choice
# Component properties (Props)
Because a component is invoked like an HTML tag, and HTML tags can take attributes, React components can also take custom attributes; their values are read via `this.props`.
`props` can be thought of as what a child inherits from its parent. For parent-child communication, `props` is the first choice, but I don't recommend using it across generations (parent to grandchild).
## Function components
```javascript
import React from 'react'
import ReactDOM from 'react-dom'
let Component1 = (props) => {
return <h1>name-{props.name}</h1>
}
ReactDOM.render(
<Component1 name="Sam"/>,
document.getElementById('app')
)
```
## Class components
```javascript
import React from 'react'
import ReactDOM from 'react-dom'
class Component1 extends React.Component{
render(){
return <h1>name-{this.props.name}</h1>
}
}
ReactDOM.render(
<Component1 name="Sam"/>,
document.getElementById('app')
)
```
## Default properties (defaultProps)
Besides receiving values through attributes at the call site, a component's properties can be given default values; if the caller does not pass a property, the default value is used instead.
The `getDefaultProps` method is only called once.
```javascript
// ES5
var React = require('react');
var ReactDOM = require('react-dom');
var Component1 = React.createClass({
getDefaultProps: function(){
return {
name: 'Eno Yao',
age: 20
}
},
render: function(){
return (
<div>
<p>Name: {this.props.name}</p>
<p>Age: {this.props.age}</p>
</div>
)
}
})
// ES6
import React from 'react';
import ReactDOM from 'react-dom';
class Component1 extends React.Component{
static defaultProps = {
name: 'DK',
age: 20
}
render(){
return (
<div>
<h1>Name: {this.props.name}</h1>
<h1>Age: {this.props.age}</h1>
</div>
)
}
}
// or
Component1.defaultProps = {
name: "Sam",
age: 22
}
// usage
ReactDOM.render(<Component1/>, document.getElementById('div1'));
```
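A side note: `defaultProps` works the same way on function components. A minimal sketch (the `Greeting` component here is illustrative, not one of the examples above):
```javascript
import React from 'react'
import ReactDOM from 'react-dom'
let Greeting = (props) => {
return <h1>name-{props.name}</h1>
}
// Used whenever the caller does not pass a name prop
Greeting.defaultProps = {
name: 'Eno Yao'
}
ReactDOM.render(<Greeting />, document.getElementById('app'))
```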
## Property type rules (propTypes)
Often, when defining a component, you want to constrain its properties: some property values must be arrays, or certain properties must not be empty. Such constraints can be declared with `propTypes`.
```javascript
import React from 'react';
import ReactDOM from 'react-dom';
import PropTypes from 'prop-types'
class Component1 extends React.Component{
render(){
return (
<div>
<p>Name: {this.props.name}</p>
<p>Age: {this.props.age}</p>
<p>Subjects:</p>
<ul>
{
this.props.subjects.map(function(_item){
// each list item needs a unique key prop
return <li key={_item}>{_item}</li>
})
}
</ul>
</div>
)
}
}
// declare that the name prop must be a string and is required
Component1.propTypes = {
name: PropTypes.string.isRequired
}
// subjects must be passed here, otherwise this.props.subjects.map throws at render time
ReactDOM.render(<Component1 name="DK" age={20} subjects={['Math', 'English']}/>, document.getElementById('div1'));
```
Props are optional by default. Commonly used types:
- `PropTypes.array`
- `PropTypes.bool`
- `PropTypes.func`
- `PropTypes.number`
- `PropTypes.object`
- `PropTypes.string`
- `PropTypes.symbol`
- `PropTypes.any.isRequired`
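These validators can be composed, and `.isRequired` can be chained onto any of them. As a sketch, the `subjects` prop from the example above could be declared as a required array of strings:
```javascript
Component1.propTypes = {
// name must be a string and must be provided
name: PropTypes.string.isRequired,
// age is an optional number
age: PropTypes.number,
// subjects must be an array whose items are strings
subjects: PropTypes.arrayOf(PropTypes.string).isRequired
}
```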
# Polymorphic components
If the same component needs to render different states, consider `props`: the parent sets a value on the child component's attribute, and the child receives that value and renders the corresponding state.
```js
class Xheader extends React.Component {
constructor(props){
super(props)
// super(props) already assigns this.props; no manual assignment is needed
console.log(props);
}
render() {
return <header>{this.props.title}</header>;
}
}
ReactDOM.render(
<div>
<Xheader title="微信" />
<Xheader title="支付宝" />
</div>,
document.querySelector("#demo")
)
```
`ReactDOM.render`, acting as the parent, passes `微信` and `支付宝` to two different child `<Xheader>` components; each child takes the value in through its own `props` and renders it as its title. | {
"pile_set_name": "Github"
} |
Automated release
=====
Usage
-----
Replace the option values inside `index.js`, then run:
```
npm install && node index.js
```
| {
"pile_set_name": "Github"
} |
/**************************************************
*
* Part one of the system initialization code, contains low-level
* initialization, plain thumb variant.
*
* Copyright 2011 IAR Systems. All rights reserved.
*
* $Revision: 47876 $
*
**************************************************/
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
PUBLIC __vector_table_0x1c
PUBLIC __Vectors
PUBLIC __Vectors_End
PUBLIC __Vectors_Size
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler
DCD NMI_Handler
DCD HardFault_Handler
DCD MemManage_Handler
DCD BusFault_Handler
DCD UsageFault_Handler
__vector_table_0x1c
DCD 0
DCD 0
DCD 0
DCD 0
DCD SVC_Handler
DCD DebugMon_Handler
DCD 0
DCD PendSV_Handler
DCD SysTick_Handler
; External Interrupts
DCD DAC_IRQHandler ; 16 D/A Converter
DCD MX_CORE_IRQHandler ; 17 CortexM0 (LPC43XX ONLY)
DCD DMA_IRQHandler ; 18 General Purpose DMA
DCD 0 ; 19 Reserved
DCD FLASHEEPROM_IRQHandler ; 20 ORed flash bank A, flash bank B, EEPROM interrupts
DCD ETH_IRQHandler ; 21 Ethernet
DCD SDIO_IRQHandler ; 22 SD/MMC
DCD LCD_IRQHandler ; 23 LCD
DCD USB0_IRQHandler ; 24 USB0
DCD USB1_IRQHandler ; 25 USB1
DCD SCT_IRQHandler ; 26 State Configurable Timer
DCD RIT_IRQHandler ; 27 Repetitive Interrupt Timer
DCD TIMER0_IRQHandler ; 28 Timer0
DCD TIMER1_IRQHandler ; 29 Timer1
DCD TIMER2_IRQHandler ; 30 Timer2
DCD TIMER3_IRQHandler ; 31 Timer3
DCD MCPWM_IRQHandler ; 32 Motor Control PWM
DCD ADC0_IRQHandler ; 33 A/D Converter 0
DCD I2C0_IRQHandler ; 34 I2C0
DCD I2C1_IRQHandler ; 35 I2C1
DCD SPI_IRQHandler ; 36 SPI (LPC43XX ONLY)
DCD ADC1_IRQHandler ; 37 A/D Converter 1
DCD SSP0_IRQHandler ; 38 SSP0
DCD SSP1_IRQHandler ; 39 SSP1
DCD UART0_IRQHandler ; 40 UART0
DCD UART1_IRQHandler ; 41 UART1
DCD UART2_IRQHandler ; 42 UART2
DCD UART3_IRQHandler ; 43 UART3
DCD I2S0_IRQHandler ; 44 I2S0
DCD I2S1_IRQHandler ; 45 I2S1
DCD SPIFI_IRQHandler ; 46 SPI Flash Interface
DCD SGPIO_IRQHandler ; 47 SGPIO (LPC43XX ONLY)
DCD GPIO0_IRQHandler ; 48 GPIO0
DCD GPIO1_IRQHandler ; 49 GPIO1
DCD GPIO2_IRQHandler ; 50 GPIO2
DCD GPIO3_IRQHandler ; 51 GPIO3
DCD GPIO4_IRQHandler ; 52 GPIO4
DCD GPIO5_IRQHandler ; 53 GPIO5
DCD GPIO6_IRQHandler ; 54 GPIO6
DCD GPIO7_IRQHandler ; 55 GPIO7
DCD GINT0_IRQHandler ; 56 GINT0
DCD GINT1_IRQHandler ; 57 GINT1
DCD EVRT_IRQHandler ; 58 Event Router
DCD CAN1_IRQHandler ; 59 C_CAN1
DCD 0
DCD 0
DCD ATIMER_IRQHandler ; 62 ATIMER
DCD RTC_IRQHandler ; 63 RTC
DCD 0
DCD WDT_IRQHandler ; 65 WDT
DCD 0
DCD CAN0_IRQHandler ; 67 C_CAN0
DCD QEI_IRQHandler ; 68 QEI
__Vectors_End
__Vectors EQU __vector_table
__Vectors_Size EQU __Vectors_End - __Vectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
PUBWEAK HardFault_Handler
PUBWEAK MemManage_Handler
PUBWEAK BusFault_Handler
PUBWEAK UsageFault_Handler
PUBWEAK SVC_Handler
PUBWEAK DebugMon_Handler
PUBWEAK PendSV_Handler
PUBWEAK SysTick_Handler
PUBWEAK DAC_IRQHandler
PUBWEAK MX_CORE_IRQHandler
PUBWEAK DMA_IRQHandler
PUBWEAK FLASHEEPROM_IRQHandler
PUBWEAK ETH_IRQHandler
PUBWEAK SDIO_IRQHandler
PUBWEAK LCD_IRQHandler
PUBWEAK USB0_IRQHandler
PUBWEAK USB1_IRQHandler
PUBWEAK SCT_IRQHandler
PUBWEAK RIT_IRQHandler
PUBWEAK TIMER0_IRQHandler
PUBWEAK TIMER1_IRQHandler
PUBWEAK TIMER2_IRQHandler
PUBWEAK TIMER3_IRQHandler
PUBWEAK MCPWM_IRQHandler
PUBWEAK ADC0_IRQHandler
PUBWEAK I2C0_IRQHandler
PUBWEAK I2C1_IRQHandler
PUBWEAK SPI_IRQHandler
PUBWEAK ADC1_IRQHandler
PUBWEAK SSP0_IRQHandler
PUBWEAK SSP1_IRQHandler
PUBWEAK UART0_IRQHandler
PUBWEAK UART1_IRQHandler
PUBWEAK UART2_IRQHandler
PUBWEAK UART3_IRQHandler
PUBWEAK I2S0_IRQHandler
PUBWEAK I2S1_IRQHandler
PUBWEAK SPIFI_IRQHandler
PUBWEAK SGPIO_IRQHandler
PUBWEAK GPIO0_IRQHandler
PUBWEAK GPIO1_IRQHandler
PUBWEAK GPIO2_IRQHandler
PUBWEAK GPIO3_IRQHandler
PUBWEAK GPIO4_IRQHandler
PUBWEAK GPIO5_IRQHandler
PUBWEAK GPIO6_IRQHandler
PUBWEAK GPIO7_IRQHandler
PUBWEAK GINT0_IRQHandler
PUBWEAK GINT1_IRQHandler
PUBWEAK EVRT_IRQHandler
PUBWEAK CAN1_IRQHandler
PUBWEAK ATIMER_IRQHandler
PUBWEAK RTC_IRQHandler
PUBWEAK WDT_IRQHandler
PUBWEAK CAN0_IRQHandler
PUBWEAK QEI_IRQHandler
SECTION .text:CODE:REORDER(1)
NMI_Handler
B NMI_Handler
SVC_Handler
B SVC_Handler
DebugMon_Handler
B DebugMon_Handler
PendSV_Handler
B PendSV_Handler
SysTick_Handler
B SysTick_Handler
HardFault_Handler
B HardFault_Handler
MemManage_Handler
B MemManage_Handler
BusFault_Handler
B BusFault_Handler
UsageFault_Handler
DAC_IRQHandler
MX_CORE_IRQHandler
DMA_IRQHandler
FLASHEEPROM_IRQHandler
ETH_IRQHandler
SDIO_IRQHandler
LCD_IRQHandler
USB0_IRQHandler
USB1_IRQHandler
SCT_IRQHandler
RIT_IRQHandler
TIMER0_IRQHandler
TIMER1_IRQHandler
TIMER2_IRQHandler
TIMER3_IRQHandler
MCPWM_IRQHandler
ADC0_IRQHandler
I2C0_IRQHandler
I2C1_IRQHandler
SPI_IRQHandler
ADC1_IRQHandler
SSP0_IRQHandler
SSP1_IRQHandler
UART0_IRQHandler
UART1_IRQHandler
UART2_IRQHandler
UART3_IRQHandler
I2S0_IRQHandler
I2S1_IRQHandler
SPIFI_IRQHandler
SGPIO_IRQHandler
GPIO0_IRQHandler
GPIO1_IRQHandler
GPIO2_IRQHandler
GPIO3_IRQHandler
GPIO4_IRQHandler
GPIO5_IRQHandler
GPIO6_IRQHandler
GPIO7_IRQHandler
GINT0_IRQHandler
GINT1_IRQHandler
EVRT_IRQHandler
CAN1_IRQHandler
ATIMER_IRQHandler
RTC_IRQHandler
WDT_IRQHandler
CAN0_IRQHandler
QEI_IRQHandler
Default_IRQHandler
B Default_IRQHandler
/* CRP Section - not needed for flashless devices */
;;; SECTION .crp:CODE:ROOT(2)
;;; DATA
/* Code Read Protection
NO_ISP 0x4E697370 - Prevents sampling of pin PIO0_1 for entering ISP mode
CRP1 0x12345678 - Write to RAM command cannot access RAM below 0x10000300.
- Copy RAM to flash command can not write to Sector 0.
- Erase command can erase Sector 0 only when all sectors
are selected for erase.
- Compare command is disabled.
- Read Memory command is disabled.
CRP2 0x87654321 - Read Memory is disabled.
- Write to RAM is disabled.
- "Go" command is disabled.
- Copy RAM to flash is disabled.
- Compare is disabled.
CRP3 0x43218765 - Access to chip via the SWD pins is disabled. ISP entry
by pulling PIO0_1 LOW is disabled if a valid user code is
present in flash sector 0.
Caution: If CRP3 is selected, no future factory testing can be
performed on the device.
*/
;;; DCD 0xFFFFFFFF
;;;
END
| {
"pile_set_name": "Github"
} |
{
"parent": "thermalfoundation:item/tool/bow_constantan",
"textures": {
"layer0": "thermalfoundation:items/tool/bow_constantan_0"
}
}
| {
"pile_set_name": "Github"
} |
# SEO
See [Production website note](/webserver/Production-website.md#seo)
### Keyword research for a website
- Gather seed keywords (from the client, from the website)
- [Google Adwords Keyword Planner](https://ads.google.com/aw/keywordplanner/home)
/ Find new keywords (set country, language)
- Remove irrelevant ones (e.g. other brands)
- Google Adwords Keyword Planner / Get search volume (set country, language)
- Set one keyword per web page
- Talk to the client about customer groups and why they choose them
### Keyword planner tools
- https://trends.google.com
- [Google Autocomplete](https://support.google.com/websearch/answer/106230)
by http://keywordshitter.com/
- https://answerthepublic.com/
- https://www.google.com/trends/correlate/
- moz.com
- semrush.com
- ahrefs.com
- https://www.link-assistant.com/rank-tracker/comparison.html
- https://textoptimizer.com/
- https://kparser.com/
and more: https://ahrefs.com/blog/free-keyword-research-tools/
| {
"pile_set_name": "Github"
} |
package org.locationtech.jtstest.testbuilder.ui.style;
import java.awt.Graphics2D;
import org.locationtech.jts.geom.Geometry;
import org.locationtech.jtstest.testbuilder.ui.Viewport;
public class StyleGroup implements Style {
private Style[] styles;
public StyleGroup(Style ... styles) {
this.styles = styles;
}
@Override
public void paint(Geometry geom, Viewport viewport, Graphics2D g) throws Exception {
for (Style style : styles) {
style.paint(geom, viewport, g);
}
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- This file was generated by Bakefile (http://bakefile.org).
Do not modify, all changes will be overwritten! -->
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{6D0FCCB5-5C5D-5A73-86EE-B0637CC26752}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>synthetic_select1</RootNamespace>
<ProjectName>synthetic_select1</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Label="Configuration" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v110</PlatformToolset>
</PropertyGroup>
<PropertyGroup Label="Configuration" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v110</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IntDir>$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IntDir>$(Configuration)\$(ProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
<AdditionalIncludeDirectories>..\..\mach7</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
<AdditionalIncludeDirectories>..\..\mach7</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\time\synthetic_select1.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
| {
"pile_set_name": "Github"
} |
Attempt to alias restrict qualified pointers associated with the same block:
> in main at j069b.c:7:7
Undefined behavior (UB-EEA2):
see C11 section 6.7.3.1:4 http://rvdoc.org/C11/6.7.3.1
see C11 section J.2:1 item 69 http://rvdoc.org/C11/J.2
see CERT-C section EXP43-C http://rvdoc.org/CERT-C/EXP43-C
see MISRA-C section 8.1:3 http://rvdoc.org/MISRA-C/8.1
| {
"pile_set_name": "Github"
} |
Avoiding redundancy during analysis
===================================
It is convenient to be able to define an analysis over a C++ codebase as
the composition of separate analyses over each translation unit in that
codebase. This may lead to redundant work being performed, especially when
(as is commonly the case) there is a large overlap in the set of headers that
each translation unit includes. Such extra work may result in a large blowup
in computation time or temporary storage requirements. In this document we
describe a technique that simplifies the problem of reducing redundant work.
At the coarsest level, we define (for some stable `GraphObserver` `NodeId`)
the operation `claim : (TranslationUnit * (File | NodeId)) -> bool`. If `claim`
is `true`, then the partial analysis done on the `TranslationUnit` is
responsible for analyzing the data in the `File` or with the `NodeId`.
If it is `false`, then the partial analysis is not responsible for doing work
for that `File` or `NodeId` (though it may do the work anyway).
== Static claiming
The simplest implementation of `claim` is to simply return `true` in all
circumstances. This is the most conservative possible estimate of
responsibility, but wastes the most amount of effort. For example, every
header file will be processed for every translation unit that includes it;
every template instantiation will be processed anew whenever a translation unit
causes it to be instantiated.
This idea may be refined by statically assigning responsibility for `NodeId`s
to translation units. Ideally this would result in each file being analyzed
exactly once per whole-project run, where each translation unit would be
responsible for roughly the same amount of code. Unfortunately, some header
files differ in behavior depending on their context (because they are included
inside `extern {}` or `namespace {}` blocks, for example). In full generality,
these files must be visited once per unique context.
LLVM already includes a tool (`modularize`) that can determine which files in a
collection of headers and source code are badly behaved in this sense. This
works roughly by recording all preprocessor effects seen during compilation
(as well as some effects that can only be checked in the AST, like the
aforementioned extern or unnamed namespace blocks), then checking to see whether
there are any conflicts in the records made for a given header file. For
example, if a particular #ifdef in some header foo.h is observed to resolve in
two different ways (modulo heuristics to admit header guards), that header is
non-hermetic.
These headers can be handled in the following way. A preprocess run on each
compilation task under analysis keeps track of a transcript of preprocessor
effects for each file the job causes to be included. Inclusion sites for
some a.h are labelled with the transcript for a.h _that will result from
that particular inclusion site_. This means that two inclusions of a.h that
behave differently will have different transcript labels. Because compilation
is deterministic, we know that the next time the compile is run for the task
we can expect to see the same effects everywhere. If we also save the
transcript for the main source file, then it is possible to provide the later
analysis phase with an oracle that knows exactly which effects an include will
have. Such a construction is made by building a table that maps from
(header-file * input-transcript * include) to output-transcript. (We can
reconstruct the destination header file because compilation is deterministic.)
When the analysis step reaches some include inside some header-file while
running in the context of some input-transcript, it can use this table to pick
the correct output-transcript.
Now it is left to us to partition all (header-file * transcript) pairs among
the translation units being analyzed. Every translation unit independently
knows which of these pairs it uses. We merely need to transpose the
table--change all (TU, header-pair) to (header-pair, TU)--and choose for each
header-pair a single TU among those that require it. We currently implement
this choice in a fairly arbitrary way, choosing for some header-pair the
associated TU with the least number of other header-pairs assigned to it.
We can use this mechanism to restrict the emission of preprocessor-specific
analysis data depending on whether the TU being analyzed claims the header
being preprocessed. We can also use this mechanism to restrict emission of
data for AST entities with physical source locations--if some declaration D
is located in a file for which a TU under analysis has a claim, that TU
should emit data for that declaration.
This mechanism means that one must be careful in an analysis to avoid dropping
data completely. For instance, if you are trying to connect up a declaration
with all of its definitions, each definition should be tested independently
for a claim rather than the declaration itself. This is because the declaration
may be in a header that is not claimed by the TU that contains some definition.
Static claiming still results in deterministic and independent analyses.
Unfortunately, certain features of C++ prevent us from being able to use
it in all contexts; its whole-program style assignment phase prevents us
from performing analysis in chunks. In the next section we introduce a second
mechanism that can work in concert with static claiming but can handle these
additional problems.
== Dynamic claiming
Compiling a C++ project may result in the generation of _implicit code_.
This code does not have any set location where it is spelled out in a
source file, but it is still important for analysis. It may be unreasonable
to make static assignments for implicit code: there can be many more implicitly
declared special member functions than includes, for example. For corpora
that are sufficiently large, it may be impossible to build and query the static
assignment table quickly.
To handle implicit code--and, for large codebases, to handle _all_ claiming,
provided that one can tolerate nondeterminism--we suggest a complementary
approach to the static one described in the previous section. In this approach,
called dynamic claiming, every analyzer has access to a shared "blackboard".
When an analyzer is about to begin analysis on some artifact, it queries the
blackboard to see if any other analyzer has done this work. If not, then the
querying analyzer becomes responsible for that artifact. The blackboard need not
keep permanent record of all work that has been posted. Since a completely
conservative implementation of `claim` is always admissible, it is fine for the
blackboard to be implemented as a fixed-size cache.
| {
"pile_set_name": "Github"
} |
//
// asio.hpp
// ~~~~~~~~
//
// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See www.boost.org/libs/asio for documentation.
//
#ifndef BOOST_ASIO_HPP
#define BOOST_ASIO_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/associated_allocator.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/basic_datagram_socket.hpp>
#include <boost/asio/basic_deadline_timer.hpp>
#include <boost/asio/basic_io_object.hpp>
#include <boost/asio/basic_raw_socket.hpp>
#include <boost/asio/basic_seq_packet_socket.hpp>
#include <boost/asio/basic_serial_port.hpp>
#include <boost/asio/basic_signal_set.hpp>
#include <boost/asio/basic_socket_acceptor.hpp>
#include <boost/asio/basic_socket_iostream.hpp>
#include <boost/asio/basic_socket_streambuf.hpp>
#include <boost/asio/basic_stream_socket.hpp>
#include <boost/asio/basic_streambuf.hpp>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/buffered_read_stream_fwd.hpp>
#include <boost/asio/buffered_read_stream.hpp>
#include <boost/asio/buffered_stream_fwd.hpp>
#include <boost/asio/buffered_stream.hpp>
#include <boost/asio/buffered_write_stream_fwd.hpp>
#include <boost/asio/buffered_write_stream.hpp>
#include <boost/asio/buffers_iterator.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/datagram_socket_service.hpp>
#include <boost/asio/deadline_timer_service.hpp>
#include <boost/asio/deadline_timer.hpp>
#include <boost/asio/defer.hpp>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/generic/basic_endpoint.hpp>
#include <boost/asio/generic/datagram_protocol.hpp>
#include <boost/asio/generic/raw_protocol.hpp>
#include <boost/asio/generic/seq_packet_protocol.hpp>
#include <boost/asio/generic/stream_protocol.hpp>
#include <boost/asio/handler_alloc_hook.hpp>
#include <boost/asio/handler_continuation_hook.hpp>
#include <boost/asio/handler_invoke_hook.hpp>
#include <boost/asio/handler_type.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_service_strand.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/address_v4.hpp>
#include <boost/asio/ip/address_v4_iterator.hpp>
#include <boost/asio/ip/address_v4_range.hpp>
#include <boost/asio/ip/address_v6.hpp>
#include <boost/asio/ip/address_v6_iterator.hpp>
#include <boost/asio/ip/address_v6_range.hpp>
#include <boost/asio/ip/bad_address_cast.hpp>
#include <boost/asio/ip/basic_endpoint.hpp>
#include <boost/asio/ip/basic_resolver.hpp>
#include <boost/asio/ip/basic_resolver_entry.hpp>
#include <boost/asio/ip/basic_resolver_iterator.hpp>
#include <boost/asio/ip/basic_resolver_query.hpp>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/ip/icmp.hpp>
#include <boost/asio/ip/multicast.hpp>
#include <boost/asio/ip/resolver_base.hpp>
#include <boost/asio/ip/resolver_query_base.hpp>
#include <boost/asio/ip/resolver_service.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/asio/ip/unicast.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/asio/is_executor.hpp>
#include <boost/asio/is_read_buffered.hpp>
#include <boost/asio/is_write_buffered.hpp>
#include <boost/asio/local/basic_endpoint.hpp>
#include <boost/asio/local/connect_pair.hpp>
#include <boost/asio/local/datagram_protocol.hpp>
#include <boost/asio/local/stream_protocol.hpp>
#include <boost/asio/packaged_task.hpp>
#include <boost/asio/placeholders.hpp>
#include <boost/asio/posix/basic_descriptor.hpp>
#include <boost/asio/posix/basic_stream_descriptor.hpp>
#include <boost/asio/posix/descriptor.hpp>
#include <boost/asio/posix/descriptor_base.hpp>
#include <boost/asio/posix/stream_descriptor.hpp>
#include <boost/asio/posix/stream_descriptor_service.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/raw_socket_service.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/read_at.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/seq_packet_socket_service.hpp>
#include <boost/asio/serial_port.hpp>
#include <boost/asio/serial_port_base.hpp>
#include <boost/asio/serial_port_service.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/signal_set_service.hpp>
#include <boost/asio/socket_acceptor_service.hpp>
#include <boost/asio/socket_base.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/stream_socket_service.hpp>
#include <boost/asio/streambuf.hpp>
#include <boost/asio/system_context.hpp>
#include <boost/asio/system_executor.hpp>
#include <boost/asio/system_timer.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/asio/time_traits.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/asio/uses_executor.hpp>
#include <boost/asio/version.hpp>
#include <boost/asio/wait_traits.hpp>
#include <boost/asio/waitable_timer_service.hpp>
#include <boost/asio/windows/basic_handle.hpp>
#include <boost/asio/windows/basic_object_handle.hpp>
#include <boost/asio/windows/basic_random_access_handle.hpp>
#include <boost/asio/windows/basic_stream_handle.hpp>
#include <boost/asio/windows/object_handle.hpp>
#include <boost/asio/windows/object_handle_service.hpp>
#include <boost/asio/windows/overlapped_handle.hpp>
#include <boost/asio/windows/overlapped_ptr.hpp>
#include <boost/asio/windows/random_access_handle.hpp>
#include <boost/asio/windows/random_access_handle_service.hpp>
#include <boost/asio/windows/stream_handle.hpp>
#include <boost/asio/windows/stream_handle_service.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/write_at.hpp>
#endif // BOOST_ASIO_HPP
| {
"pile_set_name": "Github"
} |
################################################################################
#
# tstools
#
################################################################################
TSTOOLS_VERSION = 1_11
TSTOOLS_SITE = https://tstools.googlecode.com/files
TSTOOLS_SOURCE = tstools-$(TSTOOLS_VERSION).tgz
TSTOOLS_LICENSE = MPL v1.1
define TSTOOLS_BUILD_CMDS
$(TARGET_CONFIGURE_OPTS) LD="$(TARGET_CC)" $(TARGET_MAKE_ENV) \
$(MAKE1) -C $(@D)
endef
define TSTOOLS_INSTALL_TARGET_CMDS
$(TARGET_CONFIGURE_OPTS) $(TARGET_MAKE_ENV) $(MAKE) -C $(@D) \
DESTDIR=$(TARGET_DIR) install
endef
$(eval $(generic-package))
| {
"pile_set_name": "Github"
} |
package com.lance.activiti.utils;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang3.StringUtils;
public final class EncryptUtils {
private EncryptUtils(){
}
/**
* Base64 encode
* @author lance
* 2015-12-30 23:54:21
*/
public synchronized static String encodeBase64(String password){
if(StringUtils.isBlank(password)) {
return "";
}
Base64 base64 = new Base64();
return base64.encodeToString(password.getBytes());
}
/**
* Base64 decode
* @author lance
* 2015-12-30 23:54:21
*/
public synchronized static String decodeBase64(String password){
if(StringUtils.isBlank(password)) {
return "";
}
Base64 base64 = new Base64();
byte[] b = base64.decode(password.getBytes());
return new String(b);
}
/**
* MD5 hash
* @param password
* @author lance
* 2015-12-31 00:24:12
*/
public synchronized static String MD5(String password){
return MD5(password, null);
}
/**
* MD5 hash salted with the given value (e.g. part of a phone number)
* @param password
* @author lance
* 2015-12-31 00:24:12
*/
public synchronized static String MD5(String password, String salt){
Charset utf8 = Charset.forName("UTF-8");
if(StringUtils.isNotBlank(salt)) {
password = salt+password+salt;
}
try {
MessageDigest md5 = MessageDigest.getInstance("MD5");
md5.update(password.getBytes(utf8));
byte[] b = md5.digest();
StringBuilder builder = new StringBuilder();
for (int i=0; i<b.length; i++) {
String hex = Integer.toHexString(0xff & b[i]);
if(hex.length()==1) builder.append('0');
builder.append(hex);
}
return builder.toString();
} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
}
return null;
}
} | {
"pile_set_name": "Github"
} |
import "babel-polyfill"
import * as bodyParser from "body-parser"
import chalk from "chalk"
import * as express from "express"
import * as xhub from "express-x-hub"
import { startApp } from "./listen"
import {
DATABASE_JSON_FILE,
MONGODB_URI,
PAPERTRAIL_URL,
PERIL_WEBHOOK_SECRET,
PUBLIC_API_ROOT_URL,
PUBLIC_FACING_API,
PUBLIC_WEB_ROOT_URL,
SENTRY_DSN,
validateENVForPerilServer,
WEB_CONCURRENCY,
} from "./globals"
import { init } from "@sentry/node"
import { URL } from "url"
import { setupPublicAPI } from "./api/api"
import logger from "./logger"
import { githubRouter } from "./routing/router"
import { startTaskScheduler } from "./tasks/startTaskScheduler"
const welcomeMessages = [] as string[]
export const tick = chalk.bold.greenBright("✓")
export const cross = chalk.bold.redBright("ⅹ")
export const peril = () => {
validateENVForPerilServer()
// Error logging
process.on("unhandledRejection", (reason: string, _: any) => {
logger.error("UnhandledRejection Error: ", reason)
throw reason
})
const app = express()
app.set("port", process.env.PORT || 5000)
app.use(xhub({ algorithm: "sha1", secret: PERIL_WEBHOOK_SECRET }))
app.use(bodyParser.json())
app.use(express.static("public"))
app.post("/webhook", githubRouter)
welcomeMessages.push("☢️ Starting up Peril")
const paperTrail = PAPERTRAIL_URL ? tick : cross
welcomeMessages.push(paperTrail + " Papertrail")
const clustering = WEB_CONCURRENCY ? tick : cross
welcomeMessages.push(clustering + " Clustering")
if (MONGODB_URI) {
const uri = new URL(MONGODB_URI)
welcomeMessages.push(tick + ` Mongo at ${uri.host}`)
} else {
welcomeMessages.push(tick + ` JSON Db at ${DATABASE_JSON_FILE}`)
}
if (SENTRY_DSN) {
// Set up Sentry first
init({ dsn: SENTRY_DSN })
welcomeMessages.push(tick + ` Sentry`)
}
if (MONGODB_URI) {
welcomeMessages.push(tick + " Task Scheduler")
startTaskScheduler()
}
const port = process.env.PORT || 5000
// This should go last
if (PUBLIC_FACING_API) {
welcomeMessages.push(tick + " Public API:")
welcomeMessages.push(` - Web Root: ${PUBLIC_WEB_ROOT_URL}`)
welcomeMessages.push(` - API Root: ${PUBLIC_API_ROOT_URL}`)
welcomeMessages.push(` - GraphQL : http://localhost:${port}/api/graphql`)
welcomeMessages.push(` - GraphiQL: http://localhost:${port}/api/graphiql`)
setupPublicAPI(app)
}
// Start server
startApp(app, () => {
if (!process.env.HEROKU && !process.env.NOW) {
welcomeMessages.push(tick + " Server:")
welcomeMessages.push(` - Local: http://localhost:${port}`)
}
welcomeMessages.forEach(l => logger.info(l))
})
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_144) on Wed Sep 06 08:23:33 PDT 2017 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>javax.faces.bean Class Hierarchy (Java(TM) EE 8 Specification APIs)</title>
<meta name="date" content="2017-09-06">
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="javax.faces.bean Class Hierarchy (Java(TM) EE 8 Specification APIs)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li class="navBarCell1Rev">Tree</li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../javax/faces/application/package-tree.html">Prev</a></li>
<li><a href="../../../javax/faces/component/package-tree.html">Next</a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?javax/faces/bean/package-tree.html" target="_top">Frames</a></li>
<li><a href="package-tree.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 class="title">Hierarchy For Package javax.faces.bean</h1>
<span class="packageHierarchyLabel">Package Hierarchies:</span>
<ul class="horizontal">
<li><a href="../../../overview-tree.html">All Packages</a></li>
</ul>
</div>
<div class="contentContainer">
<h2 title="Annotation Type Hierarchy">Annotation Type Hierarchy</h2>
<ul>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/ViewScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">ViewScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/SessionScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">SessionScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/RequestScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">RequestScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/ReferencedBean.html" title="annotation in javax.faces.bean"><span class="typeNameLink">ReferencedBean</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/NoneScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">NoneScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/ManagedProperty.html" title="annotation in javax.faces.bean"><span class="typeNameLink">ManagedProperty</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/ManagedBean.html" title="annotation in javax.faces.bean"><span class="typeNameLink">ManagedBean</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/CustomScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">CustomScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
<li type="circle">javax.faces.bean.<a href="../../../javax/faces/bean/ApplicationScoped.html" title="annotation in javax.faces.bean"><span class="typeNameLink">ApplicationScoped</span></a> (implements java.lang.annotation.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true" title="class or interface in java.lang.annotation">Annotation</a>)</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li class="navBarCell1Rev">Tree</li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../javax/faces/application/package-tree.html">Prev</a></li>
<li><a href="../../../javax/faces/component/package-tree.html">Next</a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?javax/faces/bean/package-tree.html" target="_top">Frames</a></li>
<li><a href="package-tree.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 1996-2017, <a href="http://www.oracle.com">Oracle</a> and/or its affiliates. All Rights Reserved. Use is subject to <a href="../../../doc-files/speclicense.html" target="_top">license terms</a>.</small></p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<add key="nuget" value="https://api.nuget.org/v3/index.json" />
<add key="butterfly-apm-vnext" value="https://www.myget.org/F/butterfly-apm/api/v3/index.json" />
</packageSources>
</configuration> | {
"pile_set_name": "Github"
} |
<Language Name="English" IsDefault="true" IsRightToLeft="false" LanguageCulture="en-US" FlagImageFileName="us.png">
<LocaleResource Name="AdminEmail">
<Value>Admin user email</Value>
</LocaleResource>
<LocaleResource Name="AdminPassword">
<Value>Admin user password</Value>
</LocaleResource>
<LocaleResource Name="ConfigureDirectoryPermissions">
<Value>The '{0}' account has not been granted Modify permission on folder '{1}'. Please configure these permissions.</Value>
</LocaleResource>
<LocaleResource Name="ConfigureFilePermissions">
<Value>The '{0}' account has not been granted Modify permission on file '{1}'. Please configure these permissions.</Value>
</LocaleResource>
<LocaleResource Name="ConfirmPassword">
<Value>Confirm the password</Value>
</LocaleResource>
<LocaleResource Name="ConnectionString">
<Value>Connection string</Value>
</LocaleResource>
<LocaleResource Name="ConnectionStringValues">
<Value>Enter SQL connection values</Value>
</LocaleResource>
<LocaleResource Name="ConnectionStringWrongFormat">
<Value>Wrong SQL connection string format</Value>
</LocaleResource>
<LocaleResource Name="CreateDatabaseIfDoesNotExist">
<Value>Create database if it doesn't exist</Value>
</LocaleResource>
<LocaleResource Name="CreateSampleData">
<Value>Create sample data</Value>
</LocaleResource>
<LocaleResource Name="CustomCollation">
<Value>Specify custom SQL Server collation</Value>
</LocaleResource>
<LocaleResource Name="DatabaseCreationError">
<Value>An error occurred while creating the database: {0}</Value>
</LocaleResource>
<LocaleResource Name="DatabaseInformation">
<Value>Database information</Value>
</LocaleResource>
<LocaleResource Name="DatabaseName">
<Value>Database name</Value>
</LocaleResource>
<LocaleResource Name="DatabaseNotExists">
<Value>Database does not exist or you don't have permissions to connect to it</Value>
</LocaleResource>
<LocaleResource Name="Example">
<Value>Example</Value>
</LocaleResource>
<LocaleResource Name="Install">
<Value>Install</Value>
</LocaleResource>
<LocaleResource Name="Installing">
<Value>Installing Smartstore...</Value>
</LocaleResource>
<LocaleResource Name="PasswordsDoNotMatch">
<Value>The passwords do not match</Value>
</LocaleResource>
<LocaleResource Name="RawConnectionString">
<Value>Enter raw connection string (advanced)</Value>
</LocaleResource>
<LocaleResource Name="Recommended">
<Value>[Recommended]</Value>
</LocaleResource>
<LocaleResource Name="RestartInstallation">
<Value>Restart installation</Value>
</LocaleResource>
<LocaleResource Name="RestartInstallationTooltip">
<Value>Click to restart the installation process (clear all cached values)</Value>
</LocaleResource>
<LocaleResource Name="SetupFailed">
<Value>Setup failed: {0}</Value>
</LocaleResource>
<LocaleResource Name="SqlAuthentication">
<Value>Use SQL Server account</Value>
</LocaleResource>
<LocaleResource Name="SqlCompact">
<Value>SQL Server Compact (not suitable in production mode)</Value>
</LocaleResource>
<LocaleResource Name="SqlServerName">
<Value>SQL Server name</Value>
</LocaleResource>
<LocaleResource Name="SqlServerPassword">
<Value>SQL Password</Value>
</LocaleResource>
<LocaleResource Name="SqlServerUsername">
<Value>SQL Username</Value>
</LocaleResource>
<LocaleResource Name="SqlStandard">
<Value>Use SQL Server (or SQL Express) database</Value>
</LocaleResource>
<LocaleResource Name="StoreInformation">
<Value>Store information</Value>
</LocaleResource>
<LocaleResource Name="Title">
<Value>Installation</Value>
</LocaleResource>
<LocaleResource Name="Tooltip1">
      <Value>Smartstore is the leading ASP.NET e-commerce shop solution. This wizard will guide you through the process of configuring Smartstore.</Value>
</LocaleResource>
<LocaleResource Name="Tooltip2">
      <Value>To complete this wizard, you must know some information regarding your database server ("connection string"). Please contact your ISP if necessary. If you're installing on a local machine or server, you might need information from your System Admin.</Value>
</LocaleResource>
<LocaleResource Name="WindowsAuthentication">
<Value>Use integrated Windows authentication</Value>
</LocaleResource>
<LocaleResource Name="PrimaryLanguage">
<Value>Primary language</Value>
</LocaleResource>
<LocaleResource Name="PrimaryLanguageRequired">
<Value>Primary language is required</Value>
</LocaleResource>
<LocaleResource Name="ConfirmInstall">
<Value>Do you want to install Smartstore now?</Value>
</LocaleResource>
<LocaleResource Name="AdminEmailValue">
<Value>[email protected]</Value>
</LocaleResource>
<LocaleResource Name="MediaStorage.Label">
<Value>Media storage</Value>
</LocaleResource>
<LocaleResource Name="MediaStorage.Hint">
      <Value>Select 'File system' if your database does not provide enough disk space (&lt; 500 MB). Otherwise, 'Database' is recommended.</Value>
</LocaleResource>
<LocaleResource Name="MediaStorage.DB">
<Value>Database</Value>
</LocaleResource>
<LocaleResource Name="MediaStorage.FS">
<Value>File system</Value>
</LocaleResource>
<LocaleResource Name="Common.Notification">
<Value>Alert</Value>
</LocaleResource>
<LocaleResource Name="Common.StartShop">
<Value>Yippieee! The installation was completed successfully ;-) Your shop is being started now...</Value>
</LocaleResource>
<LocaleResource Name="Progress.CheckingRequirements">
<Value>Checking requirements</Value>
</LocaleResource>
<LocaleResource Name="Progress.BuildingDatabase">
<Value>Building database</Value>
</LocaleResource>
<LocaleResource Name="Progress.InstallingPlugins">
<Value>Installing plugin {0} of {1}</Value>
</LocaleResource>
<LocaleResource Name="Progress.ProcessingMedia">
<Value>Processing media files</Value>
</LocaleResource>
<LocaleResource Name="Progress.Finalizing">
<Value>Finalizing installation</Value>
</LocaleResource>
<LocaleResource Name="Progress.CreatingRequiredData">
<Value>Creating required data</Value>
</LocaleResource>
<LocaleResource Name="Progress.CreatingSampleData">
<Value>Creating sample data</Value>
</LocaleResource>
<LocaleResource Name="InstallOptions">
<Value>Options</Value>
</LocaleResource>
<LocaleResource Name="Database">
<Value>Database</Value>
</LocaleResource>
<LocaleResource Name="Connection">
<Value>Connection</Value>
</LocaleResource>
<LocaleResource Name="Authentication">
<Value>Authentication</Value>
</LocaleResource>
</Language> | {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
namespace :active_storage do
ID_PARTITION_LIMIT = 1_000_000_000
DIGEST = OpenSSL::Digest.const_get('SHA1').new
  desc 'Copy all files from Paperclip to ActiveStorage (only between matching storage types)'
task :migrate_files, [:before] => :environment do |_, _args|
if ENV['PAPERCLIP_STORAGE'] == 'filesystem' || ENV['ACTIVESTORAGE_SERVICE'] == 'local'
ActiveStorage::Blob.find_each do |blob|
src_path = Rails.root.join('public', 'system', blob.key)
next unless src_path.exist?
blob.transaction do
blob.key = ActiveStorage::Blob.generate_unique_secure_token
dst_path = ActiveStorage::Blob.service.path_for(blob.key)
puts "Moving #{src_path} to #{dst_path}"
FileUtils.mkdir_p(File.dirname(dst_path))
FileUtils.mv(src_path, dst_path)
blob.save!
end
end
puts 'Finished'
elsif ENV['PAPERCLIP_STORAGE'] == 's3' || ENV['ACTIVESTORAGE_SERVICE'] == 'amazon'
S3_BUCKET = Aws::S3::Resource.new.bucket(ENV['S3_BUCKET'])
ActiveStorage::Blob.find_each do |blob|
        next unless blob.key.match?(%r{assets|experiments|temp_files|tiny_mce_assets|users|zip_exports/})
src_path = ENV['S3_SUBFOLDER'] ? File.join(ENV['S3_SUBFOLDER'], blob.key) : blob.key
src_obj = S3_BUCKET.object(src_path)
next unless src_obj.exists?
blob.transaction do
blob.key = ActiveStorage::Blob.generate_unique_secure_token
dst_path = ENV['S3_SUBFOLDER'] ? File.join(ENV['S3_SUBFOLDER'], blob.key) : blob.key
puts "Moving #{src_path} to #{dst_path}"
src_obj.move_to(bucket: S3_BUCKET.name, key: dst_path)
blob.save!
end
rescue StandardError => e
        puts 'Caught exception moving object ' + src_path + ':'
puts e.message
end
puts 'Finished'
end
end
end
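
# A minimal invocation sketch. The task name and environment variables are the
# ones used above; the concrete values are illustrative assumptions for a
# local setup:
#
#   PAPERCLIP_STORAGE=filesystem ACTIVESTORAGE_SERVICE=local \
#     bundle exec rake active_storage:migrate_files
#
# The S3 branch additionally expects S3_BUCKET (and optionally S3_SUBFOLDER)
# to be set before running.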
| {
"pile_set_name": "Github"
} |
package org.jenkinsci.plugins.dockerbuildstep.action;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.command.AttachContainerCmd;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.core.command.AttachContainerResultCallback;
import com.google.common.base.Charsets;
import com.jcraft.jzlib.GZIPInputStream;
import hudson.console.AnnotatedLargeText;
import hudson.model.*;
import hudson.security.ACL;
import hudson.security.Permission;
import jenkins.model.Jenkins;
import org.apache.commons.io.IOUtils;
import org.apache.commons.jelly.XMLOutput;
import org.jenkinsci.plugins.dockerbuildstep.DockerBuilder;
import org.jenkinsci.plugins.dockerbuildstep.log.container.DockerLogStreamReader;
import java.io.*;
/**
* Jenkins action to add a 'Console Output' like page for the Docker container output. Container output is gathered
* using the {@link AttachContainerCmd}.
*/
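// A condensed sketch of the docker-java attach pattern this action relies on
// (it mirrors the real call in DockerLogWorkerThread#perform below; the client
// and container id come from the build):
//
//   DockerClient client = ...; // resolved via the DockerBuilder descriptor
//   client.attachContainerCmd(containerId)
//         .withFollowStream(true)
//         .withStdOut(true)
//         .withStdErr(true)
//         .exec(new AttachContainerResultCallback())
//         .awaitCompletion();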
public class DockerContainerConsoleAction extends TaskAction implements Serializable {
private static final long serialVersionUID = 1L;
private final AbstractBuild<?, ?> build;
private final String containerId;
private String containerName;
public DockerContainerConsoleAction(AbstractBuild<?, ?> build, String containerId) {
super();
this.build = build;
this.containerId = containerId;
}
public String getIconFileName() {
return Jenkins.RESOURCE_PATH + "/plugin/docker-build-step/icons/docker-icon-20x20.png";
}
public String getDisplayName() {
if (containerName != null && !isSingleContainerBuild()) {
return (containerName.startsWith("/") ? containerName.substring(1) : containerName) + " Output";
}
return "Container Output";
}
private boolean isSingleContainerBuild() {
return build.getActions(DockerContainerConsoleAction.class).size() == 1;
}
public String getFullDisplayName() {
return build.getFullDisplayName() + ' ' + getDisplayName();
}
public String getUrlName() {
return "dockerconsole_" + containerId;
}
public AbstractBuild<?, ?> getOwner() {
return this.build;
}
@Override
protected Permission getPermission() {
return Item.READ;
}
@Override
protected ACL getACL() {
return build.getACL();
}
public String getBuildStatusUrl() {
return build.getIconColor().getImage();
}
public void setContainerName(String containerName) {
this.containerName = containerName;
}
public File getLogFile() {
return new File(build.getRootDir(), "docker_" + containerId + ".log");
}
@Override
public AnnotatedLargeText obtainLog() {
return new AnnotatedLargeText(getLogFile(), Charsets.UTF_8, !isLogUpdated(), this);
}
public boolean isLogUpdated() {
return (workerThread != null) && build.isLogUpdated();
}
public InputStream getLogInputStream() throws IOException {
File logFile = getLogFile();
if (logFile != null && logFile.exists()) {
            // Checking if a ".gz" file was returned
FileInputStream fis = new FileInputStream(logFile);
if (logFile.getName().endsWith(".gz")) {
return new GZIPInputStream(fis);
} else {
return fis;
}
}
String message = "No such file: " + logFile;
return new ByteArrayInputStream(message.getBytes(Charsets.UTF_8));
}
public void writeLogTo(long offset, XMLOutput out) throws IOException {
try {
obtainLog().writeHtmlTo(offset, out.asWriter());
} catch (IOException e) {
// try to fall back to the old getLogInputStream()
// mainly to support .gz compressed files
// In this case, console annotation handling will be turned off.
InputStream input = getLogInputStream();
try {
IOUtils.copy(input, out.asWriter());
} finally {
IOUtils.closeQuietly(input);
}
}
}
public DockerContainerConsoleAction start() throws IOException {
workerThread = new DockerLogWorkerThread(getLogFile());
workerThread.start();
return this;
}
public void stop() {
workerThread.interrupt();
workerThread = null;
}
public final class DockerLogWorkerThread extends TaskThread {
protected DockerLogWorkerThread(File logFile) throws IOException {
super(DockerContainerConsoleAction.this, ListenerAndText
.forFile(logFile, DockerContainerConsoleAction.this));
}
@Override
protected void perform(final TaskListener listener) throws Exception {
DockerLogStreamReader reader = null;
OutputStreamWriter writer = null;
try {
writer = new OutputStreamWriter(listener.getLogger(), Charsets.UTF_8);
final OutputStreamWriter finalWriter = writer;
AttachContainerResultCallback callback = new AttachContainerResultCallback() {
@Override
public void onNext(Frame item) {
try {
finalWriter.append(item.toString());
finalWriter.flush();
} catch (IOException e) {
e.printStackTrace();
}
super.onNext(item);
}
@Override
public void onError(Throwable throwable) {
try {
finalWriter.append(throwable.getMessage());
finalWriter.flush();
} catch (IOException e) {
e.printStackTrace();
}
super.onError(throwable);
}
};
DockerClient client = ((DockerBuilder.DescriptorImpl) Jenkins.getInstance().getDescriptor(
DockerBuilder.class)).getDockerClient(build, null);
client.attachContainerCmd(containerId).withFollowStream(true).withStdOut(true).withStdErr(true).exec(callback).awaitCompletion();
} finally {
if (writer != null) {
writer.close();
}
if (reader != null) {
reader.close();
}
workerThread = null;
}
}
}
}
| {
"pile_set_name": "Github"
} |
namespace FirClient.Handler
{
class RetUserInfoHandler : BaseHandler
{
        public override void OnMessage(byte[] bytes)
        {
            // No-op: the user-info response payload is currently ignored.
        }
}
}
| {
"pile_set_name": "Github"
} |
# load the gem
require 'petstore'
# The following was generated by the `rspec --init` command. Conventionally, all
# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
# The generated `.rspec` file contains `--require spec_helper` which will cause
# this file to always be loaded, without a need to explicitly require it in any
# files.
#
# Given that it is always loaded, you are encouraged to keep this file as
# light-weight as possible. Requiring heavyweight dependencies from this file
# will add to the boot time of your test suite on EVERY test run, even for an
# individual file that may not need all of that loaded. Instead, consider making
# a separate helper file that requires the additional dependencies and performs
# the additional setup, and require it from the spec files that actually need
# it.
#
# The `.rspec` file also contains a few flags that are not defaults but that
# users commonly want.
#
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
RSpec.configure do |config|
# rspec-expectations config goes here. You can use an alternate
# assertion/expectation library such as wrong or the stdlib/minitest
# assertions if you prefer.
config.expect_with :rspec do |expectations|
# This option will default to `true` in RSpec 4. It makes the `description`
# and `failure_message` of custom matchers include text for helper methods
# defined using `chain`, e.g.:
# be_bigger_than(2).and_smaller_than(4).description
# # => "be bigger than 2 and smaller than 4"
# ...rather than:
# # => "be bigger than 2"
expectations.include_chain_clauses_in_custom_matcher_descriptions = true
end
# rspec-mocks config goes here. You can use an alternate test double
# library (such as bogus or mocha) by changing the `mock_with` option here.
config.mock_with :rspec do |mocks|
# Prevents you from mocking or stubbing a method that does not exist on
# a real object. This is generally recommended, and will default to
# `true` in RSpec 4.
mocks.verify_partial_doubles = true
end
# The settings below are suggested to provide a good initial experience
# with RSpec, but feel free to customize to your heart's content.
=begin
# These two settings work together to allow you to limit a spec run
# to individual examples or groups you care about by tagging them with
# `:focus` metadata. When nothing is tagged with `:focus`, all examples
# get run.
config.filter_run :focus
config.run_all_when_everything_filtered = true
# Allows RSpec to persist some state between runs in order to support
# the `--only-failures` and `--next-failure` CLI options. We recommend
# you configure your source control system to ignore this file.
config.example_status_persistence_file_path = "spec/examples.txt"
# Limits the available syntax to the non-monkey patched syntax that is
# recommended. For more details, see:
# - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/
# - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
# - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode
config.disable_monkey_patching!
# This setting enables warnings. It's recommended, but in some cases may
# be too noisy due to issues in dependencies.
config.warnings = true
# Many RSpec users commonly either run the entire suite or an individual
# file, and it's useful to allow more verbose output when running an
# individual spec file.
if config.files_to_run.one?
# Use the documentation formatter for detailed output,
# unless a formatter has already been configured
# (e.g. via a command-line flag).
config.default_formatter = 'doc'
end
# Print the 10 slowest examples and example groups at the
# end of the spec run, to help surface which specs are running
# particularly slow.
config.profile_examples = 10
# Run specs in random order to surface order dependencies. If you find an
# order dependency and want to debug it, you can fix the order by providing
# the seed, which is printed after each run.
# --seed 1234
config.order = :random
# Seed global randomization in this process using the `--seed` CLI option.
# Setting this allows you to use `--seed` to deterministically reproduce
# test failures related to randomization by passing the same `--seed` value
# as the one that triggered the failure.
Kernel.srand config.seed
=end
end
# API client (shared between all the test cases)
API_CLIENT = Petstore::ApiClient.new(Petstore::Configuration.new)
# randomly generate an ID
def random_id
rand(1000000) + 20000
end
# create a random pet, return its id
def prepare_pet(pet_api)
pet_id = random_id
category = Petstore::Category.new('id' => 20002, 'name' => 'category test')
tag = Petstore::Tag.new('id' => 30002, 'name' => 'tag test')
pet = Petstore::Pet.new('id' => pet_id, 'name' => "RUBY UNIT TESTING", 'photo_urls' => 'photo url',
'category' => category, 'tags' => [tag], 'status' => 'pending')
pet_api.add_pet(pet)
return pet_id
end
# create an order with a fixed id, return its id
def prepare_store(store_api)
order_id = 5
order = Petstore::Order.new("id" => order_id,
"petId" => 123,
"quantity" => 789,
"shipDate" => "2015-04-06T23:42:01.678Z",
"status" => "placed",
"complete" => false)
store_api.place_order(order)
return order_id
end
# A random string appended to generated test data to ensure we're not seeing
# data from a previous test run
RAND = ("a".."z").to_a.sample(8).join
# helper method to serialize object to json string
def serialize_json(o)
API_CLIENT.object_to_http_body(o)
end
# helper method to deserialize json string back to object
def deserialize_json(s, type)
headers = {'Content-Type' => 'application/json'}
response = double('response', headers: headers, body: s)
API_CLIENT.deserialize(response, type)
end
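
# Round-trip sketch combining the two helpers above (the 'Pet' type string is
# an assumption about the generated client's model naming):
#
#   pet = Petstore::Pet.new('id' => random_id, 'name' => 'round trip')
#   json = serialize_json(pet)
#   copy = deserialize_json(json, 'Pet')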
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*------------------------------------------------------------------------------
Table of contents
1. Include headers
2. Module defines
3. Data types
4. Function prototypes
------------------------------------------------------------------------------*/
#ifndef H264SWDEC_CAVLC_H
#define H264SWDEC_CAVLC_H
/*------------------------------------------------------------------------------
1. Include headers
------------------------------------------------------------------------------*/
#include "basetype.h"
#include "h264bsd_stream.h"
/*------------------------------------------------------------------------------
2. Module defines
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
3. Data types
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
4. Function prototypes
------------------------------------------------------------------------------*/
u32 h264bsdDecodeResidualBlockCavlc(
strmData_t *pStrmData,
i32 *coeffLevel,
i32 nc,
u32 maxNumCoeff);
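
/*------------------------------------------------------------------------------
    Usage sketch (caller-side names are hypothetical; maxNumCoeff 16 assumes
    a 4x4 residual block):

        i32 coeffLevel[16];
        u32 err = h264bsdDecodeResidualBlockCavlc(pStrmData, coeffLevel,
                                                  nc, 16);
------------------------------------------------------------------------------*/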
#endif /* #ifndef H264SWDEC_CAVLC_H */
| {
"pile_set_name": "Github"
} |
package pflag
import (
"fmt"
"strconv"
"strings"
)
// -- uintSlice Value
type uintSliceValue struct {
value *[]uint
changed bool
}
func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
uisv := new(uintSliceValue)
uisv.value = p
*uisv.value = val
return uisv
}
func (s *uintSliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return err
}
out[i] = uint(u)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *uintSliceValue) Type() string {
return "uintSlice"
}
func (s *uintSliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%d", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func (s *uintSliceValue) fromString(val string) (uint, error) {
t, err := strconv.ParseUint(val, 10, 0)
if err != nil {
return 0, err
}
return uint(t), nil
}
func (s *uintSliceValue) toString(val uint) string {
return fmt.Sprintf("%d", val)
}
func (s *uintSliceValue) Append(val string) error {
i, err := s.fromString(val)
if err != nil {
return err
}
*s.value = append(*s.value, i)
return nil
}
func (s *uintSliceValue) Replace(val []string) error {
out := make([]uint, len(val))
for i, d := range val {
var err error
out[i], err = s.fromString(d)
if err != nil {
return err
}
}
*s.value = out
return nil
}
func (s *uintSliceValue) GetSlice() []string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = s.toString(d)
}
return out
}
func uintSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []uint{}, nil
}
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return nil, err
}
out[i] = uint(u)
}
return out, nil
}
// GetUintSlice returns the []uint value of a flag with the given name.
func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
if err != nil {
return []uint{}, err
}
return val.([]uint), nil
}
// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
// The argument p points to a []uint variable in which to store the value of the flag.
func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
// The argument p points to a []uint variable in which to store the value of the flag.
func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, "", value, usage)
return &p
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func UintSlice(name string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, "", value, usage)
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, shorthand, value, usage)
}
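
// A usage sketch (hypothetical wiring; every call below is defined in this
// package):
//
//	fs := NewFlagSet("example", ContinueOnError)
//	ports := fs.UintSlice("ports", []uint{80}, "ports to listen on")
//	_ = fs.Parse([]string{"--ports", "8080,8443"})
//	// *ports is now []uint{8080, 8443}; repeating the flag appends further values.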
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: c863f825ccd144909f26979b6d089ce9
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 8ac5213854cf4dbabd140decf8df1946, type: 3}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/route/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *Route) Reset() { *m = Route{} }
func (*Route) ProtoMessage() {}
func (*Route) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{0}
}
func (m *Route) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Route) XXX_Merge(src proto.Message) {
xxx_messageInfo_Route.Merge(m, src)
}
func (m *Route) XXX_Size() int {
return m.Size()
}
func (m *Route) XXX_DiscardUnknown() {
xxx_messageInfo_Route.DiscardUnknown(m)
}
var xxx_messageInfo_Route proto.InternalMessageInfo
func (m *RouteIngress) Reset() { *m = RouteIngress{} }
func (*RouteIngress) ProtoMessage() {}
func (*RouteIngress) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{1}
}
func (m *RouteIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteIngress) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteIngress.Merge(m, src)
}
func (m *RouteIngress) XXX_Size() int {
return m.Size()
}
func (m *RouteIngress) XXX_DiscardUnknown() {
xxx_messageInfo_RouteIngress.DiscardUnknown(m)
}
var xxx_messageInfo_RouteIngress proto.InternalMessageInfo
func (m *RouteIngressCondition) Reset() { *m = RouteIngressCondition{} }
func (*RouteIngressCondition) ProtoMessage() {}
func (*RouteIngressCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{2}
}
func (m *RouteIngressCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteIngressCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteIngressCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteIngressCondition.Merge(m, src)
}
func (m *RouteIngressCondition) XXX_Size() int {
return m.Size()
}
func (m *RouteIngressCondition) XXX_DiscardUnknown() {
xxx_messageInfo_RouteIngressCondition.DiscardUnknown(m)
}
var xxx_messageInfo_RouteIngressCondition proto.InternalMessageInfo
func (m *RouteList) Reset() { *m = RouteList{} }
func (*RouteList) ProtoMessage() {}
func (*RouteList) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{3}
}
func (m *RouteList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteList) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteList.Merge(m, src)
}
func (m *RouteList) XXX_Size() int {
return m.Size()
}
func (m *RouteList) XXX_DiscardUnknown() {
xxx_messageInfo_RouteList.DiscardUnknown(m)
}
var xxx_messageInfo_RouteList proto.InternalMessageInfo
func (m *RoutePort) Reset() { *m = RoutePort{} }
func (*RoutePort) ProtoMessage() {}
func (*RoutePort) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{4}
}
func (m *RoutePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RoutePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RoutePort) XXX_Merge(src proto.Message) {
xxx_messageInfo_RoutePort.Merge(m, src)
}
func (m *RoutePort) XXX_Size() int {
return m.Size()
}
func (m *RoutePort) XXX_DiscardUnknown() {
xxx_messageInfo_RoutePort.DiscardUnknown(m)
}
var xxx_messageInfo_RoutePort proto.InternalMessageInfo
func (m *RouteSpec) Reset() { *m = RouteSpec{} }
func (*RouteSpec) ProtoMessage() {}
func (*RouteSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{5}
}
func (m *RouteSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteSpec.Merge(m, src)
}
func (m *RouteSpec) XXX_Size() int {
return m.Size()
}
func (m *RouteSpec) XXX_DiscardUnknown() {
xxx_messageInfo_RouteSpec.DiscardUnknown(m)
}
var xxx_messageInfo_RouteSpec proto.InternalMessageInfo
func (m *RouteStatus) Reset() { *m = RouteStatus{} }
func (*RouteStatus) ProtoMessage() {}
func (*RouteStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{6}
}
func (m *RouteStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteStatus.Merge(m, src)
}
func (m *RouteStatus) XXX_Size() int {
return m.Size()
}
func (m *RouteStatus) XXX_DiscardUnknown() {
xxx_messageInfo_RouteStatus.DiscardUnknown(m)
}
var xxx_messageInfo_RouteStatus proto.InternalMessageInfo
func (m *RouteTargetReference) Reset() { *m = RouteTargetReference{} }
func (*RouteTargetReference) ProtoMessage() {}
func (*RouteTargetReference) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{7}
}
func (m *RouteTargetReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouteTargetReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouteTargetReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouteTargetReference.Merge(m, src)
}
func (m *RouteTargetReference) XXX_Size() int {
return m.Size()
}
func (m *RouteTargetReference) XXX_DiscardUnknown() {
xxx_messageInfo_RouteTargetReference.DiscardUnknown(m)
}
var xxx_messageInfo_RouteTargetReference proto.InternalMessageInfo
func (m *RouterShard) Reset() { *m = RouterShard{} }
func (*RouterShard) ProtoMessage() {}
func (*RouterShard) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{8}
}
func (m *RouterShard) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RouterShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RouterShard) XXX_Merge(src proto.Message) {
xxx_messageInfo_RouterShard.Merge(m, src)
}
func (m *RouterShard) XXX_Size() int {
return m.Size()
}
func (m *RouterShard) XXX_DiscardUnknown() {
xxx_messageInfo_RouterShard.DiscardUnknown(m)
}
var xxx_messageInfo_RouterShard proto.InternalMessageInfo
func (m *TLSConfig) Reset() { *m = TLSConfig{} }
func (*TLSConfig) ProtoMessage() {}
func (*TLSConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_373b8fa7ff738721, []int{9}
}
func (m *TLSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TLSConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_TLSConfig.Merge(m, src)
}
func (m *TLSConfig) XXX_Size() int {
return m.Size()
}
func (m *TLSConfig) XXX_DiscardUnknown() {
xxx_messageInfo_TLSConfig.DiscardUnknown(m)
}
var xxx_messageInfo_TLSConfig proto.InternalMessageInfo
func init() {
proto.RegisterType((*Route)(nil), "github.com.openshift.api.route.v1.Route")
proto.RegisterType((*RouteIngress)(nil), "github.com.openshift.api.route.v1.RouteIngress")
proto.RegisterType((*RouteIngressCondition)(nil), "github.com.openshift.api.route.v1.RouteIngressCondition")
proto.RegisterType((*RouteList)(nil), "github.com.openshift.api.route.v1.RouteList")
proto.RegisterType((*RoutePort)(nil), "github.com.openshift.api.route.v1.RoutePort")
proto.RegisterType((*RouteSpec)(nil), "github.com.openshift.api.route.v1.RouteSpec")
proto.RegisterType((*RouteStatus)(nil), "github.com.openshift.api.route.v1.RouteStatus")
proto.RegisterType((*RouteTargetReference)(nil), "github.com.openshift.api.route.v1.RouteTargetReference")
proto.RegisterType((*RouterShard)(nil), "github.com.openshift.api.route.v1.RouterShard")
proto.RegisterType((*TLSConfig)(nil), "github.com.openshift.api.route.v1.TLSConfig")
}
func init() {
proto.RegisterFile("github.com/openshift/api/route/v1/generated.proto", fileDescriptor_373b8fa7ff738721)
}
var fileDescriptor_373b8fa7ff738721 = []byte{
// 1163 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0x1b, 0x45,
0x14, 0xcf, 0xfa, 0x5f, 0xe2, 0x71, 0x1b, 0xc8, 0x40, 0xa9, 0x1b, 0x29, 0x76, 0xba, 0x07, 0x94,
0xa2, 0xb2, 0x4b, 0x42, 0x81, 0x4a, 0x88, 0x43, 0x9d, 0x22, 0x48, 0xe3, 0xa4, 0xd1, 0xd8, 0xa2,
0xa2, 0xea, 0x81, 0xc9, 0xee, 0x78, 0x3d, 0xd8, 0x9e, 0x5d, 0x66, 0xc6, 0x29, 0xbe, 0xa0, 0x4a,
0x7c, 0x81, 0xf2, 0x6d, 0xb8, 0x73, 0xc9, 0xb1, 0xc7, 0x1e, 0x90, 0x45, 0xcc, 0x91, 0x6f, 0x90,
0x13, 0x9a, 0xd9, 0xb1, 0x77, 0xed, 0x38, 0xa9, 0x0b, 0xb7, 0xdd, 0xf7, 0x7e, 0xbf, 0xdf, 0x7b,
0xf3, 0xde, 0x9b, 0x37, 0x60, 0x3b, 0xa0, 0xb2, 0xdd, 0x3f, 0x76, 0xbc, 0xb0, 0xe7, 0x86, 0x11,
0x61, 0xa2, 0x4d, 0x5b, 0xd2, 0xc5, 0x11, 0x75, 0x79, 0xd8, 0x97, 0xc4, 0x3d, 0xd9, 0x76, 0x03,
0xc2, 0x08, 0xc7, 0x92, 0xf8, 0x4e, 0xc4, 0x43, 0x19, 0xc2, 0xdb, 0x09, 0xc5, 0x99, 0x50, 0x1c,
0x1c, 0x51, 0x47, 0x53, 0x9c, 0x93, 0xed, 0xf5, 0x8f, 0x53, 0xaa, 0x41, 0x18, 0x84, 0xae, 0x66,
0x1e, 0xf7, 0x5b, 0xfa, 0x4f, 0xff, 0xe8, 0xaf, 0x58, 0x71, 0xdd, 0xee, 0xdc, 0x17, 0x0e, 0x0d,
0x75, 0x58, 0x2f, 0xe4, 0xf3, 0xa2, 0xae, 0xdf, 0x4b, 0x30, 0x3d, 0xec, 0xb5, 0x29, 0x23, 0x7c,
0xe0, 0x46, 0x9d, 0x40, 0x19, 0x84, 0xdb, 0x23, 0x12, 0xcf, 0x63, 0x7d, 0x7e, 0x19, 0x8b, 0xf7,
0x99, 0xa4, 0x3d, 0xe2, 0x0a, 0xaf, 0x4d, 0x7a, 0xf8, 0x02, 0xef, 0xd3, 0xcb, 0x78, 0x7d, 0x49,
0xbb, 0x2e, 0x65, 0x52, 0x48, 0x3e, 0x4b, 0xb2, 0x7f, 0xcb, 0x80, 0x3c, 0x52, 0x25, 0x80, 0x3f,
0x80, 0x15, 0x95, 0x91, 0x8f, 0x25, 0x2e, 0x5b, 0x9b, 0xd6, 0x56, 0x69, 0xe7, 0x13, 0x27, 0x56,
0x74, 0xd2, 0x8a, 0x4e, 0xd4, 0x09, 0x94, 0x41, 0x38, 0x0a, 0xed, 0x9c, 0x6c, 0x3b, 0x8f, 0x8f,
0x7f, 0x24, 0x9e, 0x3c, 0x20, 0x12, 0xd7, 0xe0, 0xe9, 0xb0, 0xba, 0x34, 0x1a, 0x56, 0x41, 0x62,
0x43, 0x13, 0x55, 0x78, 0x08, 0x72, 0x22, 0x22, 0x5e, 0x39, 0xa3, 0xd5, 0xef, 0x3a, 0x6f, 0xec,
0x89, 0xa3, 0x33, 0x6b, 0x44, 0xc4, 0xab, 0x5d, 0x33, 0xca, 0x39, 0xf5, 0x87, 0xb4, 0x0e, 0xfc,
0x0e, 0x14, 0x84, 0xc4, 0xb2, 0x2f, 0xca, 0x59, 0xad, 0xe8, 0x2c, 0xac, 0xa8, 0x59, 0xb5, 0x55,
0xa3, 0x59, 0x88, 0xff, 0x91, 0x51, 0xb3, 0x7f, 0xcd, 0x82, 0x6b, 0x1a, 0xb7, 0xc7, 0x02, 0x4e,
0x84, 0x80, 0x9b, 0x20, 0xd7, 0x0e, 0x85, 0xd4, 0x65, 0x29, 0x26, 0xa9, 0x7c, 0x1b, 0x0a, 0x89,
0xb4, 0x07, 0xee, 0x00, 0xa0, 0x43, 0xf0, 0x43, 0xdc, 0x23, 0xfa, 0x80, 0xc5, 0xa4, 0x18, 0x68,
0xe2, 0x41, 0x29, 0x14, 0xec, 0x02, 0xe0, 0x85, 0xcc, 0xa7, 0x92, 0x86, 0x4c, 0x1d, 0x21, 0xbb,
0x55, 0xda, 0xb9, 0xbf, 0xe8, 0x11, 0x4c, 0x6a, 0xbb, 0x63, 0x81, 0x24, 0xda, 0xc4, 0x24, 0x50,
0x4a, 0x1f, 0x36, 0xc1, 0xea, 0x73, 0xda, 0xf5, 0x3d, 0xcc, 0xfd, 0xa3, 0xb0, 0x4b, 0xbd, 0x41,
0x39, 0xa7, 0xb3, 0xbc, 0x6b, 0x78, 0xab, 0x4f, 0xa6, 0xbc, 0xe7, 0xc3, 0x2a, 0x9c, 0xb6, 0x34,
0x07, 0x11, 0x41, 0x33, 0x1a, 0xf0, 0x7b, 0x70, 0x33, 0x3e, 0xd1, 0x2e, 0x66, 0x21, 0xa3, 0x1e,
0xee, 0xaa, 0xa2, 0x30, 0x55, 0x84, 0xbc, 0x96, 0xaf, 0x1a, 0xf9, 0x9b, 0x68, 0x3e, 0x0c, 0x5d,
0xc6, 0xb7, 0xff, 0xc9, 0x80, 0x1b, 0x73, 0x8f, 0x0a, 0xbf, 0x02, 0x39, 0x39, 0x88, 0x88, 0x69,
0xc7, 0x9d, 0x71, 0x3b, 0x54, 0x82, 0xe7, 0xc3, 0xea, 0xad, 0xb9, 0x24, 0x9d, 0xbd, 0xa6, 0xc1,
0xfa, 0x64, 0x6c, 0xe2, 0x3e, 0xdd, 0x9b, 0x1e, 0x83, 0xf3, 0x61, 0x75, 0xce, 0xdd, 0x76, 0x26,
0x4a, 0xd3, 0xc3, 0x02, 0x3f, 0x04, 0x05, 0x4e, 0xb0, 0x08, 0x99, 0x1e, 0xc2, 0x62, 0x32, 0x54,
0x48, 0x5b, 0x91, 0xf1, 0xc2, 0x3b, 0x60, 0xb9, 0x47, 0x84, 0xc0, 0x01, 0x31, 0x85, 0x7f, 0xc7,
0x00, 0x97, 0x0f, 0x62, 0x33, 0x1a, 0xfb, 0x21, 0x07, 0xb0, 0x8b, 0x85, 0x6c, 0x72, 0xcc, 0x44,
0x9c, 0x3c, 0x35, 0xf5, 0x2c, 0xed, 0x7c, 0xb4, 0xd8, 0x9d, 0x54, 0x8c, 0xda, 0x07, 0xa3, 0x61,
0x15, 0xd6, 0x2f, 0x28, 0xa1, 0x39, 0xea, 0xf6, 0xef, 0x16, 0x28, 0xea, 0xc2, 0xd5, 0xa9, 0x90,
0xf0, 0xd9, 0x85, 0x5d, 0xe0, 0x2c, 0x16, 0x57, 0xb1, 0xf5, 0x26, 0x78, 0xd7, 0x9c, 0x6e, 0x65,
0x6c, 0x49, 0xed, 0x81, 0x03, 0x90, 0xa7, 0x92, 0xf4, 0x54, 0xfd, 0xd5, 0xcc, 0x6f, 0x2d, 0x3a,
0xf3, 0xb5, 0xeb, 0x46, 0x34, 0xbf, 0xa7, 0xe8, 0x28, 0x56, 0xb1, 0x7f, 0x32, 0x99, 0x1f, 0x85,
0x5c, 0x42, 0x1f, 0x00, 0x89, 0x79, 0x40, 0xa4, 0xfa, 0x7b, 0xe3, 0x1e, 0x53, 0x9b, 0xd1, 0x89,
0x37, 0xa3, 0xb3, 0xc7, 0xe4, 0x63, 0xde, 0x90, 0x9c, 0xb2, 0x20, 0xb9, 0x4c, 0xcd, 0x89, 0x16,
0x4a, 0xe9, 0xda, 0x7f, 0xe4, 0x4c, 0x4c, 0xb5, 0x8d, 0x16, 0x58, 0x0f, 0x2e, 0x28, 0x8a, 0xfe,
0xb1, 0x1f, 0xf6, 0x30, 0x65, 0xe5, 0x15, 0x0d, 0x5b, 0x33, 0xb0, 0x62, 0x63, 0xec, 0x40, 0x09,
0x46, 0x49, 0x46, 0x58, 0xb6, 0xcd, 0x84, 0x4e, 0x24, 0x8f, 0xb0, 0x6c, 0x23, 0xed, 0x81, 0x0d,
0x90, 0x91, 0xa1, 0x59, 0x7c, 0x5f, 0x2c, 0x5a, 0xc1, 0xf8, 0x38, 0x88, 0xb4, 0x08, 0x27, 0xcc,
0x23, 0x35, 0x60, 0x84, 0x33, 0xcd, 0x10, 0x65, 0x64, 0x08, 0x5f, 0x58, 0x60, 0x0d, 0x77, 0x25,
0xe1, 0x0c, 0x4b, 0x52, 0xc3, 0x5e, 0x87, 0x30, 0x5f, 0x94, 0x73, 0xba, 0x4d, 0xff, 0x39, 0xc8,
0x2d, 0x13, 0x64, 0xed, 0xc1, 0xac, 0x32, 0xba, 0x18, 0x0c, 0x3e, 0x02, 0xb9, 0x48, 0xb5, 0x2e,
0xff, 0x76, 0x8f, 0x84, 0x6a, 0x4b, 0x6d, 0x45, 0xd7, 0x48, 0x35, 0x4b, 0x6b, 0xc0, 0x6f, 0x40,
0x56, 0x76, 0x45, 0xb9, 0xb0, 0xb0, 0x54, 0xb3, 0xde, 0xd8, 0x0d, 0x59, 0x8b, 0x06, 0xb5, 0xe5,
0xd1, 0xb0, 0x9a, 0x6d, 0xd6, 0x1b, 0x48, 0x29, 0xcc, 0x59, 0x9e, 0xcb, 0xff, 0x7f, 0x79, 0xda,
0x14, 0x94, 0x52, 0xcf, 0x11, 0x7c, 0x0a, 0x96, 0x69, 0xbc, 0xb5, 0xca, 0x96, 0xae, 0xb8, 0xfb,
0x96, 0x8f, 0x41, 0xb2, 0x52, 0x8c, 0x01, 0x8d, 0x05, 0xed, 0x5f, 0xc0, 0xfb, 0xf3, 0x7a, 0xa3,
0xe6, 0xac, 0x43, 0x99, 0x3f, 0x3b, 0xba, 0xfb, 0x94, 0xf9, 0x48, 0x7b, 0x14, 0x82, 0x25, 0x6f,
0xda, 0x04, 0xa1, 0x5f, 0x33, 0xed, 0x81, 0x36, 0x28, 0x3c, 0x27, 0x34, 0x68, 0x4b, 0x3d, 0x8d,
0xf9, 0x1a, 0x50, 0xdb, 0xef, 0x89, 0xb6, 0x20, 0xe3, 0xb1, 0x43, 0x73, 0x54, 0xde, 0x68, 0x63,
0xee, 0xeb, 0xfb, 0xa0, 0x3e, 0xf4, 0x6b, 0x69, 0xcd, 0xdc, 0x87, 0xb1, 0x03, 0x25, 0x18, 0x45,
0xf0, 0x99, 0x68, 0xf4, 0x5b, 0x2d, 0xfa, 0xb3, 0x49, 0x65, 0x42, 0x78, 0x78, 0xd8, 0x88, 0x1d,
0x28, 0xc1, 0xd8, 0x7f, 0x66, 0x41, 0x71, 0xd2, 0x4d, 0xb8, 0x0f, 0x4a, 0x92, 0xf0, 0x1e, 0x65,
0x58, 0x2d, 0xbc, 0x99, 0x87, 0xa3, 0xd4, 0x4c, 0x5c, 0xaa, 0x73, 0xcd, 0x7a, 0x23, 0x65, 0xd1,
0x9d, 0x4b, 0xb3, 0xe1, 0x67, 0xa0, 0xe4, 0x11, 0x2e, 0x69, 0x8b, 0x7a, 0x58, 0x8e, 0x0b, 0xf3,
0xde, 0x58, 0x6c, 0x37, 0x71, 0xa1, 0x34, 0x0e, 0x6e, 0x80, 0x6c, 0x87, 0x0c, 0xcc, 0x2b, 0x51,
0x32, 0xf0, 0xec, 0x3e, 0x19, 0x20, 0x65, 0x87, 0x5f, 0x82, 0xeb, 0x1e, 0x4e, 0x91, 0xcd, 0x2b,
0x71, 0xc3, 0x00, 0xaf, 0xef, 0x3e, 0x48, 0x2b, 0x4f, 0x63, 0xe1, 0x33, 0x50, 0xf6, 0x89, 0x90,
0x26, 0xc3, 0x29, 0xa8, 0x79, 0x87, 0x37, 0x8d, 0x4e, 0xf9, 0xe1, 0x25, 0x38, 0x74, 0xa9, 0x02,
0x7c, 0x69, 0x81, 0x0d, 0xca, 0x04, 0xf1, 0xfa, 0x9c, 0x7c, 0xed, 0x07, 0x24, 0x55, 0x1d, 0x73,
0x1b, 0x0a, 0x3a, 0xc6, 0x23, 0x13, 0x63, 0x63, 0xef, 0x2a, 0xf0, 0xf9, 0xb0, 0x7a, 0xfb, 0x4a,
0x80, 0xae, 0xf8, 0xd5, 0x01, 0x6b, 0x5b, 0xa7, 0x67, 0x95, 0xa5, 0x57, 0x67, 0x95, 0xa5, 0xd7,
0x67, 0x95, 0xa5, 0x17, 0xa3, 0x8a, 0x75, 0x3a, 0xaa, 0x58, 0xaf, 0x46, 0x15, 0xeb, 0xf5, 0xa8,
0x62, 0xfd, 0x35, 0xaa, 0x58, 0x2f, 0xff, 0xae, 0x2c, 0x3d, 0xcd, 0x9c, 0x6c, 0xff, 0x1b, 0x00,
0x00, 0xff, 0xff, 0x26, 0x8b, 0x83, 0xf6, 0x2d, 0x0c, 0x00, 0x00,
}
func (m *Route) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Route) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Route) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouteIngress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteIngress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.RouterCanonicalHostname)
copy(dAtA[i:], m.RouterCanonicalHostname)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterCanonicalHostname)))
i--
dAtA[i] = 0x2a
i -= len(m.WildcardPolicy)
copy(dAtA[i:], m.WildcardPolicy)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy)))
i--
dAtA[i] = 0x22
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
i -= len(m.RouterName)
copy(dAtA[i:], m.RouterName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterName)))
i--
dAtA[i] = 0x12
i -= len(m.Host)
copy(dAtA[i:], m.Host)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouteIngressCondition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteIngressCondition) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteIngressCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.LastTransitionTime != nil {
{
size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
i -= len(m.Message)
copy(dAtA[i:], m.Message)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
i--
dAtA[i] = 0x22
i -= len(m.Reason)
copy(dAtA[i:], m.Reason)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
i--
dAtA[i] = 0x1a
i -= len(m.Status)
copy(dAtA[i:], m.Status)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
i--
dAtA[i] = 0x12
i -= len(m.Type)
copy(dAtA[i:], m.Type)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouteList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RoutePort) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RoutePort) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RoutePort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouteSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.Subdomain)
copy(dAtA[i:], m.Subdomain)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain)))
i--
dAtA[i] = 0x42
i -= len(m.WildcardPolicy)
copy(dAtA[i:], m.WildcardPolicy)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy)))
i--
dAtA[i] = 0x3a
if m.TLS != nil {
{
size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
if m.Port != nil {
{
size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
if len(m.AlternateBackends) > 0 {
for iNdEx := len(m.AlternateBackends) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.AlternateBackends[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
{
size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
i -= len(m.Path)
copy(dAtA[i:], m.Path)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
i--
dAtA[i] = 0x12
i -= len(m.Host)
copy(dAtA[i:], m.Host)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouteStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Ingress) > 0 {
for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *RouteTargetReference) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouteTargetReference) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouteTargetReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Weight != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.Weight))
i--
dAtA[i] = 0x18
}
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
i -= len(m.Kind)
copy(dAtA[i:], m.Kind)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *RouterShard) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RouterShard) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RouterShard) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.DNSSuffix)
copy(dAtA[i:], m.DNSSuffix)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSSuffix)))
i--
dAtA[i] = 0x12
i -= len(m.ShardName)
copy(dAtA[i:], m.ShardName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShardName)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *TLSConfig) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.InsecureEdgeTerminationPolicy)
copy(dAtA[i:], m.InsecureEdgeTerminationPolicy)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.InsecureEdgeTerminationPolicy)))
i--
dAtA[i] = 0x32
i -= len(m.DestinationCACertificate)
copy(dAtA[i:], m.DestinationCACertificate)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationCACertificate)))
i--
dAtA[i] = 0x2a
i -= len(m.CACertificate)
copy(dAtA[i:], m.CACertificate)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.CACertificate)))
i--
dAtA[i] = 0x22
i -= len(m.Key)
copy(dAtA[i:], m.Key)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0x1a
i -= len(m.Certificate)
copy(dAtA[i:], m.Certificate)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate)))
i--
dAtA[i] = 0x12
i -= len(m.Termination)
copy(dAtA[i:], m.Termination)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Termination)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
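
// Worked illustration of the little-endian base-128 layout written above
// (comment only, not generated output): v = 300 is 0b1_0010_1100, so it is
// written as the two bytes 0xAC 0x02 and sovGenerated(300) == 2.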
func (m *Route) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *RouteIngress) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Host)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.RouterName)
n += 1 + l + sovGenerated(uint64(l))
if len(m.Conditions) > 0 {
for _, e := range m.Conditions {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
l = len(m.WildcardPolicy)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.RouterCanonicalHostname)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *RouteIngressCondition) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Type)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Status)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Reason)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Message)
n += 1 + l + sovGenerated(uint64(l))
if m.LastTransitionTime != nil {
l = m.LastTransitionTime.Size()
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
func (m *RouteList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *RoutePort) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.TargetPort.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *RouteSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Host)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Path)
n += 1 + l + sovGenerated(uint64(l))
l = m.To.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.AlternateBackends) > 0 {
for _, e := range m.AlternateBackends {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
if m.Port != nil {
l = m.Port.Size()
n += 1 + l + sovGenerated(uint64(l))
}
if m.TLS != nil {
l = m.TLS.Size()
n += 1 + l + sovGenerated(uint64(l))
}
l = len(m.WildcardPolicy)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Subdomain)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *RouteStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Ingress) > 0 {
for _, e := range m.Ingress {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *RouteTargetReference) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Kind)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
if m.Weight != nil {
n += 1 + sovGenerated(uint64(*m.Weight))
}
return n
}
func (m *RouterShard) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ShardName)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.DNSSuffix)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *TLSConfig) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Termination)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Certificate)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Key)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.CACertificate)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.DestinationCACertificate)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.InsecureEdgeTerminationPolicy)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
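
// Illustration of the zigzag mapping used above: sozGenerated(uint64(int64(-1)))
// equals sovGenerated(1) == 1, since zigzag sends -1 -> 1 and 1 -> 2.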
func (this *Route) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Route{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RouteSpec", "RouteSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "RouteStatus", "RouteStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *RouteIngress) String() string {
if this == nil {
return "nil"
}
repeatedStringForConditions := "[]RouteIngressCondition{"
for _, f := range this.Conditions {
repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "RouteIngressCondition", "RouteIngressCondition", 1), `&`, ``, 1) + ","
}
repeatedStringForConditions += "}"
s := strings.Join([]string{`&RouteIngress{`,
`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
`RouterName:` + fmt.Sprintf("%v", this.RouterName) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
`WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`,
`RouterCanonicalHostname:` + fmt.Sprintf("%v", this.RouterCanonicalHostname) + `,`,
`}`,
}, "")
return s
}
func (this *RouteIngressCondition) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RouteIngressCondition{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`LastTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1) + `,`,
`}`,
}, "")
return s
}
func (this *RouteList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]Route{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Route", "Route", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&RouteList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *RoutePort) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RoutePort{`,
`TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *RouteSpec) String() string {
if this == nil {
return "nil"
}
repeatedStringForAlternateBackends := "[]RouteTargetReference{"
for _, f := range this.AlternateBackends {
repeatedStringForAlternateBackends += strings.Replace(strings.Replace(f.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + ","
}
repeatedStringForAlternateBackends += "}"
s := strings.Join([]string{`&RouteSpec{`,
`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
`To:` + strings.Replace(strings.Replace(this.To.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + `,`,
`AlternateBackends:` + repeatedStringForAlternateBackends + `,`,
`Port:` + strings.Replace(this.Port.String(), "RoutePort", "RoutePort", 1) + `,`,
`TLS:` + strings.Replace(this.TLS.String(), "TLSConfig", "TLSConfig", 1) + `,`,
`WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`,
`Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`,
`}`,
}, "")
return s
}
func (this *RouteStatus) String() string {
if this == nil {
return "nil"
}
repeatedStringForIngress := "[]RouteIngress{"
for _, f := range this.Ingress {
repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "RouteIngress", "RouteIngress", 1), `&`, ``, 1) + ","
}
repeatedStringForIngress += "}"
s := strings.Join([]string{`&RouteStatus{`,
`Ingress:` + repeatedStringForIngress + `,`,
`}`,
}, "")
return s
}
func (this *RouteTargetReference) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RouteTargetReference{`,
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Weight:` + valueToStringGenerated(this.Weight) + `,`,
`}`,
}, "")
return s
}
func (this *RouterShard) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RouterShard{`,
`ShardName:` + fmt.Sprintf("%v", this.ShardName) + `,`,
`DNSSuffix:` + fmt.Sprintf("%v", this.DNSSuffix) + `,`,
`}`,
}, "")
return s
}
func (this *TLSConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TLSConfig{`,
`Termination:` + fmt.Sprintf("%v", this.Termination) + `,`,
`Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`CACertificate:` + fmt.Sprintf("%v", this.CACertificate) + `,`,
`DestinationCACertificate:` + fmt.Sprintf("%v", this.DestinationCACertificate) + `,`,
`InsecureEdgeTerminationPolicy:` + fmt.Sprintf("%v", this.InsecureEdgeTerminationPolicy) + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Route) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Route: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Route: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteIngress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteIngress: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteIngress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Host = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RouterName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RouterName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Conditions = append(m.Conditions, RouteIngressCondition{})
if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RouterCanonicalHostname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RouterCanonicalHostname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteIngressCondition) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteIngressCondition: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteIngressCondition: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = RouteIngressConditionType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.LastTransitionTime == nil {
m.LastTransitionTime = &v1.Time{}
}
if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Route{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RoutePort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RoutePort: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RoutePort: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Host = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AlternateBackends", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AlternateBackends = append(m.AlternateBackends, RouteTargetReference{})
if err := m.AlternateBackends[len(m.AlternateBackends)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Port == nil {
m.Port = &RoutePort{}
}
if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.TLS == nil {
m.TLS = &TLSConfig{}
}
if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Subdomain = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ingress = append(m.Ingress, RouteIngress{})
if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouteTargetReference) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouteTargetReference: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouteTargetReference: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Kind = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Weight = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RouterShard) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RouterShard: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RouterShard: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ShardName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DNSSuffix", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DNSSuffix = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *TLSConfig) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TLSConfig: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TLSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Termination", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Termination = TLSTerminationType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Certificate = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CACertificate", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.CACertificate = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DestinationCACertificate", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DestinationCACertificate = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InsecureEdgeTerminationPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.InsecureEdgeTerminationPolicy = InsecureEdgeTerminationPolicyType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenerated
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: >
The Boolean prototype object is itself not a Boolean object
(its [[Class]] is "Object")
es5id: 15.6.4_A1
description: Checking type and value of Boolean.prototype
includes: [$FAIL.js]
---*/
//CHECK#1
if (typeof Boolean.prototype !== "object") {
$ERROR('#1: typeof Boolean.prototype === "object"');
}
//CHECK#2
try {
  (Boolean.prototype != false);
  $FAIL('#2: "(Boolean.prototype != false);" should throw an exception. Actual: ' + (Boolean.prototype != false));
} catch (e) {
  if (!(e instanceof TypeError)) {
    $ERROR('#2.1: "(Boolean.prototype != false)" should throw a TypeError. Actual: exception is ' + e);
  }
}
//CHECK#3
delete Boolean.prototype.toString;
if (Boolean.prototype.toString() !== "[object Object]") {
$ERROR('#3: The [[Class]] property of the Boolean prototype object is set to "Object"');
}
# Translation of Odoo Server.
# This file contains the translation of the following modules:
# * partner_contact_lang
#
# Translators:
# OCA Transbot <[email protected]>, 2017
msgid ""
msgstr ""
"Project-Id-Version: Odoo Server 9.0c\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-01-25 03:41+0000\n"
"PO-Revision-Date: 2017-01-25 03:41+0000\n"
"Last-Translator: OCA Transbot <[email protected]>, 2017\n"
"Language-Team: Lithuanian (https://www.transifex.com/oca/teams/23907/lt/)\n"
"Language: lt\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: \n"
"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n"
"%100<10 || n%100>=20) ? 1 : 2);\n"

#. module: partner_contact_lang
#: model:ir.model,name:partner_contact_lang.model_res_partner
msgid "Contact"
msgstr ""

#. module: partner_contact_lang
#: model_terms:ir.ui.view,arch_db:partner_contact_lang.view_res_partner_filter
msgid "Language"
msgstr ""
#~ msgid "Partner"
#~ msgstr "Partneris"
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rel="shortcut icon" type="image/ico" href="http://www.datatables.net/media/images/favicon.ico" />
<title>DataTables example</title>
<style type="text/css" title="currentStyle">
@import "../../media/css/demo_page.css";
@import "../../media/css/demo_table.css";
</style>
<script type="text/javascript" language="javascript" src="../../media/js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="../../media/js/jquery.dataTables.js"></script>
<script type="text/javascript" charset="utf-8">
			jQuery.fn.dataTableExt.aTypes.push(
				function ( sData ) {
					/* Always match, so 'html' becomes the last-resort (default) type */
					return 'html';
				}
			);
$(document).ready(function() {
$('#example').dataTable();
} );
</script>
</head>
<body id="dt_example">
<div id="container">
<div class="full_width big">
DataTables HTML sorting auto-detection example
</div>
<h1>Preamble</h1>
			<p>It can be very useful to have DataTables default to its built-in HTML type, rather than string, when a column does not fit the requirements of any other type. This method works by putting the 'html' type at the end of aTypes, which means it will be used only if none of the other types match the data in question.</p>
<h1>Live example</h1>
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="example">
<thead>
<tr>
<th>Reflection</th>
<th class="html">Link</th>
</tr>
</thead>
<tbody>
<tr>
<td>DataTables</td>
<td><a href="http://www.sprymedia.co.uk/article/DataTables">DataTables</a></td>
</tr>
<tr>
<td>Integrity</td>
<td><a href="http://www.sprymedia.co.uk/article/Integrity">A link to Integrity</a></td>
</tr>
<tr>
<td>Integrity</td>
<td><a href="http://www.sprymedia.co.uk/article/Integrity">Integrity</a></td>
</tr>
</table>
</div>
<div class="spacer"></div>
<h1>Initialisation code</h1>
<pre class="brush: js;">jQuery.fn.dataTableExt.aTypes.push(
function ( sData ) {
return 'html';
}
);
$(document).ready(function() {
$('#example').dataTable();
} );</pre>
<style type="text/css">
@import "../examples_support/syntax/css/shCore.css";
</style>
<script type="text/javascript" language="javascript" src="../examples_support/syntax/js/shCore.js"></script>
<h1>Other examples</h1>
<div class="demo_links">
<h2>Basic initialisation</h2>
<ul>
<li><a href="../basic_init/zero_config.html">Zero configuration</a></li>
<li><a href="../basic_init/filter_only.html">Feature enablement</a></li>
<li><a href="../basic_init/table_sorting.html">Sorting data</a></li>
<li><a href="../basic_init/multi_col_sort.html">Multi-column sorting</a></li>
<li><a href="../basic_init/multiple_tables.html">Multiple tables</a></li>
<li><a href="../basic_init/hidden_columns.html">Hidden columns</a></li>
<li><a href="../basic_init/complex_header.html">Complex headers - grouping with colspan</a></li>
<li><a href="../basic_init/dom.html">DOM positioning</a></li>
<li><a href="../basic_init/flexible_width.html">Flexible table width</a></li>
<li><a href="../basic_init/state_save.html">State saving</a></li>
<li><a href="../basic_init/alt_pagination.html">Alternative pagination styles</a></li>
<li>Scrolling: <br>
<a href="../basic_init/scroll_x.html">Horizontal</a> /
<a href="../basic_init/scroll_y.html">Vertical</a> /
<a href="../basic_init/scroll_xy.html">Both</a> /
<a href="../basic_init/scroll_y_theme.html">Themed</a> /
<a href="../basic_init/scroll_y_infinite.html">Infinite</a>
</li>
<li><a href="../basic_init/language.html">Change language information (internationalisation)</a></li>
<li><a href="../basic_init/themes.html">ThemeRoller themes (Smoothness)</a></li>
</ul>
<h2>Advanced initialisation</h2>
<ul>
<li>Events: <br>
<a href="../advanced_init/events_live.html">Live events</a> /
<a href="../advanced_init/events_pre_init.html">Pre-init</a> /
<a href="../advanced_init/events_post_init.html">Post-init</a>
</li>
<li><a href="../advanced_init/column_render.html">Column rendering</a></li>
<li><a href="../advanced_init/html_sort.html">Sorting without HTML tags</a></li>
<li><a href="../advanced_init/dom_multiple_elements.html">Multiple table controls (sDom)</a></li>
<li><a href="../advanced_init/length_menu.html">Defining length menu options</a></li>
<li><a href="../advanced_init/complex_header.html">Complex headers and hidden columns</a></li>
<li><a href="../advanced_init/dom_toolbar.html">Custom toolbar (element) around table</a></li>
<li><a href="../advanced_init/highlight.html">Row highlighting with CSS</a></li>
<li><a href="../advanced_init/row_grouping.html">Row grouping</a></li>
<li><a href="../advanced_init/row_callback.html">Row callback</a></li>
<li><a href="../advanced_init/footer_callback.html">Footer callback</a></li>
<li><a href="../advanced_init/sorting_control.html">Control sorting direction of columns</a></li>
<li><a href="../advanced_init/language_file.html">Change language information from a file (internationalisation)</a></li>
<li><a href="../advanced_init/defaults.html">Setting defaults</a></li>
<li><a href="../advanced_init/localstorage.html">State saving with localStorage</a></li>
<li><a href="../advanced_init/dt_events.html">Custom events</a></li>
</ul>
<h2>API</h2>
<ul>
<li><a href="../api/add_row.html">Dynamically add a new row</a></li>
<li><a href="../api/multi_filter.html">Individual column filtering (using "input" elements)</a></li>
<li><a href="../api/multi_filter_select.html">Individual column filtering (using "select" elements)</a></li>
<li><a href="../api/highlight.html">Highlight rows and columns</a></li>
<li><a href="../api/row_details.html">Show and hide details about a particular record</a></li>
<li><a href="../api/select_row.html">User selectable rows (multiple rows)</a></li>
<li><a href="../api/select_single_row.html">User selectable rows (single row) and delete rows</a></li>
<li><a href="../api/editable.html">Editable rows (with jEditable)</a></li>
<li><a href="../api/form.html">Submit form with elements in table</a></li>
<li><a href="../api/counter_column.html">Index column (static number column)</a></li>
<li><a href="../api/show_hide.html">Show and hide columns dynamically</a></li>
<li><a href="../api/api_in_init.html">API function use in initialisation object (callback)</a></li>
<li><a href="../api/tabs_and_scrolling.html">DataTables scrolling and tabs</a></li>
<li><a href="../api/regex.html">Regular expression filtering</a></li>
</ul>
</div>
<div class="demo_links">
<h2>Data sources</h2>
<ul>
<li><a href="../data_sources/dom.html">DOM</a></li>
<li><a href="../data_sources/js_array.html">Javascript array</a></li>
<li><a href="../data_sources/ajax.html">Ajax source</a></li>
<li><a href="../data_sources/server_side.html">Server side processing</a></li>
</ul>
<h2>Server-side processing</h2>
<ul>
<li><a href="../server_side/server_side.html">Obtain server-side data</a></li>
<li><a href="../server_side/custom_vars.html">Add extra HTTP variables</a></li>
<li><a href="../server_side/post.html">Use HTTP POST</a></li>
<li><a href="../server_side/ids.html">Automatic addition of IDs and classes to rows</a></li>
<li><a href="../server_side/object_data.html">Reading table data from objects</a></li>
<li><a href="../server_side/row_details.html">Show and hide details about a particular record</a></li>
<li><a href="../server_side/select_rows.html">User selectable rows (multiple rows)</a></li>
<li><a href="../server_side/jsonp.html">JSONP for a cross domain data source</a></li>
<li><a href="../server_side/editable.html">jEditable integration with DataTables</a></li>
<li><a href="../server_side/defer_loading.html">Deferred loading of Ajax data</a></li>
<li><a href="../server_side/pipeline.html">Pipelining data (reduce Ajax calls for paging)</a></li>
</ul>
<h2>Ajax data source</h2>
<ul>
<li><a href="../ajax/ajax.html">Ajax sourced data (array of arrays)</a></li>
<li><a href="../ajax/objects.html">Ajax sourced data (array of objects)</a></li>
<li><a href="../ajax/defer_render.html">Deferred DOM creation for extra speed</a></li>
<li><a href="../ajax/null_data_source.html">Empty data source columns</a></li>
<li><a href="../ajax/custom_data_property.html">Use a data source other than aaData (the default)</a></li>
<li><a href="../ajax/objects_subarrays.html">Read column data from sub-arrays</a></li>
<li><a href="../ajax/deep.html">Read column data from deeply nested properties</a></li>
</ul>
<h2>Plug-ins</h2>
<ul>
<li><a href="../plug-ins/plugin_api.html">Add custom API functions</a></li>
<li><a href="../plug-ins/sorting_plugin.html">Sorting and automatic type detection</a></li>
<li><a href="../plug-ins/sorting_sType.html">Sorting without automatic type detection</a></li>
<li><a href="../plug-ins/paging_plugin.html">Custom pagination controls</a></li>
<li><a href="../plug-ins/range_filtering.html">Range filtering / custom filtering</a></li>
<li><a href="../plug-ins/dom_sort.html">Live DOM sorting</a></li>
<li><a href="../plug-ins/html_sort.html">Automatic HTML type detection</a></li>
</ul>
</div>
<div id="footer" class="clear" style="text-align:center;">
<p>
Please refer to the <a href="http://www.datatables.net/usage">DataTables documentation</a> for full information about its API properties and methods.<br>
Additionally, there are a wide range of <a href="http://www.datatables.net/extras">extras</a> and <a href="http://www.datatables.net/plug-ins">plug-ins</a> which extend the capabilities of DataTables.
</p>
<span style="font-size:10px;">
DataTables designed and created by <a href="http://www.sprymedia.co.uk">Allan Jardine</a> © 2007-2011<br>
DataTables is dual licensed under the <a href="http://www.datatables.net/license_gpl2">GPL v2 license</a> or a <a href="http://www.datatables.net/license_bsd">BSD (3-point) license</a>.
</span>
</div>
</div>
</body>
</html>
fileFormatVersion: 2
guid: 66a52916ecea64a89b5cada1bfac1a43
timeCreated: 1451239670
licenseType: Free
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
.. _tutorial-how-to-use-cases:

Use Cases
=========

After reading the Tutorial we recommend reading some of the following use cases:

.. toctree::
   :glob:
   :titlesonly:

   *
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright (C) 2015 Mohammad Heydari
* Copyright (C) 2012 Derek J. Lambert
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
namespace CrEOF\Spatial\ORM\Query\AST\Functions\MySql;
use CrEOF\Spatial\ORM\Query\AST\Functions\AbstractSpatialDQLFunction;
/**
* MBRTouches DQL function
* Whether MBRs of two geometries touch
*
* @author Mohammad Heydari <[email protected]>
* @license http://mdhheydari.mit-license.org MIT
*/
class MBRTouches extends AbstractSpatialDQLFunction
{
protected $platforms = array('mysql');
protected $functionName = 'MBRTouches';
protected $minGeomExpr = 2;
protected $maxGeomExpr = 2;
}
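
// Usage sketch (illustrative; not part of the original file). With Doctrine ORM,
// a custom DQL function such as MBRTouches is typically registered on the ORM
// Configuration before it can appear in a query. addCustomStringFunction() is
// standard Doctrine API; the entity and property names below are hypothetical.
//
//   $config->addCustomStringFunction('MBRTouches', MBRTouches::class);
//
//   $query = $em->createQuery(
//       'SELECT z FROM App\Entity\Zone z WHERE MBRTouches(z.area, :geom) = 1'
//   );
//   $query->setParameter('geom', $zoneBoundary);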
*SPEF "IEEE 1481-1998"
*DESIGN "changes"
*DATE "Tue Dec 02 20:34:05 2014"
*VENDOR "TAU 2015 Contest"
*PROGRAM "Benchmark Parasitic Generator"
*VERSION "0.0"
*DESIGN_FLOW "NETLIST_TYPE_VERILOG"
*DIVIDER /
*DELIMITER :
*BUS_DELIMITER [ ]
*T_UNIT 1 PS
*C_UNIT 1 FF
*R_UNIT 1 KOHM
*L_UNIT 1 UH
*D_NET TAUNET_4 1.7439
*CONN
*I TAUGATE_6:Z O
*I TAUGATE_4:A I
*CAP
1 TAUGATE_6:Z 0.2611
2 TAUNET_4:1 0.4098
3 TAUNET_4:2 0.5009
4 TAUNET_4:3 0.2843
5 TAUGATE_4:A 0.2878
*RES
2 TAUGATE_6:Z TAUNET_4:1 0.1659
3 TAUNET_4:1 TAUNET_4:2 0.2742
4 TAUNET_4:2 TAUNET_4:3 0.3494
5 TAUNET_4:3 TAUGATE_4:A 0.1485
*END
*D_NET TAUNET_9 1.3760
*CONN
*I inst_2:ZN O
*I TAUGATE_9:A I
*CAP
1 inst_2:ZN 0.2110
2 TAUNET_9:1 0.3070
3 TAUNET_9:2 0.3583
4 TAUNET_9:3 0.1395
5 TAUNET_9:4 0.0646
6 TAUNET_9:5 0.0794
7 TAUGATE_9:A 0.2161
*RES
2 inst_2:ZN TAUNET_9:1 0.1614
3 TAUNET_9:1 TAUNET_9:2 0.3312
4 TAUNET_9:2 TAUNET_9:3 0.3755
5 TAUNET_9:3 TAUNET_9:4 0.4644
6 TAUNET_9:4 TAUNET_9:5 0.4559
7 TAUNET_9:5 TAUGATE_9:A 0.4050
*END
*D_NET TAUNET_5 0.4849
*CONN
*I TAUGATE_1:Z O
*I TAUGATE_5:A I
*CAP
1 TAUGATE_1:Z 0.1670
2 TAUNET_5:1 0.2440
3 TAUNET_5:2 0.0332
4 TAUGATE_5:A 0.0406
*RES
2 TAUGATE_1:Z TAUNET_5:1 0.2732
3 TAUNET_5:1 TAUNET_5:2 0.0073
4 TAUNET_5:2 TAUGATE_5:A 0.1308
*END
*D_NET net_2 1.8161
*CONN
*I TAUGATE_9:Z O
*I inst_4:A2 I
*CAP
1 TAUGATE_9:Z 0.2909
2 net_2:1 0.0740
3 net_2:2 0.4871
4 net_2:3 0.4951
5 net_2:4 0.4125
6 inst_4:A2 0.0565
*RES
2 TAUGATE_9:Z net_2:1 0.1285
3 net_2:1 net_2:2 0.0292
4 net_2:2 net_2:3 0.2926
5 net_2:3 net_2:4 0.4507
6 net_2:4 inst_4:A2 0.3356
*END
*D_NET TAUNET_8 1.9602
*CONN
*I TAUGATE_2:Z O
*I TAUGATE_8:A I
*CAP
1 TAUGATE_2:Z 0.3845
2 TAUNET_8:1 0.3056
3 TAUNET_8:2 0.3464
4 TAUNET_8:3 0.0305
5 TAUNET_8:4 0.2383
6 TAUNET_8:5 0.4020
7 TAUGATE_8:A 0.2529
*RES
2 TAUGATE_2:Z TAUNET_8:1 0.2665
3 TAUNET_8:1 TAUNET_8:2 0.4354
4 TAUNET_8:2 TAUNET_8:3 0.1977
5 TAUNET_8:3 TAUNET_8:4 0.0788
6 TAUNET_8:4 TAUNET_8:5 0.3276
7 TAUNET_8:5 TAUGATE_8:A 0.3833
*END
*D_NET nx6 0.8670
*CONN
*I TAUGATE_8:Z O
*I inst_0:A2 I
*CAP
1 TAUGATE_8:Z 0.0716
2 nx6:1 0.1922
3 nx6:2 0.2805
4 nx6:3 0.1322
5 inst_0:A2 0.1905
*RES
2 TAUGATE_8:Z nx6:1 0.4017
3 nx6:1 nx6:2 0.4680
4 nx6:2 nx6:3 0.2976
5 nx6:3 inst_0:A2 0.2331
*END
*D_NET TAUNET_1 1.3358
*CONN
*I TAUGATE_7:Z O
*I TAUGATE_1:A I
*CAP
1 TAUGATE_7:Z 0.3943
2 TAUNET_1:1 0.1955
3 TAUNET_1:2 0.1868
4 TAUNET_1:3 0.4889
5 TAUGATE_1:A 0.0703
*RES
2 TAUGATE_7:Z TAUNET_1:1 0.3298
3 TAUNET_1:1 TAUNET_1:2 0.4131
4 TAUNET_1:2 TAUNET_1:3 0.3310
5 TAUNET_1:3 TAUGATE_1:A 0.2621
*END
*D_NET net_0 0.9833
*CONN
*I TAUGATE_5:Z O
*I inst_5:A1 I
*CAP
1 TAUGATE_5:Z 0.1808
2 net_0:1 0.3990
3 inst_5:A1 0.4035
*RES
2 TAUGATE_5:Z net_0:1 0.0219
3 net_0:1 inst_5:A1 0.0830
*END
*D_NET TAUNET_6 1.6212
*CONN
*I inst_3:ZN O
*I TAUGATE_6:A I
*CAP
1 inst_3:ZN 0.1250
2 TAUNET_6:1 0.4976
3 TAUNET_6:2 0.3591
4 TAUNET_6:3 0.2780
5 TAUNET_6:4 0.2022
6 TAUGATE_6:A 0.1593
*RES
2 inst_3:ZN TAUNET_6:1 0.4382
3 TAUNET_6:1 TAUNET_6:2 0.3498
4 TAUNET_6:2 TAUNET_6:3 0.4055
5 TAUNET_6:3 TAUNET_6:4 0.4519
6 TAUNET_6:4 TAUGATE_6:A 0.4658
*END
*D_NET TAUNET_7 1.1222
*CONN
*I inst_1:ZN O
*I TAUGATE_7:A I
*CAP
1 inst_1:ZN 0.4019
2 TAUNET_7:1 0.2503
3 TAUGATE_7:A 0.4700
*RES
2 inst_1:ZN TAUNET_7:1 0.3789
3 TAUNET_7:1 TAUGATE_7:A 0.3545
*END
/* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecma-builtins.h"
#ifndef CONFIG_DISABLE_ES2015_TYPEDARRAY_BUILTIN
#define ECMA_BUILTINS_INTERNAL
#include "ecma-builtins-internal.h"
#define BUILTIN_INC_HEADER_NAME "ecma-builtin-uint16array-prototype.inc.h"
#define BUILTIN_UNDERSCORED_ID uint16array_prototype
#include "ecma-builtin-internal-routines-template.inc.h"
/** \addtogroup ecma ECMA
* @{
*
* \addtogroup ecmabuiltins
* @{
*
* \addtogroup uint16arrayprototype ECMA Uint16Array.prototype object built-in
* @{
*/
/**
* @}
* @}
* @}
*/
#endif /* !CONFIG_DISABLE_ES2015_TYPEDARRAY_BUILTIN */
/*
* Author : K.F.Storm
* Email : yk000123 at sina.com
* Website : http://www.kfstorm.com
* */
using System;
using System.Collections.Generic;
namespace DoubanFM.Core
{
/// <summary>
	/// Play list
/// </summary>
public class PlayList : List<Song>
{
/// <summary>
		/// Occurs when fetching the play list fails.
/// </summary>
internal static event EventHandler<PlayListEventArgs> GetPlayListFailed;
static Random random = new Random();
static byte[] bytes = new byte[8];
private static void RaiseGetPlayListFailedEvent(string json)
{
if (GetPlayListFailed != null)
GetPlayListFailed(null, new PlayListEventArgs(json));
}
internal PlayList(Json.PlayList pl)
{
			if (pl != null && pl.song != null)
foreach (var song in pl.song)
{
this.Add(new Song(song));
}
}
/// <summary>
		/// Gets a play list from the server
		/// </summary>
		/// <param name="playerState">Player state</param>
		/// <param name="operationType">Operation type</param>
		/// <returns>
		/// The play list
/// </returns>
internal static PlayList GetPlayList(Player.PlayerState playerState, string operationType)
{
			// Build the request URL
Parameters parameters = new Parameters();
parameters["app_name"] = "radio_desktop_win";
parameters["version"] = "100";
parameters["user_id"] = playerState.CurrentUser.UserID;
parameters["token"] = playerState.CurrentUser.Token;
parameters["expire"] = playerState.CurrentUser.Expire;
parameters["from"] = "mainsite";
parameters["context"] = playerState.CurrentChannel.Context;
parameters["sid"] = playerState.CurrentSong != null ? playerState.CurrentSong.SongId : null;
parameters["channel"] = playerState.CurrentChannel.Id;
parameters["type"] = operationType;
random.NextBytes(bytes);
parameters["r"] = (BitConverter.ToUInt64(bytes, 0) % 0xFFFFFFFFFF).ToString("x10");
if (playerState.CurrentUser.IsPro)
{
string kbps = null;
switch (playerState.CurrentUser.ProRate)
{
case ProRate.Kbps64:
kbps = "64";
break;
case ProRate.Kbps128:
kbps = "128";
break;
case ProRate.Kbps192:
kbps = "192";
break;
default:
break;
}
parameters["kbps"] = kbps;
}
string url = ConnectionBase.ConstructUrlWithParameters("http://www.douban.com/j/app/radio/people", parameters);
			// Fetch the play list
string json = new ConnectionBase().Get(url, @"application/json, text/javascript, */*; q=0.01", @"http://douban.fm");
var jsonPlayList = Json.JsonHelper.FromJson<Json.PlayList>(json);
if (jsonPlayList != null && jsonPlayList.r)
RaiseGetPlayListFailedEvent(json);
PlayList pl = new PlayList(jsonPlayList);
			// Replace small cover images with large ones
foreach (var s in pl)
{
s.Picture = s.Picture.Replace("/mpic/", "/lpic/").Replace("//otho.", "//img3.");
}
			// Remove advertisements
pl.RemoveAll(s => s.IsAd);
return pl;
}
/// <summary>
		/// Event arguments for the play list
/// </summary>
public class PlayListEventArgs : EventArgs
{
/// <summary>
			/// Message
/// </summary>
public string Message { get; private set; }
internal PlayListEventArgs(string message)
{
Message = message;
}
}
}
}
| {
"pile_set_name": "Github"
} |
import json
import os
import numpy as np
import pytest
from pypfopt import EfficientFrontier
from pypfopt import exceptions
from tests.utilities_for_tests import get_data, setup_efficient_frontier
def test_custom_bounds():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0.02, 0.13)
)
ef.min_volatility()
np.testing.assert_allclose(ef._lower_bounds, np.array([0.02] * ef.n_assets))
np.testing.assert_allclose(ef._upper_bounds, np.array([0.13] * ef.n_assets))
assert ef.weights.min() >= 0.02
assert ef.weights.max() <= 0.13
np.testing.assert_almost_equal(ef.weights.sum(), 1)
def test_custom_bounds_different_values():
bounds = [(0.01, 0.13), (0.02, 0.11)] * 10
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
ef.min_volatility()
assert (0.01 <= ef.weights[::2]).all() and (ef.weights[::2] <= 0.13).all()
assert (0.02 <= ef.weights[1::2]).all() and (ef.weights[1::2] <= 0.11).all()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
bounds = ((0.01, 0.13), (0.02, 0.11)) * 10
assert EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
def test_weight_bounds_minus_one_to_one():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
)
assert ef.max_sharpe()
assert ef.min_volatility()
def test_none_bounds():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(None, 0.3)
)
ef.min_volatility()
w1 = ef.weights
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(-1, 0.3)
)
ef.min_volatility()
w2 = ef.weights
np.testing.assert_array_almost_equal(w1, w2)
def test_bound_input_types():
bounds = [0.01, 0.13]
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
assert ef
np.testing.assert_allclose(ef._lower_bounds, np.array([0.01] * ef.n_assets))
np.testing.assert_allclose(ef._upper_bounds, np.array([0.13] * ef.n_assets))
lb = np.array([0.01, 0.02] * 10)
ub = np.array([0.07, 0.2] * 10)
assert EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(lb, ub)
)
bounds = ((0.01, 0.13), (0.02, 0.11)) * 10
assert EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
def test_bound_failure():
# Ensure optimisation fails when lower bound is too high or upper bound is too low
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0.06, 0.13)
)
with pytest.raises(exceptions.OptimizationError):
ef.min_volatility()
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0, 0.04)
)
with pytest.raises(exceptions.OptimizationError):
ef.min_volatility()
def test_bounds_errors():
assert EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0, 1)
)
with pytest.raises(TypeError):
EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0.06, 1, 3)
)
with pytest.raises(TypeError):
# Not enough bounds
bounds = [(0.01, 0.13), (0.02, 0.11)] * 5
EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
def test_clean_weights():
ef = setup_efficient_frontier()
ef.min_volatility()
number_tiny_weights = sum(ef.weights < 1e-4)
cleaned = ef.clean_weights(cutoff=1e-4, rounding=5)
cleaned_weights = cleaned.values()
clean_number_tiny_weights = sum(i < 1e-4 for i in cleaned_weights)
assert clean_number_tiny_weights == number_tiny_weights
# Check rounding
cleaned_weights_str_length = [len(str(i)) for i in cleaned_weights]
assert all([length == 7 or length == 3 for length in cleaned_weights_str_length])
def test_clean_weights_short():
ef = setup_efficient_frontier()
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
)
ef.min_volatility()
# In practice we would never use such a high cutoff
number_tiny_weights = sum(np.abs(ef.weights) < 0.05)
cleaned = ef.clean_weights(cutoff=0.05)
cleaned_weights = cleaned.values()
clean_number_tiny_weights = sum(abs(i) < 0.05 for i in cleaned_weights)
assert clean_number_tiny_weights == number_tiny_weights
def test_clean_weights_error():
ef = setup_efficient_frontier()
with pytest.raises(AttributeError):
ef.clean_weights()
ef.min_volatility()
with pytest.raises(ValueError):
ef.clean_weights(rounding=1.3)
with pytest.raises(ValueError):
ef.clean_weights(rounding=0)
assert ef.clean_weights(rounding=3)
def test_clean_weights_no_rounding():
ef = setup_efficient_frontier()
ef.min_volatility()
# ensure the call does not fail
# in previous commits, this call would raise a ValueError
cleaned = ef.clean_weights(rounding=None, cutoff=0)
assert cleaned
np.testing.assert_array_almost_equal(
np.sort(ef.weights), np.sort(list(cleaned.values()))
)
def test_efficient_frontier_init_errors():
df = get_data()
mean_returns = df.pct_change().dropna(how="all").mean()
with pytest.raises(TypeError):
EfficientFrontier("test", "string")
with pytest.raises(TypeError):
EfficientFrontier(mean_returns, mean_returns)
def test_set_weights():
ef = setup_efficient_frontier()
w1 = ef.min_volatility()
test_weights = ef.weights
ef.min_volatility()
ef.set_weights(w1)
np.testing.assert_array_almost_equal(test_weights, ef.weights)
def test_save_weights_to_file():
ef = setup_efficient_frontier()
ef.min_volatility()
ef.save_weights_to_file("tests/test.txt")
with open("tests/test.txt", "r") as f:
file = f.read()
parsed = json.loads(file.replace("'", '"'))
assert ef.clean_weights() == parsed
ef.save_weights_to_file("tests/test.json")
with open("tests/test.json", "r") as f:
parsed = json.load(f)
assert ef.clean_weights() == parsed
os.remove("tests/test.txt")
os.remove("tests/test.json")
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.jolie-lang</groupId>
<artifactId>distribution</artifactId>
<relativePath>../pom.xml</relativePath>
<version>1.0.0</version>
</parent>
<groupId>org.jolie-lang</groupId>
<artifactId>test</artifactId>
<version>${jolie.version}</version>
<packaging>pom</packaging>
<name>test</name>
<description>The Jolie interpreter test suite.</description>
<profiles>
<profile>
<id>Windows</id>
<activation>
<os>
<family>Windows</family>
</os>
</activation>
<properties>
<jolie.launcher>${project.parent.basedir}\${jolie.installation.directory.executable}\windows\jolie.bat</jolie.launcher>
<jolie.home>${project.parent.basedir}\${jolie.installation.directory}</jolie.home>
<test.dir>${project.parent.basedir}\test</test.dir>
</properties>
</profile>
<profile>
<id>unix</id>
<activation>
<os>
<family>unix</family>
</os>
</activation>
<properties>
<jolie.launcher>${project.parent.basedir}/${jolie.installation.directory.executable}/unix/jolie</jolie.launcher>
<jolie.home>${project.parent.basedir}/${jolie.installation.directory}</jolie.home>
<test.dir>${project.parent.basedir}/test</test.dir>
</properties>
</profile>
</profiles>
<modules>
<module>extensions/private/WS-test</module>
</modules>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<id>test</id>
<phase>install</phase>
<goals>
<goal>exec</goal>
</goals>
</execution>
</executions>
<configuration>
<environmentVariables>
<JOLIE_HOME>${jolie.home}</JOLIE_HOME>
</environmentVariables>
<workingDirectory>${test.dir}</workingDirectory>
<executable>${jolie.launcher}</executable>
<arguments>
<argument>--stackTraces</argument>
<argument>test.ol</argument>
</arguments>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<!-- Basic libs -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>libjolie</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie</artifactId>
<version>${jolie.version}</version>
</dependency>
<!-- Libraries -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie-js</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie-ssl</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie-xml</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>xsom</artifactId>
<version>${jolie.version}</version>
</dependency>
<!-- Extensions -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>auto</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>btl2cap</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>http</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>https</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>javascript</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jsonrpc</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>local</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>localsocket</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>rmi</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>soap</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>soaps</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>sodep</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>sodeps</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>xmlrpc</artifactId>
<version>${jolie.version}</version>
</dependency>
<!-- Support -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie-java</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>metaservice-java</artifactId>
<version>${jolie.version}</version>
</dependency>
<!-- Tools -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie2java</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie2plasma</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie2surface</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>jolie2wsdl</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>joliec</artifactId>
<version>${jolie.version}</version>
</dependency>
<!-- Java services -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>coreJavaServices</artifactId>
<version>${jolie.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>monitorJavaServices</artifactId>
<version>${jolie.version}</version>
</dependency>
</dependencies>
</project>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" ?>
<class xmlns="http://xml.phpdox.net/src" full="mysql_xdevapi\ColumnResult" namespace="mysql_xdevapi" name="ColumnResult">
<method name="getCharacterSetName" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getCollationName" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getColumnLabel" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getColumnName" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getFractionalDigits" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="integer"/>
</docblock>
</method>
<method name="getLength" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="integer"/>
</docblock>
</method>
<method name="getSchemaName" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getTableLabel" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getTableName" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="string"/>
</docblock>
</method>
<method name="getType" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="integer"/>
</docblock>
</method>
<method name="isNumberSigned" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="integer"/>
</docblock>
</method>
<method name="isPadded" abstract="false" static="false" visibility="public" final="false">
<docblock>
<description compact=""/>
<return type="integer"/>
</docblock>
</method>
</class> | {
"pile_set_name": "Github"
} |
/** @license ISC License (c) copyright 2017 original and current authors */
/** @author Ian Hofmann-Hicks (evil) */
const hasAlg = require('./hasAlg')
/** isContravariant :: a -> Boolean */
function isContravariant(m) {
return !!m && hasAlg('contramap', m)
}
module.exports = isContravariant
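// Usage sketch (illustrative values, not taken from the library's test suite):
//   isContravariant({ contramap: f => f }) //=> true
//   isContravariant([])                    //=> false
//   isContravariant(null)                  //=> false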
| {
"pile_set_name": "Github"
} |
#tb 0: 1/22050
#media_type 0: audio
#codec_id 0: pcm_s16le
#sample_rate 0: 22050
#channel_layout 0: 4
0, 0, 0, 23620, 47240, 0x9974897c
0, 23620, 23620, 1564, 3128, 0x7e4064b4
0, 25184, 25184, 1564, 3128, 0x80883301
0, 26748, 26748, 1568, 3136, 0x2ad2d341
0, 28316, 28316, 1564, 3128, 0xda8468e3
0, 29880, 29880, 1568, 3136, 0x9d6f6cdf
0, 31448, 31448, 1564, 3128, 0x1aaa64b5
0, 33012, 33012, 1564, 3128, 0x9182728b
0, 34576, 34576, 1568, 3136, 0xfa8e17b3
0, 36144, 36144, 1564, 3128, 0x0dc3c1cf
0, 37708, 37708, 1568, 3136, 0x0109639d
0, 39276, 39276, 1564, 3128, 0x6d8a12d9
0, 40840, 40840, 1564, 3128, 0x4b9a9597
0, 42404, 42404, 1568, 3136, 0x9112710e
0, 43972, 43972, 1564, 3128, 0x8cccf522
0, 45536, 45536, 1564, 3128, 0x6594bbf3
0, 47100, 47100, 1568, 3136, 0xd878a7d5
0, 48668, 48668, 1564, 3128, 0xaa6e3905
0, 50232, 50232, 1568, 3136, 0x2a062e04
0, 51800, 51800, 1564, 3128, 0x84e4006a
0, 53364, 53364, 1564, 3128, 0x85183633
0, 54928, 54928, 1568, 3136, 0xb62d4b02
0, 56496, 56496, 1564, 3128, 0xe209462a
0, 58060, 58060, 1568, 3136, 0x57c4824b
0, 59628, 59628, 1564, 3128, 0x664a9163
0, 61192, 61192, 1564, 3128, 0xb4287874
0, 62756, 62756, 1568, 3136, 0xde626885
0, 64324, 64324, 1564, 3128, 0x919763c2
0, 65888, 65888, 1564, 3128, 0xa4f664e1
0, 67452, 67452, 1568, 3136, 0xa0bab0d4
0, 69020, 69020, 1564, 3128, 0xe938939c
0, 70584, 70584, 1568, 3136, 0x3679bfc7
0, 72152, 72152, 1564, 3128, 0xc96c55c3
0, 73716, 73716, 1564, 3128, 0x119114d6
0, 75280, 75280, 1568, 3136, 0x42f3800f
0, 76848, 76848, 1564, 3128, 0x4250c4ad
0, 78412, 78412, 1568, 3136, 0x5cdd4925
0, 79980, 79980, 1564, 3128, 0xa4c12360
0, 81544, 81544, 1564, 3128, 0x849f48de
0, 83108, 83108, 1568, 3136, 0x6acd8ff9
0, 84676, 84676, 1564, 3128, 0xb2758556
0, 86240, 86240, 1564, 3128, 0x10f2fcb1
0, 87804, 87804, 1568, 3136, 0xf0f02b23
0, 89372, 89372, 1564, 3128, 0x64f759c6
0, 90936, 90936, 1568, 3136, 0x7ec075e3
0, 92504, 92504, 1564, 3128, 0xf981d51e
0, 94068, 94068, 1564, 3128, 0xc622e8b9
0, 95632, 95632, 1568, 3136, 0xf632e2f8
0, 97200, 97200, 1564, 3128, 0xda561864
0, 98764, 98764, 1568, 3136, 0x14d2e888
0, 100332, 100332, 1564, 3128, 0x015bb869
0, 101896, 101896, 1564, 3128, 0xedb1fb62
0, 103460, 103460, 1568, 3136, 0xe0560c41
0, 105028, 105028, 1564, 3128, 0x14773c9a
0, 106592, 106592, 1568, 3136, 0x850f1c82
0, 108160, 108160, 1564, 3128, 0xb0bd5347
0, 109724, 109724, 1564, 3128, 0x8f82edbf
0, 111288, 111288, 1568, 3136, 0x493abee2
0, 112856, 112856, 1564, 3128, 0xf5daff3f
0, 114420, 114420, 1564, 3128, 0x78ad2690
0, 115984, 115984, 1568, 3136, 0x490ebafc
0, 117552, 117552, 1564, 3128, 0x70333fd2
0, 119116, 119116, 1568, 3136, 0x8cb1c350
0, 120684, 120684, 1564, 3128, 0x8bd057cb
0, 122248, 122248, 1564, 3128, 0x161b3dbc
0, 123812, 123812, 1568, 3136, 0xb47fb88a
0, 125380, 125380, 1564, 3128, 0x474b381e
0, 126944, 126944, 1568, 3136, 0x07c519bb
0, 128512, 128512, 1564, 3128, 0x15b916c8
0, 130076, 130076, 1564, 3128, 0x0ed7f6fb
0, 131640, 131640, 1568, 3136, 0x54d6397b
0, 133208, 133208, 1564, 3128, 0x437242bb
0, 134772, 134772, 1564, 3128, 0x38f05c4d
0, 136336, 136336, 1568, 3136, 0x5d000e59
0, 137904, 137904, 1564, 3128, 0xdeab2d04
0, 139468, 139468, 1568, 3136, 0x77de6880
0, 141036, 141036, 1564, 3128, 0xbc87ef25
0, 142600, 142600, 1564, 3128, 0xc1638ade
0, 144164, 144164, 1568, 3136, 0xcfb64a5f
0, 145732, 145732, 1564, 3128, 0x90b1b826
0, 147296, 147296, 1568, 3136, 0x00000000
0, 148864, 148864, 1564, 3128, 0x00000000
0, 150428, 150428, 1564, 3128, 0x00000000
0, 151992, 151992, 1568, 3136, 0x00000000
0, 153560, 153560, 1564, 3128, 0x00000000
0, 155124, 155124, 1428, 2856, 0x00000000
| {
"pile_set_name": "Github"
} |
const { expect } = require('chai');
const { StreamingHttpClient } = require('../../lib');
describe('StreamingHttpClient', function() {
this.timeout(3000);
it('should construct when provided a server', () => {
const server = { isConnected: true };
const client = new StreamingHttpClient(server);
expect(client.server).to.equal(server);
});
it('should throw an error if missing the "server" parameter', () => {
try {
new StreamingHttpClient();
} catch (err) {
expect(err.message).to.contain('StreamingHttpClient: Expected server.');
}
});
it('should throw an error on sendRequest if missing "httpRequest" parameter', async () => {
const client = new StreamingHttpClient({});
try {
await client.sendRequest();
} catch (err) {
expect(err).to.be.instanceOf(Error);
expect(err.message).to.contain('StreamingHttpClient.sendRequest(): missing "httpRequest" parameter');
}
});
it('should throw an error on sendRequest if internal server is not connected', async () => {
const client = new StreamingHttpClient({});
try {
await client.sendRequest({});
} catch (err) {
expect(err).to.be.instanceOf(Error);
expect(err.message).to.contain('StreamingHttpClient.sendRequest(): Streaming connection is disconnected');
}
});
});
| {
"pile_set_name": "Github"
} |
## Spark Miscellaneous - Info, commands and tips
- Workload profiling with [sparkMeasure](Spark_Performace_Tool_sparkMeasure.md)
```
bin/spark-shell --packages ch.cern.sparkmeasure:spark-measure_2.11:0.15
val stageMetrics = ch.cern.sparkmeasure.StageMetrics(spark)
stageMetrics.runAndMeasure(spark.sql("select count(*) from range(1000) cross join range(1000)").show)
```
---
- Build Spark Session from API
```
// Scala
import org.apache.spark.sql._
val spark = SparkSession.
builder().
appName("my app").
master("local[*]"). // use master("yarn") for a YARN cluster
config("spark.driver.memory","2g"). // set all the parameters as needed
getOrCreate()
# Python
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName("my app") \
.master("yarn") \
.config("spark.driver.memory","8g") \
.config("spark.executor.memory","14g") \
.config("spark.executor.cores","4") \
.config("spark.executor.instances","8") \
.config("spark.dynamicAllocation.enabled","false") \
.getOrCreate()
```
---
- Spark commits and PRs, see what's new
- Spark commits to master: https://github.com/apache/spark/commits/master
- Spark PRs: https://spark-prs.appspot.com/
- Documentation:
- https://github.com/apache/spark/tree/master/docs
- https://spark.apache.org/docs/latest/
- SQL grammar https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
- https://docs.databricks.com/index.html
---
- How to build Spark
- see also https://spark.apache.org/docs/latest/building-spark.html
```
git clone https://github.com/apache/spark.git
cd spark
# export MAVEN_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=512m"
./dev/make-distribution.sh --name custom-spark --tgz --pip -Phadoop-2.7 -Phive -Pyarn -Pkubernetes
# Compile for a specific Hadoop version, for example use this to compile for Hadoop 3.2
./dev/make-distribution.sh --name custom-spark --tgz --pip -Phadoop-3.2 -Pyarn -Pkubernetes
# old versions: ./dev/make-distribution.sh --name custom-spark --tgz --pip -Phadoop-2.7 -Dhadoop.version=3.2.0 -Pyarn -Pkubernetes
# Compile Spark 3.0 with Hadoop 3.2.1, this currently requires a workaround for Guava version compatibility
./dev/make-distribution.sh --name custom_spark --pip --tgz -Pyarn -Pkubernetes -Phadoop-3.2 -Dhadoop.version=3.2.1 -Dguava.version=27.0-jre
# compile a version with cherry-picked changes
# git checkout branch-2.3
# git cherry-pick xxxx
```
---
- Spark executor plugins
Executor plugins allow you to run custom code at executor startup. They can be useful to extend instrumentation/monitoring and for
advanced configuration. See the sketch below and full examples at:
- [Spark executor plugin for Spark 2.4](https://github.com/cerndb/SparkExecutorPlugins2.4)
- [Spark executor plugin for Spark 3.0](https://github.com/cerndb/SparkExecutorPlugins)
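A minimal sketch of an executor plugin for Spark 2.4, assuming the `org.apache.spark.ExecutorPlugin` interface introduced with SPARK-24918 (the class name `DemoExecutorPlugin` is a placeholder):
```scala
// Runs on each executor JVM; init() is called at executor startup
class DemoExecutorPlugin extends org.apache.spark.ExecutorPlugin {
  override def init(): Unit = {
    // custom startup code, e.g. initialize instrumentation or tune the environment
    println(s"Executor plugin started on ${java.net.InetAddress.getLocalHost.getHostName}")
  }
  override def shutdown(): Unit = {
    println("Executor plugin shutting down")
  }
}
```
Package the class in a jar, then enable it with `--jars demoplugin.jar --conf spark.executor.plugins=DemoExecutorPlugin`.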
---
- Spark configuration
Configuration files are in SPARK_CONF_DIR (defaults to SPARK_HOME/conf)
```Scala
// get configured parameters from running Spark Session with
spark.conf.getAll.foreach(println)
// get list of driver and executors from Spark Context:
sc.getExecutorMemoryStatus.foreach(println)
```
```
# PySpark
from pyspark.conf import SparkConf
conf = SparkConf()
print(conf.toDebugString())
```
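To set a modifiable configuration parameter at runtime (works the same from Scala and Python):
```
spark.conf.set("spark.sql.shuffle.partitions", "400")
spark.conf.get("spark.sql.shuffle.partitions")
```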
---
- Read and set configuration variables of the Hadoop environment from Spark.
Note this code works on the local JVM, i.e. the driver (it will not read/write on the executors' JVMs)
```
// Scala:
sc.hadoopConfiguration.get("dfs.blocksize")
sc.hadoopConfiguration.getValByRegex(".").toString.split(", ").sorted.foreach(println)
sc.hadoopConfiguration.setInt("parquet.block.size", 256*1024*1024)
```
```
# PySpark
sc._jsc.hadoopConfiguration().get("dfs.blocksize")
sc._jsc.hadoopConfiguration().set(key, value)
```
---
- Read filesystem statistics from all registered filesystems in Hadoop (notably HDFS and local, also s3a if used).
Note: this code reports statistics for the local JVM, i.e. the driver (it will not read stats from the executors).
Note: when using this programmatically, use `org.apache.hadoop.fs.FileSystem.getAllStatistics` (a sketch follows the example below);
`org.apache.hadoop.fs.FileSystem.getStatistics` also works. Both are deprecated in recent Hadoop versions.
See also the extended statistics with the getGlobalStorageStatistics API example below.
```
scala> org.apache.hadoop.fs.FileSystem.printStatistics()
FileSystem org.apache.hadoop.hdfs.DistributedFileSystem: 0 bytes read, 4130784 bytes written, 1 read ops, 0 large read ops, 3 write ops
FileSystem org.apache.hadoop.fs.s3a.S3AFileSystem: 23562931 bytes read, 0 bytes written, 14591 read ops, 0 large read ops, 0 write ops
FileSystem org.apache.hadoop.fs.RawLocalFileSystem: 0 bytes read, 0 bytes written, 0 read ops, 0 large read ops, 0 write ops
```
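For programmatic access, a short Scala sketch using `FileSystem.getAllStatistics` (deprecated in recent Hadoop versions in favor of the storage statistics API shown below):
```scala
import scala.collection.JavaConverters._
// one Statistics entry per filesystem scheme used by this JVM
org.apache.hadoop.fs.FileSystem.getAllStatistics.asScala.foreach { s =>
  println(s"${s.getScheme}: bytesRead=${s.getBytesRead} bytesWritten=${s.getBytesWritten}")
}
```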
- Read extended filesystem statistics, applies to Hadoop 2.8.0 and higher.
```scala
val stats=org.apache.hadoop.fs.FileSystem.getGlobalStorageStatistics.iterator
stats.forEachRemaining {entry =>
println(s"Stats for scheme: ${entry.getScheme}")
entry.getLongStatistics.forEachRemaining(println)
println
}
```
---
- How to use the Spark Scala REPL to access the Hadoop filesystem API. Examples for HDFS and s3a metrics:
```
// get Hadoop filesystem object
val fs = org.apache.hadoop.fs.FileSystem.get(sc.hadoopConfiguration)
// alternative:
val fs = org.apache.hadoop.fs.FileSystem.get(spark.sessionState.newHadoopConf)
//get local filesystem
val fslocal = org.apache.hadoop.fs.FileSystem.getLocal(spark.sessionState.newHadoopConf)
// get S3A Filesystem,
// Use if you want to read metrics for s3a stats (or another Hadoop compatible filesystem)
val fullPathUri = java.net.URI.create("s3a://luca/")
val fs = org.apache.hadoop.fs.FileSystem.get(fullPathUri, spark.sessionState.newHadoopConf)
// alternative:
val fs = org.apache.hadoop.fs.FileSystem.get(fullPathUri,sc.hadoopConfiguration).asInstanceOf[org.apache.hadoop.fs.s3a.S3AFileSystem]
// Note, in the case of S3A/Hadoop v2.8.0 or higher this prints extended filesystem stats and S3A instrumentation values:
print(fs.toString)
// List of available statistics
fs.getStorageStatistics.forEach(println)
fs.getStorageStatistics.getLongStatistics.forEachRemaining(println)
// Get a single metric value:
fs.getInstrumentation.getCounterValue("stream_bytes_read")
fs.getStorageStatistics.getLong("stream_bytes_read")
// Similarly for HDFS you can use this to explicitly cast to HDFS Client class:
val fullPathUri = java.net.URI.create("hdfs://myHDFSCLuster/")
val fs = org.apache.hadoop.fs.FileSystem.get(fullPathUri,sc.hadoopConfiguration).asInstanceOf[org.apache.hadoop.hdfs.DistributedFileSystem]
// get file status
fs.getFileStatus(new org.apache.hadoop.fs.Path("<file_path>"))
scala> fs.getFileStatus(new org.apache.hadoop.fs.Path("<file_path>")).toString.split("; ").foreach(println)
FileStatus{path=hdfs://cluster/user/myusername/cms-dataset-20/20005/DE909CD0-F878-E211-AB7A-485B398971EA.root
isDirectory=false
length=2158964874
replication=3
blocksize=268435456
modification_time=1542653647906
access_time=1543245001357
owner=myusername
group=supergroup
permission=rw-r--r--
isSymlink=false}
fs.getBlockSize(new org.apache.hadoop.fs.Path("<file_path>"))
fs.getLength(new org.apache.hadoop.fs.Path("<file_path>"))
// get block map
scala> fs.getFileBlockLocations(new org.apache.hadoop.fs.Path("<file_path>"), 0L, 2000000000000000L).foreach(println)
0,268435456,host1.cern.ch,host2.cern.ch,host3.cern.ch
268435456,268435456,host4.cern.ch,host5.cern.ch,host6.cern.ch
...
```
---
- Example analysis of Hadoop file data block locations using Spark SQL
```
bin/spark-shell
// get filesystem object
val fs = org.apache.hadoop.fs.FileSystem.get(sc.hadoopConfiguration)
// get blocks list (with replicas)
val l1=fs.getFileBlockLocations(new org.apache.hadoop.fs.Path("mydataset-20/20005/myfile1.parquet.snappy"), 0L, 2000000000000000L)
// transform into a Spark Dataframe
l1.flatMap(x => x.getHosts).toList.toDF("hostname").createOrReplaceTempView("filemap")
// query
spark.sql("select hostname, count(*) from filemap group by hostname").show
+-----------------+--------+
| hostname|count(1)|
+-----------------+--------+
|mynode01.cern.ch| 5|
|mynode12.cern.ch| 4|
|mynode02.cern.ch| 4|
|mynode08.cern.ch| 3|
|mynode06.cern.ch| 6|
|... | |
+-----------------+--------+
```
---
- Print Properties
```
println(System.getProperties)
System.getProperties.toString.split(',').map(_.trim).foreach(println)
```
---
- Spark SQL execution plan, explain cost and code generation
```
sql("select count(*) from range(10) cross join range(10)").explain(true)
sql("explain select count(*) from range(10) cross join range(10)").collect.foreach(println)
// CBO
sql("explain cost select count(*) from range(10) cross join range(10)").collect.foreach(println)
// Print Code generation
sql("select count(*) from range(10) cross join range(10)").queryExecution.debug.codegen
sql("explain codegen select count(*) from range(10) cross join range(10)").collect.foreach(println)
// for longer plans:
//   df.queryExecution.debug.codegenToSeq -> dumps to a sequence of strings
//   df.queryExecution.debug.toFile -> dumps to a filesystem file
// New in Spark 3.0, explain formatted
sql("explain formatted select count(*) from range(10) cross join range(10)").collect.foreach(println)
```
---
- Spark SQL measure time spent in query plan parsing and optimization (Spark 3.0)
```
scala> val df=sql("select 1")
scala> df.queryExecution.tracker.
measureTime phases recordRuleInvocation rules topRulesByTime
scala> sql("select 1").queryExecution.tracker.
measurePhase phases recordRuleInvocation rules topRulesByTime
scala> sql("select 1").queryExecution.tracker.phases
scala> df.queryExecution.tracker.phases
resX: Map[String,org.apache.spark.sql.catalyst.QueryPlanningTracker.PhaseSummary] = Map(planning -> PhaseSummary(1547411782661, 1547411782824), optimization -> PhaseSummary(1547411782509, 1547411782648), parsing -> PhaseSummary(1547411764974, 1547411765852), analysis -> PhaseSummary(1547411765854, 1547411766069))
```
---
- Table and column statistics
Examples: as preparation create test tables and views
```
sql("create view v1 as select id, 't1' from range(10)") // vew in the default db namespace
sql("cache table my_cachedquery1 as select id, 'v2' from range(10)") //temporary table
sql("create table t1 as select id, 't1' from range(10)") // this requires hive support
```
Display catalog info:
```
spark.catalog.listDatabases.show(false)
spark.catalog.listTables.show(false)
```
Compute statistics on tables and cached views:
```
sql("analyze table t1 compute statistics")
// new in Spark 3.0, stats can be collected for cached views
sql("cache lazy table v1")
sql("analyze table v1 compute statistics")
```
Display table/view stats:
```
spark.table("v1").queryExecution.optimizedPlan.stats
spark.table("v1").queryExecution.stringWithStats
sql("explain cost select * from v1").show(false)
```
Compute and display column stats on tables, cached queries and cached views:
```
sql("analyze table t1 compute statistics for all columns")
// Spark 3 allows computing column stats on cached views, in addition
// to tables defined in the Hive metastore
sql("analyze table my_cachedquery1 compute statistics for all columns")
sql("analyze table v1 compute statistics for all columns")
spark.table("t1").queryExecution.optimizedPlan.stats.attributeStats
spark.table("my_cachedquery1").queryExecution.optimizedPlan.stats.attributeStats
spark.table("v1").queryExecution.optimizedPlan.stats.attributeStats
spark.table("t1").queryExecution.optimizedPlan.stats.attributeStats.foreach{case (k, v) => println(s"[$k]: $v")}
[id#0L]: ColumnStat(Some(10),Some(0),Some(9),Some(0),Some(8),Some(8),None,2)
```
Table statistics and column statistics histograms
```
sql("SET spark.sql.cbo.enabled=true")
sql("SET spark.sql.statistics.histogram.enabled=true")
spark.range(1000).selectExpr("id % 33 AS c0", "rand() AS c1", "0 AS c2").write.saveAsTable("t")
sql("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS c0, c1, c2")
spark.table("t").groupBy("c0").agg(count("c1").as("v1"), sum("c2").as("v2")).createTempView("temp")
spark.table("t").queryExecution.optimizedPlan.stats.attributeStats.foreach{case (k, v) => println(s"[$k]: $v")}
[c0#24320L]: ColumnStat(Some(33),Some(0),Some(32),Some(0),Some(8),Some(8),Some(Histogram(3.937007874015748,[Lorg.apache.spark.sql.catalyst.plans.logical.HistogramBin;@77c9db55)),2)
[c1#24321]: ColumnStat(Some(896),Some(7.45430597672625E-4),Some(0.9986498874940231),Some(0),Some(8),Some(8),Some(Histogram(3.937007874015748,[Lorg.apache.spark.sql.catalyst.plans.logical.HistogramBin;@258f3e5)),2)
[c2#24322]: ColumnStat(Some(1),Some(0),Some(0),Some(0),Some(4),Some(4),Some(Histogram(3.937007874015748,[Lorg.apache.spark.sql.catalyst.plans.logical.HistogramBin;@45f675a4)),2)
spark.table("temp").queryExecution.optimizedPlan.stats.attributeStats.foreach{case (k, v) => println(s"[$k]: $v")}
[c0#12161L]: ColumnStat(Some(33),Some(0),Some(32),Some(0),Some(8),Some(8),Some(Histogram(3.937007874015748,[Lorg.apache.spark.sql.catalyst.plans.logical.HistogramBin;@4d6cfa5)),2)
```
---
- Example command line for spark-shell/pyspark/spark-submit on YARN
`spark-shell --master yarn --num-executors 5 --executor-cores 4 --executor-memory 7g --driver-memory 7g`
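For batch jobs, a spark-submit equivalent sketch (the application class and jar name are placeholders):
```
spark-submit --master yarn --deploy-mode cluster \
  --num-executors 5 --executor-cores 4 --executor-memory 7g --driver-memory 7g \
  --class com.example.MyApp myapp.jar
```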
---
- Basic Scala methods to trigger actions for testing
These fetch the output and discard it:
```
sql("select id from range(10)").show
sql("select id from range(10)").collect
sql("select id from range(10)").foreach(_ => ()) // discards output
```
---
- Specify JAVA_HOME to use when running Spark on a YARN cluster
```
export JAVA_HOME=/usr/lib/jvm/myJavaHome # this is the JAVA_HOME of the driver
bin/spark-shell --conf spark.yarn.appMasterEnv.JAVA_HOME=/usr/lib/jvm/myJavaHome --conf spark.executorEnv.JAVA_HOME=/usr/lib/jvm/myJavaHome
```
---
- Run Pyspark on a Jupyter notebook
```
export PYSPARK_DRIVER_PYTHON=jupyter-notebook
# export PYSPARK_DRIVER_PYTHON=jupyter-lab
export PYSPARK_DRIVER_PYTHON_OPTS="--ip=`hostname` --no-browser --port=8888"
pyspark ...<add options here>
```
---
- Python UDF and pandas_udf, examples and tests
Examples of udf and pandas_udf (of type SCALAR) using Spark SQL.
Note: time.sleep is introduced for testing purposes
```python
# CPU-burning UDF
def slowf(s):
    for i in range(10000):
        a = 2**i
    return a

# variant that also sleeps, to simulate a slow UDF (overrides the previous definition)
import time

def slowf(s):
    for i in range(10000):
        a = 2**i
    time.sleep(10)
    return a

spark.udf.register("slowf", slowf)
spark.sql("select slowf(1)").show()
spark.sql("select avg(slowf(id)) from range(1000)").show()
```
```python
import pandas as pd
import time
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
def multiply_func(a, b):
    time.sleep(10)
    return a * b

multiply = pandas_udf(multiply_func, returnType=LongType())
spark.udf.register("multiply_func", multiply)

start = time.time()
spark.sql("select multiply_func(1,1)").show()
print(time.time() - start)

# By default pandas_udf batches 10000 rows (for each concurrently executing task),
# so for this example the execution time for 10k rows is expected to be about the same as for 1 row
start = time.time()
spark.sql("select avg(multiply_func(id,2)) from range(10000)").show()
print(time.time() - start)
```
Example without registering pandas_udf as SQL function
```python
df = sql("select cast(1.0 as double) col1, rand(42) col2, Array(rand(42),rand(42),rand(42)) col3 from range(1e8)")
df.printSchema()
root
|-- col1: double (nullable = false)
|-- col2: double (nullable = false)
|-- col3: array (nullable = false)
| |-- element: double (containsNull = false)
from pyspark.sql.functions import col, pandas_udf, PandasUDFType
from pyspark.sql.types import *
@pandas_udf(ArrayType(DoubleType()), PandasUDFType.SCALAR)
def test_pandas(col1):
    return col1 * col1
# dry run
df.withColumn('test', test_pandas(df.col3)).selectExpr("max(test)").show()
import time
start = time.time()
df.withColumn('test', test_pandas(df.col3)).write.format("noop").mode("overwrite").save()
end = time.time()
print(end - start)
```
---
- Change Garbage Collector algorithm
- For a discussion on tests with different GC algorithms for spark see the post [Tuning Java Garbage Collection for Apache Spark Applications](https://databricks.com/blog/2015/05/28/tuning-java-garbage-collection-for-spark-applications.html)
- Example of how to use G1 GC: `--conf spark.driver.extraJavaOptions="-XX:+UseG1GC" --conf spark.executor.extraJavaOptions="-XX:+UseG1GC"`
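  - To verify which collector is in use and inspect GC activity, one can also add GC logging flags. A sketch for JDK 8 (JDK 9+ uses `-Xlog:gc*` instead); the executors' GC logs end up in their stderr, visible from the "Executors" tab of the Web UI:
```
bin/spark-shell --master yarn \
  --conf spark.driver.extraJavaOptions="-XX:+UseG1GC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps" \
  --conf spark.executor.extraJavaOptions="-XX:+UseG1GC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps"
```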
---
- Set log level in spark-shell and PySpark
If you have a SparkContext, use `sc.setLogLevel(newLevel)`
Otherwise edit or create the file log4j.properties in $SPARK_CONF_DIR (default SPARK_HOME/conf)
for example: `vi $SPARK_CONF_DIR/log4j.properties`
Example for the logging level of PySpark REPL
```
log4j.logger.org.apache.spark.api.python.PythonGatewayServer=INFO
#log4j.logger.org.apache.spark.api.python.PythonGatewayServer=DEBUG
```
Example for the logging level of the Scala REPL:
`log4j.logger.org.apache.spark.repl.Main=INFO`
---
- Caching dataframes using off-heap memory
```
bin/spark-shell --master local[*] --driver-memory 64g --conf spark.memory.offHeap.enabled=true --conf spark.memory.offHeap.size=64g --jars ../spark-measure_2.11-0.11-SNAPSHOT.jar
val df = sql("select * from range(1000) cross join range(10000)")
df.persist(org.apache.spark.storage.StorageLevel.OFF_HEAP)
```
---
- Other options for caching dataframes
```
// Available storage levels:
// DISK_ONLY, MEMORY_AND_DISK, MEMORY_AND_DISK_SER, MEMORY_ONLY, MEMORY_ONLY_SER,
// DISK_ONLY_2, MEMORY_AND_DISK_2, MEMORY_AND_DISK_SER_2, MEMORY_ONLY_2, MEMORY_ONLY_SER_2, OFF_HEAP
df.persist(org.apache.spark.storage.StorageLevel.MEMORY_AND_DISK)
```
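To release the cached blocks when done:
```
df.unpersist()
```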
---
- Spark-root, read high energy physics data in ROOT format into Spark dataframes
```
bin/spark-shell --packages org.diana-hep:spark-root_2.11:0.1.16
val df = spark.read.format("org.dianahep.sparkroot").load("<path>/myrootfile.root")
val df = spark.read.format("org.dianahep.sparkroot.experimental").load("<path>/myrootfile.root")
```
---
- How to deploy Spark shell or a notebook behind a firewall
- This is relevant when using spark-shell or pyspark or a Jupyter Notebook,
running the Spark driver on a client machine with a local firewall and
accessing Spark executors remotely on a cluster
- The driver listens on 2 TCP ports that need to be accessed by the executors on the cluster.
This is how you can specify the port numbers (35000 and 35001 are picked just as an example):
```
--conf spark.driver.port=35000
--conf spark.driver.blockManager.port=35001
```
- You can set up the firewall rule on the driver to allow connections from the cluster nodes.
This is a simplified example of rule when using iptables:
```
-A INPUT -m state --state NEW -m tcp -p tcp -s 10.1.0.0/16 --dport 35000 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -s 10.1.0.0/16 --dport 35001 -j ACCEPT
```
- In addition, clients may want to access the Web UI port (4040 by default)
---
- Get username and security details via Apache Hadoop security API
```
scala> org.apache.hadoop.security.UserGroupInformation.getCurrentUser()
res1: org.apache.hadoop.security.UserGroupInformation = username@MYREALM (auth:KERBEROS)
```
---
- Distribute the Kerberos TGT cache to the executors
```bash
kinit # get a Kerberos TGT if you don't already have one
klist -l # list details of Kerberos credentials file
spark-shell --master yarn --files <path to kerberos credentials file>#krbcache --conf spark.executorEnv.KRB5CCNAME='FILE:./krbcache'
pyspark --master yarn --files <path to kerberos credentials file>#krbcache --conf spark.executorEnv.KRB5CCNAME='FILE:./krbcache'
```
---
- Run OS commands from Spark
```scala
// Scala, runs locally on the driver
import sys.process._
"uname -a".! // with one !, returns exit status
"uname -a".!! // with 2 !, returns output as String
```
- This executes OS commands on the Spark executors (relevant for cluster deployments).
It is expected to run on each executor, once for each "core"/task allocated.
However, the actual placement and ordering are not guaranteed, so treat this as a best-effort technique.
```scala
// Scala, runs on the executors/tasks in a cluster
import sys.process._
sc.parallelize(1 to sc.defaultParallelism).map(_ => "uname -a" !).collect()
sc.parallelize(1 to sc.defaultParallelism).map(_ => "uname -a" !!).collect().foreach(println)
```
Alternative method to run OS commands on Spark executors in Scala
```
val a = sc.parallelize(1 to sc.defaultParallelism).map(x => org.apache.hadoop.util.Shell.execCommand("uname","-a")).collect()
val a = sc.parallelize(1 to sc.defaultParallelism).map(x => org.apache.hadoop.util.Shell.execCommand("/usr/bin/bash","-c","echo $PWD")).collect()
```
```
# Python, run on the executors (see comments in the Scala version)
# method 1
import os
sc.parallelize(range(0, sc.defaultParallelism)).map(lambda i: os.system("uname -a")).collect()
# method 2
import subprocess
sc.parallelize(range(0, sc.defaultParallelism)).map(lambda i: subprocess.call(["uname", "-a"])).collect()
sc.parallelize(range(0, sc.defaultParallelism)).map(lambda i: subprocess.check_output(["uname", "-a"])).collect()
```
---
- Parquet tables
```
// Read
spark.read.parquet("fileNameAndPath")
// relevant configuration:
spark.conf.set("spark.sql.files.maxPartitionBytes", ..) // default 128MB, small files are grouped into partitions up to this size
// Write
df.coalesce(N_partitions).write // optionally use coalesce if you want to reduce the number of output partitions (beware that it also affects num of concurrent write tasks)
.partitionBy("colPartition1", "colOptionalSubPart") // partitioning column(s)
.bucketBy(numBuckets, "colBucket") // This feature currently gives error with save, follow SPARK-19256 or use saveAsTable (Hive)
.format("parquet")
.save("filePathandName") // you can use saveAsTable as an alternative
//Options
.option("parquet.block.size", <blockSize>) // defalut 128MB, see also c.hadoopConfiguration.setInt("parquet.block.size", <blocksize>
.option("compression", <compression_codec>) // default snappy, see also spark.sql.parquet.compression.codec
// relevant configuration parameters:
sc.hadoopConfiguration.setInt("parquet.block.size", .. ) // default to 128 MB parquet block size (size of the column groups)
spark.conf.set("spark.sql.parquet.compression.codec","xxx") // xxx= none, gzip, lzo, snappy, {zstd, brotli, lz4}
spark.conf.set("spark.sql.files.maxRecordsPerFile", ...) // defaults to 0, use if you need to limit size of files being written
```
```
// Example with use of save mode:
df.coalesce(4).write.mode(org.apache.spark.sql.SaveMode.Overwrite).parquet("..PATH..")
```
---
- Repartition / Compact Parquet tables
Parquet table repartition is an operation that you may want to use when you have ended up with
multiple small files in each partition folder and want to compact them into a smaller number of larger files.
Example:
```
val df = spark.read.parquet("myPartitionedTableToComapct")
df.repartition('colPartition1,'colOptionalSubPartition)
.write.partitionBy("colPartition1","colOptionalSubPartition")
.format("parquet")
.save("filePathandName")
```
---
- Read from Oracle via JDBC, example from [Spark_Oracle_JDBC_Howto.md](Spark_Oracle_JDBC_Howto.md)
```
val df = spark.read.format("jdbc")
.option("url", "jdbc:oracle:thin:@dbserver:port/service_name")
.option("driver", "oracle.jdbc.driver.OracleDriver")
.option("dbtable", "MYSCHEMA.MYTABLE")
.option("user", "MYORAUSER")
.option("password", "XXX")
.option("fetchsize",10000).load()
// test
df.printSchema
df.show(5)
// write data as compressed Parquet files
df.write.parquet("MYHDFS_TARGET_DIR/MYTABLENAME")
```
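For larger tables, reads can be parallelized by partitioning the JDBC source; a sketch (the partition column and bounds are examples and must match the data):
```
val df = spark.read.format("jdbc")
 .option("url", "jdbc:oracle:thin:@dbserver:port/service_name")
 .option("driver", "oracle.jdbc.driver.OracleDriver")
 .option("dbtable", "MYSCHEMA.MYTABLE")
 .option("user", "MYORAUSER")
 .option("password", "XXX")
 .option("partitionColumn", "ID") // numeric column used to split the reads
 .option("lowerBound", "1")
 .option("upperBound", "1000000")
 .option("numPartitions", "8")
 .load()
```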
---
- Configuration to switch back to use datasource V1 (as opposed to use datasource V2).
- See also bug SPARK-29304 Input Bytes Metric for Datasource v2 is absent
- Example for parquet:
`bin/spark-shell --master local[*] --conf spark.sql.sources.useV1SourceList="parquet"`
---
- Enable short-circuit reads for Spark on a Hadoop cluster
- Spark executors need to have libhadoop.so in the library path
- Short-circuit is a good feature to enable for Spark running on Hadoop clusters, as it improves the performance of I/O
that is local to the Spark executors.
- Note: the warning message "WARN shortcircuit.DomainSocketFactory: The short-circuit local reads feature cannot be used because libhadoop cannot be loaded"
is generated after checking on the driver machine. This can be misleading if the driver is not part of the Hadoop cluster, as what is important is that short-circuit is enabled on the executors!
- If the executors' library path, as set up by the system defaults, does not allow finding libhadoop.so, set it explicitly:
`--conf spark.executor.extraLibraryPath=/usr/lib/hadoop/lib/native --conf spark.driver.extraLibraryPath=/usr/lib/hadoop/lib/native`
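- A quick check from the Spark shell that the relevant HDFS client settings are in place (these are standard HDFS configuration keys; remember that what matters is the configuration seen by the executors):
```
sc.hadoopConfiguration.get("dfs.client.read.shortcircuit") // should be "true"
sc.hadoopConfiguration.get("dfs.domain.socket.path") // UNIX domain socket used by short-circuit reads
```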
---
- Spark-shell power mode and change config to avoid truncating print for long strings
- Enter power mode and set the max print string length to 1000:
- BTW, see more spark shell commands: `:help`
```
spark-shell
scala> :power
Power mode enabled. :phase is at typer.
import scala.tools.nsc._, intp.global._, definitions._
Try :help or completions for vals._ and power._
vals.isettings.maxPrintString=1000
```
---
- Examples of DataFrame creation for testing
```
// SQL
sql("select * from values (1, 'aa'), (2,'bb'), (3,'cc') as (id,desc)").show
+---+----+
| id|desc|
+---+----+
| 1| aa|
| 2| bb|
| 3| cc|
+---+----+
sql("select * from values (1, 'aa'), (2,'bb'), (3,'cc') as (id,desc)").createOrReplaceTempView("t1")
spark.table("t1").printSchema
root
|-- id: integer (nullable = false)
|-- desc: string (nullable = false)
spark.sql("create or replace temporary view outer_v1 as select * from values (1, 'aa'), (2,'bb'), (3,'cc') as (id,desc)")
sql("select id, floor(200*rand()) bucket, floor(1000*rand()) val1, floor(10*rand()) val2 from range(10)").show(3)
+---+------+----+----+
| id|bucket|val1|val2|
+---+------+----+----+
| 0| 1| 223| 5|
| 1| 26| 482| 5|
| 2| 42| 384| 7|
+---+------+----+----+
only showing top 3 rows
# Python
df = spark.createDataFrame([(1, "event1"), (2,"event2"), (3, "event3")], ("id","name"))
df = spark.createDataFrame([[1, "a string", (1,2,3), ("aa","bb","cc")]],"long_col long, string_col string, array_col array<long>, struct_col struct<col1:string,col2:string,col3:string>")
df.printSchema()
root
|-- long_col: long (nullable = true)
|-- string_col: string (nullable = true)
|-- array_col: array (nullable = true)
| |-- element: long (containsNull = true)
|-- struct_col: struct (nullable = true)
| |-- col1: string (nullable = true)
| |-- col2: string (nullable = true)
| |-- col3: string (nullable = true)
// Scala
scala> val df=Seq((1, "aaa", Map(1->"a") ,Array(1,2,3), Vector(1.1,2.1,3.1)), (2, "bbb", Map(2->"b") ,Array(4,5,6), Vector(4.1,5.1,6.1))).toDF("id","name","map","array","vector")
df: org.apache.spark.sql.DataFrame = [id: int, name: string ... 3 more fields]
df.printSchema
root
|-- id: integer (nullable = false)
|-- name: string (nullable = true)
|-- map: map (nullable = true)
| |-- key: integer
| |-- value: string (valueContainsNull = true)
|-- array: array (nullable = true)
| |-- element: integer (containsNull = false)
|-- vector: array (nullable = true)
| |-- element: double (containsNull = false)
df.show
+---+----+-----------+---------+---------------+
| id|name| map| array| vector|
+---+----+-----------+---------+---------------+
| 1| aaa|Map(1 -> a)|[1, 2, 3]|[1.1, 2.1, 3.1]|
| 2| bbb|Map(2 -> b)|[4, 5, 6]|[4.1, 5.1, 6.1]|
+---+----+-----------+---------+---------------+
// using case class
scala> case class myclass(id: Integer, name: String, myArray: Array[Double])
scala> val df=Seq(myclass(1, "aaaa", Array(1.1,2.1,3.1)),myclass(2, "bbbb", Array(4.1,5.1,6.1))).toDF
scala> df.show
+---+----+---------------+
| id|name| myArray|
+---+----+---------------+
| 1|aaaa|[1.1, 2.1, 3.1]|
| 2|bbbb|[4.1, 5.1, 6.1]|
+---+----+---------------+
// case class with struct
case class myclass2(id: Integer, name: String)
case class myclass(id: Integer, name: String, myArray: Array[Double], mynested: myclass2)
val df=Seq(myclass(1, "aaaa", Array(1.1,2.1,3.1), myclass2(11, "zzzz")),myclass(2, "bbbb", Array(4.1,5.1,6.1),myclass2(22,"www"))).toDF
df.printSchema
root
|-- id: integer (nullable = true)
|-- name: string (nullable = true)
|-- myArray: array (nullable = true)
| |-- element: double (containsNull = false)
|-- mynested: struct (nullable = true)
| |-- id: integer (nullable = true)
| |-- name: string (nullable = true)
df.show
+---+----+---------------+----------+
| id|name| myArray| mynested|
+---+----+---------------+----------+
| 1|aaaa|[1.1, 2.1, 3.1]|[11, zzzz]|
| 2|bbbb|[4.1, 5.1, 6.1]| [22, www]|
+---+----+---------------+----------+
// Dataset API
scala> df.as[myclass]
res75: org.apache.spark.sql.Dataset[myclass] = [id: int, name: string ... 1 more field]
scala> df.as[myclass].map(v => v.id + 1).reduce(_ + _)
res76: Int = 5
// Manipulating rows, columns and arrays
// collect_list aggregates values from multiple rows into an array
sql("select collect_list(col1) from values 1,2,3").show
+------------------+
|collect_list(col1)|
+------------------+
| [1, 2, 3]|
+------------------+
// explode expands an array into one row per element
sql("select explode(Array(1,2,3))").show
+---+
|col|
+---+
| 1|
| 2|
| 3|
+---+
sql("select col1, explode(Array(1,2,3)) from values Array(1,2,3)").show()
+---------+---+
| col1|col|
+---------+---+
|[1, 2, 3]| 1|
|[1, 2, 3]| 2|
|[1, 2, 3]| 3|
+---------+---+
// collect_list and explode combined, returning the original values
sql("select collect_list(col) from (select explode(Array(1,2,3)))").show
+-----------------+
|collect_list(col)|
+-----------------+
| [1, 2, 3]|
+-----------------+
// How to push a filter on a nested field in a DataFrame
// The general strategy is to unpack the array, apply the filter, then repack
// Note: higher-order functions in Spark SQL and other topics related to how to improve this
// are discussed at https://databricks.com/blog/2017/05/24/working-with-nested-data-using-higher-order-functions-in-sql-on-databricks.html
// Example:
sql("select col1, collect_list(col) from (select col1, explode(col1) as col from values Array(1,2,3),Array(4,5,6)) where col%2 = 0 group by col1").show()
+---------+-----------------+
| col1|collect_list(col)|
+---------+-----------------+
|[1, 2, 3]| [2]|
|[4, 5, 6]| [4, 6]|
+---------+-----------------+
// Example of using lateral view
sql("select * from values 'a','b' lateral view explode(Array(1,2)) tab1").show()
+----+---+
|col1|col|
+----+---+
| a| 1|
| a| 2|
| b| 1|
| b| 2|
+----+---+
```
---
- Load numpy arrays into a Spark Dataframe
- example: load the MNIST dataset from keras.datasets
```
$ pyspark --master local[*] --driver-memory 4g
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# normalize features to 0..1
x_train = x_train / 255.0
# flatten 28x28 arrays with images into one array of 784 elements
x_train = x_train.reshape((x_train.shape[0], -1))
data = [(x_train[i].astype(float).tolist(), int(y_train[i])) for i in range(len(y_train))]
from pyspark.sql.types import *
schema = StructType([StructField("features", ArrayType(FloatType())),
StructField("labels_raw", LongType())])
# Use this instead if not flattening the 28x28 array
#schema = StructType([StructField("features", ArrayType(ArrayType(FloatType()))),
# StructField("labels_raw", LongType())])
# this is slow, only 1 thread used
df = spark.createDataFrame(data, schema)
from pyspark.ml.feature import OneHotEncoderEstimator
encoder = OneHotEncoderEstimator(inputCols=['labels_raw'],outputCols=['labels'],dropLast=False)
model = encoder.fit(df)
df_train = model.transform(df).select("features","labels")
>>> df_train.printSchema()
root
|-- features: array (nullable = true)
| |-- element: float (containsNull = true)
|-- labels: vector (nullable = true)
####
# test dataset
#####
x_test = x_test / 255.0
data = [(x_test[i].astype(float).tolist(), int(y_test[i])) for i in range(len(y_test))]
df = spark.createDataFrame(data, schema)
model = encoder.fit(df)
df_test = model.transform(df).select("features","labels")
```
---
- Additional examples of dealing with nested structures in Spark SQL
```
scala> dsMuons.printSchema
root
|-- muons: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- reco::Candidate: struct (nullable = true)
| | |-- qx3_: integer (nullable = true)
| | |-- pt_: float (nullable = true)
| | |-- eta_: float (nullable = true)
| | |-- phi_: float (nullable = true)
| | |-- mass_: float (nullable = true)
| | |-- vertex_: struct (nullable = true)
| | | |-- fCoordinates: struct (nullable = true)
| | | | |-- fX: float (nullable = true)
| | | | |-- fY: float (nullable = true)
| | | | |-- fZ: float (nullable = true)
| | |-- pdgId_: integer (nullable = true)
| | |-- status_: integer (nullable = true)
| | |-- cachePolarFixed_: struct (nullable = true)
| | |-- cacheCartesianFixed_: struct (nullable = true)
// the following 2 are equivalent and transform an array of struct into a table-like format
// explode can be used to deal with arrays
// to deal with structs use "col.*"
dsMuons.createOrReplaceTempView("t1")
sql("select element.* from (select explode(muons) as element from t1)").show(2)
dsMuons.selectExpr("explode(muons) as element").selectExpr("element.*").show(2)
+---------------+----+---------+----------+----------+----------+--------------------+------+-------+----------------+--------------------+
|reco::Candidate|qx3_| pt_| eta_| phi_| mass_| vertex_|pdgId_|status_|cachePolarFixed_|cacheCartesianFixed_|
+---------------+----+---------+----------+----------+----------+--------------------+------+-------+----------------+--------------------+
| []| -3|1.7349417|-1.6098186| 0.6262487|0.10565837|[[0.08413784,0.03...| 13| 0| []| []|
| []| -3| 5.215807|-1.7931011|0.99229723|0.10565837|[[0.090448655,0.0...| 13| 0| []| []|
+---------------+----+---------+----------+----------+----------+--------------------+------+-------+----------------+--------------------+
```
---
- Multi-select statements in Spark SQL
Example:
```
scala> sql("from range(10) select id where id>5 select id+10 where id<4").show
+---+
| id|
+---+
| 6|
| 7|
| 8|
| 9|
| 10|
| 11|
| 12|
| 13|
+---+
```
---
- Fun with Spark SQL: FizzBuzz
```
sql("""
select case
when id % 15 = 0 then 'FizzBuzz'
when id % 3 = 0 then 'Fizz'
when id % 5 = 0 then 'Buzz'
else cast(id as string)
end as FizzBuzz
from range(20)
order by id""").show()
```
---
- Classic join example with parent-child relationship using Departments and Employees tables
```
# Create test tables
# Python
emp = spark.createDataFrame([(1, "Emp1", 10), (2,"Emp2", 10), (3, "Emp3", 20)], ("id","name","dep_id"))
dep = spark.createDataFrame([(10, "Department1"), (20, "Department2"), (30, "Department3")], ("id","name"))
// Scala
val emp = Seq((1, "Emp1", 10), (2,"Emp2", 10), (3, "Emp3", 20)).toDF("id","name","dep_id")
val dep = Seq((10, "Department1"), (20, "Department2"), (30, "Department3")).toDF("id","name")
emp.createOrReplaceTempView("employees")
dep.createOrReplaceTempView("departments")
# Inner join
spark.sql("""
select employees.id, employees.name emp_name, departments.name dep_name
from employees join departments
on employees.dep_id = departments.id
order by employees.id""").show()
# Outer join
spark.sql("""
select departments.id, departments.name dep_name, employees.name emp_name
from departments left outer join employees
on employees.dep_id = departments.id
order by departments.id""").show()
```
---
- Spark SQL aggregate functions, SQL vs. declarative API
- spark-shell:
```
val df=sql("select id, id % 3 id2 from range(10)")
df.groupBy('id2).agg(avg('id)).show
```
- sql:
```
sql("select id, id % 3 id2 from range(10)").createOrReplaceTempView("t1")
sql("select id2, avg(id) from t1 group by id2").show
```
---
- Column count/frequency histograms with Spark SQL
As I write this, Spark SQL does not yet implement width_bucket or other histogram-related functions.
An example of how to work around this with Spark SQL, from the Spark shell:
```
sql("select id from range(10)").createOrReplaceTempView("t1")
val df=spark.table("t1")
val maxID = df.select(max('id)).collect()(0)(0)
val minID = df.select(min('id)).collect()(0)(0)
val numBuckets = 3
// debug code
//spark.sql(s"select id, least(floor(round((id-$minID)/($maxID-$minID)*$numBuckets,2)),$numBuckets-1) bucketId from t1").show
spark.sql(s"select count(*) id_count, least(floor(round((id-$minID)/($maxID-$minID)*$numBuckets,2)),$numBuckets-1) bucketId from t1 group by bucketId order by bucketId").show
+--------+--------+
|id_count|bucketId|
+--------+--------+
| 3| 0|
| 3| 1|
| 4| 2|
+--------+--------+
```
Note that Spark RDD API has a histogram function [see doc](https://spark.apache.org/docs/latest/api/python/pyspark.html)
It can be used with Spark Dataframes as a workaround as in:
```
sql("select cast(id as double) from t1").rdd.map(x => x(0).asInstanceOf[Double]).histogram(3)
res1: (Array[Double], Array[Long]) = (Array(0.0, 3.0, 6.0, 9.0),Array(3, 3, 4))
```
See [link](http://www.silota.com/docs/recipes/sql-histogram-summary-frequency-distribution.html) for
additional examples on creating histograms with SQL.
---
- Spark binaryFile format (Spark 3.0)
Example:
```
scala> val df = spark.read.format("binaryFile").load("README.md")
df: org.apache.spark.sql.DataFrame = [path: string, modificationTime: timestamp ... 2 more fields]
scala> df.count
res2: Long = 1
scala> df.show
+--------------------+-------------------+------+--------------------+
| path| modificationTime|length| content|
+--------------------+-------------------+------+--------------------+
|file:///home/luca...|2019-04-24 21:20:23| 4620|[23 20 41 70 61 6...|
+--------------------+-------------------+------+--------------------+
```
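The binaryFile source also accepts a pathGlobFilter option to restrict which files are read; a minimal sketch (the path is a placeholder):
```
val images = spark.read.format("binaryFile").option("pathGlobFilter", "*.png").load("/path/to/images")
```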
---
- Spark TPCDS benchmark
  - Download and build the spark-sql-perf package from [https://github.com/databricks/spark-sql-perf](https://github.com/databricks/spark-sql-perf)
  - Download and build tpcds-kit for generating data from [https://github.com/databricks/tpcds-kit](https://github.com/databricks/tpcds-kit)
- Testing
1. Generate schema
2. Run benchmark
3. Extract results
See instructions at the [spark-sql-perf](https://github.com/databricks/spark-sql-perf) git repo
for additional info on how to generate data and run the package. Here are some pointers/examples:
```
///// 1. Generate schema
bin/spark-shell --master yarn --num-executors 25 --driver-memory 12g --executor-memory 12g --executor-cores 4 --jars /home/luca/spark-sql-perf-new/target/scala-2.11/spark-sql-perf_2.11-0.5.1-SNAPSHOT.jar
NOTES:
- Each executor will spawn dsdgen to create data, using the parameters for size (e.g. 10000) and number of partitions (e.g. 1000)
- Example: bash -c cd /home/luca/tpcds-kit/tools && ./dsdgen -table catalog_sales -filter Y -scale 10000 -RNGSEED 100 -parallel 1000 -child 107
- Each "core" in the executor spawns one dsdgen
- This workload is memory-hungry; to avoid excessive GC activity, allocate abundant memory per executor core
val tables = new com.databricks.spark.sql.perf.tpcds.TPCDSTables(spark.sqlContext, "/home/luca/tpcds-kit/tools", "10000")
tables.genData("/user/luca/TPCDS/tpcds_10000", "parquet", true, true, true, false, "", 100)
///// 2. Run Benchmark
export SPARK_CONF_DIR=/usr/hdp/spark/conf
export HADOOP_CONF_DIR=/etc/hadoop/conf
export LD_LIBRARY_PATH=/usr/hdp/hadoop/lib/native/
cd spark-2.4.3-bin-hadoop2.7
bin/spark-shell --master yarn --num-executors 32 --executor-cores 8 --driver-memory 8g --executor-memory 16g --jars /home/luca/spark-sql-perf-new/target/scala-2.11/spark-sql-perf_2.11-0.5.1-SNAPSHOT.jar --conf spark.sql.shuffle.partitions=512 --conf spark.sql.crossJoin.enabled=true --conf spark.eventLog.enabled=false --conf spark.sql.autoBroadcastJoinThreshold=100000000
// when using a large number of cores consider bumping up conf spark.sql.shuffle.partitions (default is 200)
// if running on k8s client mode, add: --conf spark.task.maxDirectResultSize=100000000000 to work around SPARK-26087
sql("SET spark.sql.perf.results=/user/luca/TPCDS/perftest_results")
import com.databricks.spark.sql.perf.tpcds.TPCDSTables
val tables = new TPCDSTables(spark.sqlContext, "/home/luca/tpcds-kit/tools","10000")
///// 3. Setup tables and run benchmark
tables.createTemporaryTables("/user/luca/TPCDS/tpcds_10000", "parquet")
val tpcds = new com.databricks.spark.sql.perf.tpcds.TPCDS(spark.sqlContext)
// Run benchmark
val experiment = tpcds.runExperiment(tpcds.tpcds2_4Queries)
// optionally: experiment.waitForFinish(timeout)
--------------------
// Example of how to use an exclude list (or similarly an include list) to limit the number of queries:
//val benchmarkQueries = for (q <- tpcds.tpcds1_4Queries if !q.name.matches("q14a-v1.4|q14b-v1.4|q72-v1.4")) yield(q)
//val experiment = tpcds.runExperiment(benchmarkQueries)
///// 4. Extract results
// simply print execution time results
val df = experiment.currentResults.toDF
df.selectExpr("name", "round(executionTime/1000,3) as exec_time_sec").show(1000)
// or use this:
experiment.currentResults.toDF.createOrReplaceTempView("currentResults")
spark.sql("select name, min(executiontime) as MIN_Exec, max(executiontime) as MAX_Exec, avg(executiontime) as AVG_Exec_Time_ms from currentResults group by name order by name").show(200)
spark.sql("select name, min(executiontime) as MIN_Exec, max(executiontime) as MAX_Exec, avg(executiontime) as AVG_Exec_Time_ms from currentResults group by name order by name").repartition(1).write.csv("TPCDS/test_results_<optionally_add_date_suffix>.csv")
///// Use CBO, modify step 3 as follows
// one-off: setup tables using the catalog (do not use temporary tables as in the example above)
tables.createExternalTables("/user/luca/TPCDS/tpcds_1500", "parquet", "tpcds1500", overwrite = true, discoverPartitions = true)
// compute statistics
tables.analyzeTables("tpcds1500", analyzeColumns = true)
tables.createExternalTables("/user/luca/TPCDS/tpcds_1500", "parquet", "tpcds10000", overwrite = true, discoverPartitions = true)
tables.analyzeTables("tpcds10000", analyzeColumns = true)
spark.conf.set("spark.sql.cbo.enabled",true)
// --conf spark.sql.cbo.enabled=true
sql("use tpcds10000")
sql("show tables").show
spark.conf.set("spark.sql.cbo.enabled",true)
// --conf spark.sql.cbo.enabled=true
////// Experiment with caching large tables prior to running the benchmark (you need to have enough memory allocated to the executors)
//Tables/views to cache
//val list_tables=List("catalog_returns","inventory","store_sales","store_returns","web_sales","web_returns","call_center","catalog_page","customer","customer_address","customer_demographics","date_dim","household_demographics","income_band","item","promotion","reason","ship_mode","store","time_dim","warehouse","web_page","web_site")
val list_tables=List("catalog_returns","inventory","store_sales","store_returns","web_sales","web_returns","customer")
for (t <- list_tables) spark.table(t).persist(org.apache.spark.storage.StorageLevel.MEMORY_ONLY).count()
```
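To release the executor memory after a cached run (a cleanup step added here for convenience, not part of the original recipe):
```
for (t <- list_tables) spark.table(t).unpersist()
```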
---
- Generate simple benchmark load, CPU-bound with Spark
  - Note: scale up the tests by using larger test tables, that is, by extending the (xx) value in "range(xx)"
```
bin/spark-shell --master local[*]
// 1. Test Query 1
spark.time(sql("select count(*) from range(10000) cross join range(1000) cross join range(100)").show)
// 2. Test Query 2
// this other example exercises more code paths in Spark execution
sql("select id, floor(200*rand()) bucket, floor(1000*rand()) val1, floor(10*rand()) val2 from range(1000000)").cache().createOrReplaceTempView("t1")
sql("select count(*) from t1").show()
spark.time(sql("select a.bucket, sum(a.val2) tot from t1 a, t1 b where a.bucket=b.bucket and a.val1+b.val1<1000 group by a.bucket order by a.bucket").show())
```
---
- Generate a simple I/O intensive benchmark load with Spark
- Setup or copy a large test table, using TPCDS schema
- query a large fact table, for example store_sales with a filter condition that forces a full scan
  - use the noop data source to write data "to /dev/null" as in: `df.write.format("noop").mode("overwrite").save`
  - Previously (Spark 2.x) I used this workaround instead:
    - Use a filter condition that returns 0 (or very few) rows and use "select *" (all columns)
  - Check the execution plan: you want to confirm that Spark is neither using partition pruning nor managing to push down filters successfully
    - In the following example this is achieved by adding a filter condition with a decimal value that has higher precision than the table values
- Use Spark dashboard and/or sparkMeasure and/or OS tools to make sure the query runs as intended, i.e. performing a full table scan.
- Example query:
```
val df=spark.read.parquet("/TPCDS/tpcds_1500/store_sales")
df.write.format("noop").mode("overwrite").save
// workaround used for Spark 2.x -> df.where("ss_sales_price=37.8688").collect
// SQL version
df.createOrReplaceTempView("store_sales")
spark.sql("select * from store_sales").write.format("noop").mode("overwrite").save
// workaround used for Spark 2.x -> spark.sql("select * from store_sales where ss_sales_price=37.8688").collect
```
---
- Monitor Spark workloads with Dropwizard metrics for Spark, Influxdb Grafana
  - Three main steps: (A) configure the [Dropwizard (codahale) Metrics library](https://metrics.dropwizard.io) for Spark,
    (B) sink the metrics to InfluxDB,
    (C) set up Grafana dashboards to read the metrics from InfluxDB
- See [Spark Performance Dashboard](../Spark_Dashboard)
---
- Spark has 2 configurable metrics sources in the driver introduced by [SPARK-26285](https://issues.apache.org/jira/browse/SPARK-26285)
- The namespace is AccumulatorSource
  - The metric sources are: DoubleAccumulatorSource, LongAccumulatorSource
  - They allow exporting accumulator variables (LongAccumulator and DoubleAccumulator)
  - These metrics can be used in the Grafana dashboard or with other sinks
- Example:
```
import org.apache.spark.util.{AccumulatorV2, DoubleAccumulator, LongAccumulator}
import org.apache.spark.metrics.source.{DoubleAccumulatorSource, LongAccumulatorSource}
val acc1 = new LongAccumulator()
LongAccumulatorSource.register(spark.sparkContext, Map("my-accumulator-1" -> acc1))
scala> acc1.value
res5: Long = 0
scala> acc1.add(1L)
scala> acc1.value
This will appear in the sink, for example as a record:
my-accumulator-1,applicationid=application_1549330477085_0257,namespace=AccumulatorSource,process=driver,username=luca
```
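For reference, a sketch of a sink configuration shipping these metrics to InfluxDB via its Graphite-compatible endpoint (host and port below are placeholders to adapt):
```
# $SPARK_HOME/conf/metrics.properties (sketch)
*.sink.graphite.class=org.apache.spark.metrics.sink.GraphiteSink
*.sink.graphite.host=influxdb_host.example.com
*.sink.graphite.port=2003
*.sink.graphite.period=10
*.sink.graphite.unit=seconds
```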
---
- How to access AWS s3 Filesystem with Spark
  - Deploy the jars for hadoop-aws with the implementation of S3A as a Hadoop filesystem.
  - The following lists multiple (redundant) ways to set the Hadoop client configuration
    for s3a in the Spark driver JVM. Spark will take care of propagating the Hadoop client
    configuration to the classpath of the executors' JVMs (see org.apache.spark.deploy.SparkHadoopUtil.scala).
```
export AWS_SECRET_ACCESS_KEY="XXXX..." # either set this or use spark conf as listed below: multiple ways to config
export AWS_ACCESS_KEY_ID="YYYY..."
bin/spark-shell \
--conf spark.hadoop.fs.s3a.endpoint="https://s3.cern.ch" \
--conf spark.hadoop.fs.s3a.impl="org.apache.hadoop.fs.s3a.S3AFileSystem" \
--conf spark.hadoop.fs.s3a.secret.key="XXX..." \
--conf spark.hadoop.fs.s3a.access.key="YYY..." \
--packages org.apache.hadoop:hadoop-aws:2.7.7 # edit hadoop-aws version to match Spark's Hadoop
# example of how to use
val df=spark.read.parquet("s3a://datasets/tpcds-1g/web_sales")
df.count
```
- Note, I have tested this on Spark compiled for Hadoop 3.2 and with Hadoop 2.7.
I have noticed that Hadoop 3.2/hadoop-aws 3.2 reading from s3.cern.ch gets stuck when listing
directories with a large number of files (as in the TPCDS benchmark). The workaround is:
```
--packages org.apache.hadoop:hadoop-aws:3.2.0
--conf spark.hadoop.fs.s3a.list.version=1
```
  - the hadoop-aws package will also pull in dependencies from com.amazonaws:aws-java-sdk (matching version)
- note: use `s3cmd la` to list available s3 buckets
- More configuration options (alternatives to the recipe above):
- Set config in driver's Hadoop client
```
sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://s3.cern.ch")
sc.hadoopConfiguration.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
sc.hadoopConfiguration.set("fs.s3a.secret.key", "XXXXXXXX..")
sc.hadoopConfiguration.set("fs.s3a.access.key", "YYYYYYY...")
// note for Python/PySpark use sc._jsc.hadoopConfiguration().set(...)
```
- Set config in Hadoop client core-site.xml
```
<property>
<name>fs.s3a.secret.key</name>
<value>XXXX</value>
</property>
<property>
<name>fs.s3a.access.key</name>
<value>YYYY</value>
</property>
<property>
<name>fs.s3a.endpoint</name>
<value>https://s3.cern.ch</value>
</property>
<property>
<name>fs.s3a.impl</name>
<value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
</property>
```
---
- How to use/choose pyspark version to import from python
- simple way to make import pyspark work in python (`pip install pyspark`)
  - more sophisticated: you want to choose the Spark version and/or (re)use an existing Spark home:
```
pip install findspark
python
import findspark
findspark.init('/home/luca/Spark/spark-2.4.0-bin-hadoop2.7') #set path to SPARK_HOME
```
- note: when using bin/pyspark, this is not relevant,
as pyspark from the current SPARK_HOME will be used in this case
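After findspark.init(...), a SparkSession can be created as usual; a minimal sketch:
```
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("local[*]")
         .appName("findspark-test")
         .getOrCreate())
spark.range(10).count()  # quick smoke test
```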
---
- How to add a description to a Spark job:
- spark.sparkContext.setJobDescription("job description")
  - Note: in Spark 3.0, when using Spark SQL/DataFrames, "job description" will be displayed in the SQL tab
  - See also: spark.sparkContext.setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean)
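A minimal sketch of the usage (the table name is just an example):
```
spark.sparkContext.setJobDescription("Count rows of t1")
spark.table("t1").count()  // jobs triggered by this action carry the description
spark.sparkContext.setJobDescription(null)  // reset to the default description
```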
---
Salting SQL joins to work around problems with data skew on large tables, example:
Add a salt column to the tables to be joined:
```
val df1b = df1.selectExpr("id1", "key1", "name1", "int(rand()*10) as salt1")
val df2b = df2.selectExpr("id2", "key2", "name2", "int(rand()*10) as salt2")
```
Transform the query. Caveat: with independent random salts on both sides, as below, two rows with matching keys only join when their salts happen to coincide, so this form drops matches; the usual fix is to salt one side and replicate the other across all salt values, see the sketch after this example:
```
// original join
df1.join(df2, 'key1==='key2)
// join using the salt column
df1b.join(df2b, 'key1==='key2 and 'salt1==='salt2)
```
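A sketch of the salt-and-replicate variant (my addition, not from the original note; it assumes 10 salt buckets and the df1/df2 of this example, with df1 as the large/skewed side):
```
import org.apache.spark.sql.functions._

val numSalts = 10
// salt the skewed table with a random bucket id
val df1b = df1.withColumn("salt1", (rand() * numSalts).cast("int"))
// replicate each row of the other table across all salt values, so no matches are lost
val df2b = df2.withColumn("salt2", explode(array((0 until numSalts).map(lit): _*)))
df1b.join(df2b, df1b("key1") === df2b("key2") && df1b("salt1") === df2b("salt2"))
```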
---
The Spark SQL join hints are:
"broadcast" -> Spark 2.x;
"merge", "shuffle_hash", "shuffle_replicate_nl" -> Spark 3.0.
Examples (for SQL, use /*+ hint_name(t1, t2) */ as in the sketch after the Scala examples):
```
val df1 = spark.sql("select id as id1, id % 2 as key1, 'aaaaaaaa' name1 from range(100)")
val df2 = spark.sql("select id+5 id2, id % 2 as key2, 'bbbbbbbbb' name2 from range(100)")
df1.join(df2, 'key1==='key2).explain(true)
df1.hint("broadcast").join(df2, 'key1==='key2).explain(true)
df1.hint("merge").join(df2, 'key1==='key2).explain(true)
df1.hint("shuffle_hash").join(df2, 'key1==='key2).explain(true)
df1.hint("shiffle_replicate_nl").join(df2, 'key1==='key2).explain(true)
```
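The same hints can be used from plain SQL via the comment syntax noted above; a minimal sketch using temporary views:
```
df1.createOrReplaceTempView("t1")
df2.createOrReplaceTempView("t2")
sql("select /*+ broadcast(t2) */ * from t1 join t2 on key1 = key2").explain(true)
sql("select /*+ merge(t1, t2) */ * from t1 join t2 on key1 = key2").explain(true)
```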
---
Use of regular expressions in Spark SQL, example and gotcha:
When using SQL or selectExpr, you need to double the backslashes, as the pattern string goes through an extra round of string-literal escaping:
```
val df=sql("select id, 'aadd ffggg sss wwwaaa' name from range(10)")
df.selectExpr("regexp_extract(name, '(\\\\w+)', 0)").show(2)
```
All OK with the direct use of the function:
```
df.select(regexp_extract(col("name"), "(\\w+)", 0)).show(2)
```
| {
"pile_set_name": "Github"
} |
/*
Copyright (C) 2011 by MarkLogic Corporation
Author: Mike Brevoort <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
.cm-s-xq-dark.CodeMirror { background: #0a001f; color: #f8f8f8; }
.cm-s-xq-dark div.CodeMirror-selected { background: #27007A; }
.cm-s-xq-dark .CodeMirror-line::selection, .cm-s-xq-dark .CodeMirror-line > span::selection, .cm-s-xq-dark .CodeMirror-line > span > span::selection { background: rgba(39, 0, 122, 0.99); }
.cm-s-xq-dark .CodeMirror-line::-moz-selection, .cm-s-xq-dark .CodeMirror-line > span::-moz-selection, .cm-s-xq-dark .CodeMirror-line > span > span::-moz-selection { background: rgba(39, 0, 122, 0.99); }
.cm-s-xq-dark .CodeMirror-gutters { background: #0a001f; border-right: 1px solid #aaa; }
.cm-s-xq-dark .CodeMirror-guttermarker { color: #FFBD40; }
.cm-s-xq-dark .CodeMirror-guttermarker-subtle { color: #f8f8f8; }
.cm-s-xq-dark .CodeMirror-linenumber { color: #f8f8f8; }
.cm-s-xq-dark .CodeMirror-cursor { border-left: 1px solid white; }
.cm-s-xq-dark span.cm-keyword { color: #FFBD40; }
.cm-s-xq-dark span.cm-atom { color: #6C8CD5; }
.cm-s-xq-dark span.cm-number { color: #164; }
.cm-s-xq-dark span.cm-def { color: #FFF; text-decoration:underline; }
.cm-s-xq-dark span.cm-variable { color: #FFF; }
.cm-s-xq-dark span.cm-variable-2 { color: #EEE; }
.cm-s-xq-dark span.cm-variable-3, .cm-s-xq-dark span.cm-type { color: #DDD; }
.cm-s-xq-dark span.cm-property {}
.cm-s-xq-dark span.cm-operator {}
.cm-s-xq-dark span.cm-comment { color: gray; }
.cm-s-xq-dark span.cm-string { color: #9FEE00; }
.cm-s-xq-dark span.cm-meta { color: yellow; }
.cm-s-xq-dark span.cm-qualifier { color: #FFF700; }
.cm-s-xq-dark span.cm-builtin { color: #30a; }
.cm-s-xq-dark span.cm-bracket { color: #cc7; }
.cm-s-xq-dark span.cm-tag { color: #FFBD40; }
.cm-s-xq-dark span.cm-attribute { color: #FFF700; }
.cm-s-xq-dark span.cm-error { color: #f00; }
.cm-s-xq-dark .CodeMirror-activeline-background { background: #27282E; }
.cm-s-xq-dark .CodeMirror-matchingbracket { outline:1px solid grey; color:white !important; }
| {
"pile_set_name": "Github"
} |
--- runfiles/texmf-dist/scripts/latex2man/latex2man.orig 2017-08-06 21:56:45.722169852 -0400
+++ runfiles/texmf-dist/scripts/latex2man/latex2man 2017-08-06 22:02:41.815668681 -0400
@@ -1324,7 +1324,7 @@
last SWITCH;
};
# LaTeX macros with two arguments
- /\\([a-zA-Z]+){([^}]*)}{([^}]*)}/
+ /\\([a-zA-Z]+)\{([^}]*)}\{([^}]*)}/
&& do {$s=$`;$m=$1;$a1=$2;$a2=$3;$r=$'; #'
check_Macro2 $m;
interpret_word $s;
@@ -1338,7 +1338,7 @@
last SWITCH;
};
# Special Handling of Email and URL LaTeX macros with one argument
- /\\(URL|Email){([^}]*)}/ && ($opt_H)
+ /\\(URL|Email)\{([^}]*)}/ && ($opt_H)
&& do {$s=$`;$m=$1;$a1=$2;$r=$'; #'
interpret_word $s;
PrintM $Macro2a->{$m};
@@ -1351,7 +1351,7 @@
last SWITCH;
};
# LaTeX macros with one argument
- /\\([a-zA-Z]+){([^}]*)}/ && do {$s=$`;$m=$1;$a1=$2;$r=$'; #'
+ /\\([a-zA-Z]+)\{([^}]*)}/ && do {$s=$`;$m=$1;$a1=$2;$r=$'; #'
check_Macro1 $m;
interpret_word $s;
PrintM $Macro1a->{$m};
@@ -1518,7 +1518,7 @@
$join = $cnt % 2 != 0;
$kind = 3;
} else {
- my @x = $_ =~ /[^\\]{/g;
+ my @x = $_ =~ /[^\\]\{/g;
my @y = $_ =~ /[^\\]}/g;
$join = $#x != $#y;
$kind = 2;
@@ -1679,7 +1679,7 @@
my $line = $_; chop $line;
print "--- \`$line'\n";
}
- if (/^\s*\\input{([^}]*)}\s*/) {
+ if (/^\s*\\input\{([^}]*)}\s*/) {
# handle \input{fn}
my $fn = $1;
printf DEST "%%%%%%%%%%%%%%%%%% start of \\input{%s}\n", $fn;
@@ -1731,7 +1731,7 @@
my $line = $_; chop $line;
print "--- \`$line'\n";
}
- if (/^\s*\\input{([^}]*)}\s*/) {
+ if (/^\s*\\input\{([^}]*)}\s*/) {
# handle \input{fn}
my $fn = $1;
if ($opt_M) {
@@ -1760,7 +1760,7 @@
next if ($skip[-1] == 1);
if ($inside_verb) {
- if (/^\s*\\end{verbatim}/) {
+ if (/^\s*\\end\{verbatim}/) {
if ($started == 1) {
&{$Prefix . "VerbatimEnd"};
$inside_verb = 0;
@@ -1789,19 +1789,19 @@
$rcs_date,$rcs_time,$rcs_owner,$rcs_status,$rcs_locker) = split(/\s/,$1);
$date = date2str ($rcs_date);
$Macro->{'today'} = $date;
- } elsif (/^\s*\\setDate{\\rcsInfoLongDate}/) {
+ } elsif (/^\s*\\setDate\{\\rcsInfoLongDate}/) {
$Macro->{'Date'} = $date;
- } elsif (/^\s*\\setDate{\\today}/) {
+ } elsif (/^\s*\\setDate\{\\today}/) {
$Macro->{'Date'} = $date;
- } elsif (/^\s*\\setDate{([^}]*)}/) {
+ } elsif (/^\s*\\setDate\{([^}]*)}/) {
$date = $1;
$date =~ s/~/$Macro->{'~'}/g;
$Macro->{'Date'} = $date;
- } elsif (/^\s*\\setVersion{([^}]*)}/) {
+ } elsif (/^\s*\\setVersion\{([^}]*)}/) {
$version = $1;
$versin =~ s/~/$Macro->{'~'}/g;
$Macro->{'Version'} = $version;
- } elsif (/^\s*\\begin{Name}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}/) {
+ } elsif (/^\s*\\begin\{Name}\{([^}]*)}\{([^}]*)}\{([^}]*)}\{([^}]*)}\{([^}]*)}/) {
$section = "Name";
$chapter = $1;
$name = $2;
@@ -1815,9 +1815,9 @@
$section_cnt = 0;
&{$Prefix . "Start"} ($name, $chapter, $author, $tool, $title);
&{$Prefix . "NameStart"} ($name, $chapter, $author, $tool, $title);
- } elsif (/^\s*\\end{Name}/) {
+ } elsif (/^\s*\\end\{Name}/) {
&{$Prefix . "NameEnd"} ($name, $chapter, $author, $tool);
- } elsif (/^\s*\\begin{Table}(\[([^]]*)\])?{([^}]*)}/) {
+ } elsif (/^\s*\\begin\{Table}(\[([^]]*)\])?\{([^}]*)}/) {
# \begin{Table}[width]{columns}
if ($started == 1) {
$columns = $3;
@@ -1826,74 +1826,74 @@
$first_column = 1;
&{$Prefix . "TableStart"} ($columns, $2);
}
- } elsif (/^\s*\\end{Table}/) {
+ } elsif (/^\s*\\end\{Table}/) {
if ($started == 1) {
$inside_table = 0;
$first_column = 0;
&{$Prefix . "TableEnd"} ($columns);
}
- } elsif (/^\s*\\begin{Description}(\[[^]]*\])?/) {
+ } elsif (/^\s*\\begin\{Description}(\[[^]]*\])?/) {
if ($started == 1) {
$list_nest++;
$cur_list[$list_nest] = 'descr';
$item_nr[$list_nest] = 0;
&{$Prefix . "DescriptionStart"};
}
- } elsif (/^\s*\\end{Description}/) {
+ } elsif (/^\s*\\end\{Description}/) {
if ($started == 1) {
&{$Prefix . "DescriptionEnd"};
$list_nest--;
}
- } elsif (/^\s*\\begin{description}/) {
+ } elsif (/^\s*\\begin\{description}/) {
if ($started == 1) {
$list_nest++;
$cur_list[$list_nest] = 'descr';
$item_nr[$list_nest] = 0;
&{$Prefix . "DescriptionStart"};
}
- } elsif (/^\s*\\end{description}/) {
+ } elsif (/^\s*\\end\{description}/) {
if ($started == 1) {
&{$Prefix . "DescriptionEnd"};
$list_nest--;
}
- } elsif (/^\s*\\begin{center}/) {
+ } elsif (/^\s*\\begin\{center}/) {
if ($started == 1) {
&{$Prefix . "CenterStart"};
}
- } elsif (/^\s*\\end{center}/) {
+ } elsif (/^\s*\\end\{center}/) {
if ($started == 1) {
&{$Prefix . "CenterEnd"};
}
- } elsif (/^\s*\\begin{enumerate}/) {
+ } elsif (/^\s*\\begin\{enumerate}/) {
if ($started == 1) {
$list_nest++;
$cur_list[$list_nest] = 'enum';
$item_nr[$list_nest] = 0;
&{$Prefix . "EnumStart"} ;
}
- } elsif (/^\s*\\end{enumerate}/) {
+ } elsif (/^\s*\\end\{enumerate}/) {
if ($started == 1) {
&{$Prefix . "EnumEnd"} ;
$list_nest--;
}
- } elsif (/^\s*\\begin{itemize}/) {
+ } elsif (/^\s*\\begin\{itemize}/) {
if ($started == 1) {
$list_nest++;
$cur_list[$list_nest] = 'item';
$item_nr[$list_nest] = 0;
&{$Prefix . "ItemStart"} ;
}
- } elsif (/^\s*\\end{itemize}/) {
+ } elsif (/^\s*\\end\{itemize}/) {
if ($started == 1) {
&{$Prefix . "ItemEnd"} ;
$list_nest--;
}
- } elsif (/^\s*\\begin{verbatim}/) {
+ } elsif (/^\s*\\begin\{verbatim}/) {
if ($started == 1) {
&{$Prefix . "VerbatimStart"};
$inside_verb = 1;
}
- } elsif (/^\s*\\(subsubsection|subsection|section){([^}]*)}/) {
+ } elsif (/^\s*\\(subsubsection|subsection|section)\{([^}]*)}/) {
$kind = $1;
$section = $2;
$section_cnt ++;
@@ -1904,7 +1904,7 @@
}
} elsif (/^\s*\\LatexManEnd/) {
last;
- } elsif (/^\s*((\\begin{Name|Table|Description})|(\\(sub)?section))/) {
+ } elsif (/^\s*((\\begin\{Name|Table|Description})|(\\(sub)?section))/) {
die "$CMD: in line $.\n " .
"Arguments of $1 are not contained in a single " .
"line.\n " .
| {
"pile_set_name": "Github"
} |
/**********************************************************************
* gosthash.h *
* Copyright (c) 2005-2006 Cryptocom LTD *
* This file is distributed under the same license as OpenSSL *
* *
* Declaration of GOST R 34.11-94 hash functions *
 * uses gost89.h; does not need OpenSSL                              *
**********************************************************************/
#ifndef GOSTHASH_H
# define GOSTHASH_H
# include "gost89.h"
# include <stdlib.h>
# if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
typedef __int64 ghosthash_len;
# elif defined(__arch64__)
typedef long ghosthash_len;
# else
typedef long long ghosthash_len;
# endif
typedef struct gost_hash_ctx {
ghosthash_len len;
gost_ctx *cipher_ctx;
int left;
byte H[32];
byte S[32];
byte remainder[32];
} gost_hash_ctx;
/* Initializes gost hash ctx, including creation of gost cipher ctx */
int init_gost_hash_ctx(gost_hash_ctx * ctx,
const gost_subst_block * subst_block);
void done_gost_hash_ctx(gost_hash_ctx * ctx);
/*
* Cleans up all fields, except cipher ctx preparing ctx for computing of new
* hash value
*/
int start_hash(gost_hash_ctx * ctx);
/* Hashes block of data */
int hash_block(gost_hash_ctx * ctx, const byte * block, size_t length);
/*
* Finalizes computation of hash and fills buffer (which should be at least
* 32 bytes long) with value of computed hash.
*/
int finish_hash(gost_hash_ctx * ctx, byte * hashval);
#endif
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ page.title }} » {{site.abbr}} lambda-architecture.net</title>
{% if page.description %}<meta name="description" content="{{ page.description }}">{% endif %}
<meta name="author" content="{{ site.author.name }}">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="google-site-verification" content="9aiIoTSv6RSDchHvJfuPXfGj8uzHNjuDAxFazFPq4iM">
<!-- Bootstrap core CSS -->
<link href="//cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.0.2/css/bootstrap.min.css" rel="stylesheet">
<link href="//cdnjs.cloudflare.com/ajax/libs/font-awesome/4.0.3/css/font-awesome.min.css" rel="stylesheet">
<link href="{{ ASSET_PATH }}/css/social-buttons-3.css" rel="stylesheet" type="text/css" media="all">
<link href="{{ ASSET_PATH }}/css/table.css" rel="stylesheet" type="text/css" media="all">
<link href="{{ ASSET_PATH }}/css/style.css?body=1" rel="stylesheet" type="text/css" media="all">
<!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7/html5shiv.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/respond.js/1.3.0/respond.min.js"></script>
<![endif]-->
<!-- Le fav and touch icons -->
<link rel="shortcut icon" href="/favicon.ico">
<!-- Update these with your own images
<link rel="apple-touch-icon" href="{{ ASSET_PATH }}/images/apple-touch-icon.png">
<link rel="apple-touch-icon" sizes="72x72" href="{{ ASSET_PATH }}/images/apple-touch-icon-72x72.png">
<link rel="apple-touch-icon" sizes="114x114" href="{{ ASSET_PATH }}/images/apple-touch-icon-114x114.png">
-->
<!-- atom & rss feed -->
<link href="{{ BASE_PATH }}{{ site.JB.atom_path }}" type="application/atom+xml" rel="alternate" title="Sitewide ATOM Feed">
<link href="{{ BASE_PATH }}{{ site.JB.rss_path }}" type="application/rss+xml" rel="alternate" title="Sitewide RSS Feed">
</head>
<body>
<div class="navbar navbar-default" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="{{ HOME_PATH }}"><h3>{{ site.abbr }} <small>{{ site.title }}</small></h3></a>
</div>
<div class="collapse navbar-collapse">
<ul class="nav navbar-nav navbar-right">
{% assign pages_list = site.navigation %}
{% include JB/pages_list %}
</ul>
</div><!--/.nav-collapse -->
</div>
</div>
<div class="container">
<div class="content">
{{ content }}
</div>
<hr>
<footer>
<p>© {{ site.time | date: '%Y' }} <a href="http://mhausenblas.info/">Michael Hausenblas</a> & <a href="http://nathan.gs">Nathan Bijnens</a>, inspired by <a href="http://manning.com/marz/">Nathan Marz</a>.
</p>
</footer>
</div>
<a href="/contribute"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_orange_ff7600.png" alt="Fork me on GitHub"></a>
{% include JB/analytics %}
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.0.2/js/bootstrap.min.js"></script>
<script src="{{ ASSET_PATH }}/js/jquery.popupwindow.js"></script>
<script type="text/javascript">
$(function()
{
$(".popup").popupwindow();
});
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
\section{inc2}
| {
"pile_set_name": "Github"
} |
const { createValidator } = require('../validator/helper');
const schema = {
type: 'object',
additionalProperties: false,
properties: {
issuer_url: {
type: 'string',
format: 'uri'
},
clients: {
type: 'array',
items: {
type: 'object',
properties: {
// optional, will override the top level one
issuer_url: {
type: 'string',
format: 'uri'
},
client_id: {
type: 'string',
minLength: 1
},
client_secret: {
type: 'string',
minLength: 1
}
}
}
}
},
required: [
'clients'
]
};
module.exports = {
rights: {
padmin: 'rw'
},
validator: createValidator(schema)
};
| {
"pile_set_name": "Github"
} |
---
order: 2
title:
zh-CN: 骨架按钮、头像和输入框。
en-US: Skeleton button and avatar
---
## zh-CN
骨架按钮、头像和输入框。
## en-US
Skeleton button, avatar and input.
```jsx
import { Skeleton, Switch, Form, Radio } from 'choerodon-ui';
class Demo extends React.Component {
state = {
buttonActive: false,
avatarActive: false,
inputActive: false,
buttonSize: 'default',
avatarSize: 'default',
inputSize: 'default',
buttonShape: 'default',
avatarShape: 'circle',
};
handleActiveChange = prop => checked => {
this.setState({ [prop]: checked });
};
handleSizeChange = prop => e => {
this.setState({ [prop]: e.target.value });
};
handleShapeChange = prop => e => {
this.setState({ [prop]: e.target.value });
};
render() {
const {
buttonActive,
avatarActive,
inputActive,
buttonSize,
avatarSize,
inputSize,
buttonShape,
avatarShape,
} = this.state;
return (
<div>
<div>
<Form layout="inline" style={{ marginBottom: 16 }}>
<Form.Item label="ButtonActive">
<Switch checked={buttonActive} onChange={this.handleActiveChange('buttonActive')} />
</Form.Item>
<Form.Item label="ButtonSize">
<Radio.Group value={buttonSize} onChange={this.handleSizeChange('buttonSize')}>
<Radio.Button value="default">Default</Radio.Button>
<Radio.Button value="large">Large</Radio.Button>
<Radio.Button value="small">Small</Radio.Button>
</Radio.Group>
</Form.Item>
<Form.Item label="ButtonShape">
<Radio.Group value={buttonShape} onChange={this.handleShapeChange('buttonShape')}>
<Radio.Button value="default">Default</Radio.Button>
<Radio.Button value="round">Round</Radio.Button>
<Radio.Button value="circle">Circle</Radio.Button>
</Radio.Group>
</Form.Item>
</Form>
<Skeleton.Button active={buttonActive} size={buttonSize} shape={buttonShape} />
</div>
<br />
<div>
<Form layout="inline" style={{ marginBottom: 16 }}>
<Form.Item label="AvatarActive">
<Switch checked={avatarActive} onChange={this.handleActiveChange('avatarActive')} />
</Form.Item>
<Form.Item label="AvatarSize">
<Radio.Group value={avatarSize} onChange={this.handleSizeChange('avatarSize')}>
<Radio.Button value="default">Default</Radio.Button>
<Radio.Button value="large">Large</Radio.Button>
<Radio.Button value="small">Small</Radio.Button>
</Radio.Group>
</Form.Item>
<Form.Item label="AvatarShape">
<Radio.Group value={avatarShape} onChange={this.handleShapeChange('avatarShape')}>
<Radio.Button value="square">Square</Radio.Button>
<Radio.Button value="circle">Circle</Radio.Button>
</Radio.Group>
</Form.Item>
</Form>
<Skeleton.Avatar active={avatarActive} size={avatarSize} shape={avatarShape} />
</div>
<br />
<div>
<Form layout="inline" style={{ marginBottom: 16 }}>
<Form.Item label="InputActive">
<Switch checked={inputActive} onChange={this.handleActiveChange('inputActive')} />
</Form.Item>
<Form.Item label="InputSize">
<Radio.Group value={inputSize} onChange={this.handleSizeChange('inputSize')}>
<Radio.Button value="default">Default</Radio.Button>
<Radio.Button value="large">Large</Radio.Button>
<Radio.Button value="small">Small</Radio.Button>
</Radio.Group>
</Form.Item>
</Form>
<Skeleton.Input style={{ width: '300px' }} active={inputActive} size={inputSize} />
</div>
</div>
);
}
}
ReactDOM.render(<Demo />, mountNode);
```
| {
"pile_set_name": "Github"
} |
FROM ubuntu:18.04
MAINTAINER dreamcat4 <[email protected]>
ENV _clean="rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*"
ENV _apt_clean="eval apt-get clean && $_clean"
# Install s6-overlay
ENV s6_overlay_version="1.22.1.0"
ADD https://github.com/just-containers/s6-overlay/releases/download/v${s6_overlay_version}/s6-overlay-amd64.tar.gz /tmp/
RUN tar hzxf /tmp/s6-overlay-amd64.tar.gz -C / --exclude=usr/bin/execlineb \
&& tar hzxf /tmp/s6-overlay-amd64.tar.gz -C /usr ./bin/execlineb && $_clean
ENV S6_LOGGING="1"
# ENV S6_KILL_GRACETIME="3000"
# Install pipework
ADD https://github.com/jpetazzo/pipework/archive/master.tar.gz /tmp/pipework-master.tar.gz
RUN tar hzxf /tmp/pipework-master.tar.gz -C /tmp && cp /tmp/pipework-master/pipework /sbin/ && $_clean
# Install forked-daapd
RUN apt-get update && apt-get install -y sudo iproute2 net-tools forked-daapd && $_apt_clean
# Setup daapd user
RUN groupadd -o -g 3689 daapd \
&& usermod -o -u 3689 -g daapd --shell /bin/sh -d /config daapd \
&& install -o daapd -g daapd -d /config /music
# Configuration file
ADD forked-daapd.conf /etc/forked-daapd.conf.docker
# Start scripts
ENV S6_LOGGING="0"
ADD services.d /etc/services.d
# Default container settings
VOLUME /config /music
EXPOSE 3689 6600
ENTRYPOINT ["/init"]
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 1999-2002, 2016 Free Software Foundation, Inc.
* This file is part of the GNU LIBICONV Library.
*
* The GNU LIBICONV Library is free software; you can redistribute it
* and/or modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* The GNU LIBICONV Library is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with the GNU LIBICONV Library; see the file COPYING.LIB.
* If not, see <http://www.gnu.org/licenses/>.
*/
/*
* TDS565
*/
static const unsigned short tds565_2uni[64] = {
/* 0x40 */
0x0040, 0x0041, 0x0042, 0x00c7, 0x0044, 0x0045, 0x00c4, 0x0046,
0x0047, 0x0048, 0x0049, 0x004a, 0x017d, 0x004b, 0x004c, 0x004d,
/* 0x50 */
0x004e, 0x0147, 0x004f, 0x00d6, 0x0050, 0x0052, 0x0053, 0x015e,
0x0054, 0x0055, 0x00dc, 0x0057, 0x0059, 0x00dd, 0x005a, 0x005f,
/* 0x60 */
0x2116, 0x0061, 0x0062, 0x00e7, 0x0064, 0x0065, 0x00e4, 0x0066,
0x0067, 0x0068, 0x0069, 0x006a, 0x017e, 0x006b, 0x006c, 0x006d,
/* 0x70 */
0x006e, 0x0148, 0x006f, 0x00f6, 0x0070, 0x0072, 0x0073, 0x015f,
0x0074, 0x0075, 0x00fc, 0x0077, 0x0079, 0x00fd, 0x007a, 0x007f,
};
static int
tds565_mbtowc (conv_t conv, ucs4_t *pwc, const unsigned char *s, size_t n)
{
unsigned char c = *s;
if (c < 0x40) {
*pwc = (ucs4_t) c;
return 1;
}
else if (c < 0x80) {
*pwc = (ucs4_t) tds565_2uni[c-0x40];
return 1;
}
return RET_ILSEQ;
}
static const unsigned char tds565_page00[64] = {
0x40, 0x41, 0x42, 0x00, 0x44, 0x45, 0x47, 0x48, /* 0x40-0x47 */
0x49, 0x4a, 0x4b, 0x4d, 0x4e, 0x4f, 0x50, 0x52, /* 0x48-0x4f */
0x54, 0x00, 0x55, 0x56, 0x58, 0x59, 0x00, 0x5b, /* 0x50-0x57 */
0x00, 0x5c, 0x5e, 0x00, 0x00, 0x00, 0x00, 0x5f, /* 0x58-0x5f */
0x00, 0x61, 0x62, 0x00, 0x64, 0x65, 0x67, 0x68, /* 0x60-0x67 */
0x69, 0x6a, 0x6b, 0x6d, 0x6e, 0x6f, 0x70, 0x72, /* 0x68-0x6f */
0x74, 0x00, 0x75, 0x76, 0x78, 0x79, 0x00, 0x7b, /* 0x70-0x77 */
0x00, 0x7c, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x7f, /* 0x78-0x7f */
};
static const unsigned char tds565_page00_1[64] = {
0x00, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x43, /* 0xc0-0xc7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x00, /* 0xd0-0xd7 */
0x00, 0x00, 0x00, 0x00, 0x5a, 0x5d, 0x00, 0x00, /* 0xd8-0xdf */
0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x63, /* 0xe0-0xe7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x00, /* 0xf0-0xf7 */
0x00, 0x00, 0x00, 0x00, 0x7a, 0x7d, 0x00, 0x00, /* 0xf8-0xff */
};
static const unsigned char tds565_page01[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, /* 0x40-0x47 */
0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x77, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x6c, 0x00, /* 0x78-0x7f */
};
static int
tds565_wctomb (conv_t conv, unsigned char *r, ucs4_t wc, size_t n)
{
unsigned char c = 0;
if (wc < 0x0040) {
*r = wc;
return 1;
}
else if (wc >= 0x0040 && wc < 0x0080)
c = tds565_page00[wc-0x0040];
else if (wc >= 0x00c0 && wc < 0x0100)
c = tds565_page00_1[wc-0x00c0];
else if (wc >= 0x0140 && wc < 0x0180)
c = tds565_page01[wc-0x0140];
else if (wc == 0x2116)
c = 0x60;
if (c != 0) {
*r = c;
return 1;
}
return RET_ILUNI;
}
| {
"pile_set_name": "Github"
} |
var arr = [];
var newimage = [];
var gems = [];
var hearts = [];
var key = 0;
var run = false;
var done = [];
var lives = 3;
var collected = 0;
var direction = 0;
var level = 1;
var enemyY = [];
var enemyX = [];
var frame = 0;
var completegame = 0;
function runCanvas() {
(function() {
var requestAnimationFrame = window.requestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.msRequestAnimationFrame;
window.requestAnimationFrame = requestAnimationFrame;
})();
//Initialization
var movex = true;
var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var width = 720;
var height = 450;
var player = {
x : done[3]["Level"+level][0]["x"],
y : done[3]["Level"+level][0]["y"],
width : 30,
height : 39,
speed: 3.2,
velX: 0,
velY: 0,
jumping: false,
};
var keys = [];
var friction = 0.8;
var gravity = 0.19;
canvas.width = width;
canvas.height = height;
function update(){
// check keys
if (keys[38] || keys[32]) {
// up arrow or space
if(!player.jumping) {
player.jumping = true;
player.velY = -player.speed*2;
}
}
if (keys[39]) {
// right arrow
if (player.velX < player.speed && movex) {
player.velX++;
direction = 0;
}
}
if (keys[37] && movex) {
// left arrow
if (player.velX > -player.speed) {
player.velX--;
direction = 1;
}
}
player.velX *= friction;
player.velY += gravity;
player.x += player.velX;
player.y += player.velY;
// Stay in the canvas
if (player.x >= width-player.width) {
player.x = width-player.width;
}
else if (player.x <= 0) {
player.x = 0;
}
if(player.y >= height-player.height){
player.y = height - player.height;
player.jumping = false;
}
// Spawn the player
function newPlayer(x,y,w,h) {
if (player.jumping) {
var img = newimage[done[4].indexOf("PlayerUp")];
} else if (direction == 1) {
var img = newimage[done[4].indexOf("PlayerLeft")];
} else {
var img = newimage[done[4].indexOf("Player")];
}
ctx.drawImage(img,x,y,w,h);
}
// Spawn a floating enemy
function spawnFloat(x,y,w,h,id,i) {
var img = newimage[done[4].indexOf(id)];
if (typeof enemyY[i] == 'undefined') {
var obj = {y:y, direction:1};
enemyY.push(obj);
}
ctx.drawImage(img,x,enemyY[i].y,w,h);
if (enemyY[i].y <= y+40 && enemyY[i].direction == 1) {
enemyY[i].y+= 1;
if (enemyY[i].y >= y+40) {
enemyY[i]["direction"] = 0;
enemyY[i].y-= 1;
}
} else if (enemyY[i].y >= y-40 && enemyY[i].direction == 0) {
enemyY[i].y-= 1;
if (enemyY[i].y <= y-40) {
enemyY[i]["direction"] = 1;
enemyY[i].y+= 1;
}
}
if(player.x+player.width>=x && player.x <=x+w && player.y<=enemyY[i].y+h && player.y+player.height>=enemyY[i].y) {
player.y = done[3]["Level"+level][0]["y"];
player.x = done[3]["Level"+level][0]["x"];
player.velY = 0;
player.velX = 0;
lives--;
}
}
// Spawn a flying enemy
function spawnFly(x,y,w,h,id,i) {
if (frame > 10 && frame <= 20) {
var img = newimage[done[4].indexOf("EnemyFly2")];
} else if (frame > 20 && frame <= 30) {
var img = newimage[done[4].indexOf("EnemyFly3")];
} else if (frame > 30 && frame <= 40) {
var img = newimage[done[4].indexOf("EnemyFly2")];
if (frame == 40) {
frame = 0;
}
} else {
var img = newimage[done[4].indexOf("EnemyFly1")];
}
if (typeof enemyX[i] == 'undefined') {
var obj = {x:x, direction:1};
enemyX.push(obj);
}
ctx.drawImage(img,enemyX[i].x,y,w,h);
if (enemyX[i].x <= x+40 && enemyX[i].direction == 1) {
enemyX[i].x+= 1;
if (enemyX[i].x >= x+40) {
enemyX[i]["direction"] = 0;
enemyX[i].x-= 1;
}
} else if (enemyX[i].x >= x-40 && enemyX[i].direction == 0) {
enemyX[i].x-= 1;
if (enemyX[i].x <= x-40) {
enemyX[i]["direction"] = 1;
enemyX[i].x+= 1;
}
}
if(player.x+player.width>=enemyX[i].x && player.x <=enemyX[i].x+w && player.y<=y+h && player.y+player.height>=y) {
player.y = done[3]["Level"+level][0]["y"];
player.x = done[3]["Level"+level][0]["x"];
player.velY = 0;
player.velX = 0;
lives--;
}
}
// Render the background
function background(id) {
var img = newimage[done[4].indexOf(id)];
ctx.drawImage(img,0,0,width,height);
}
// Make a platform
function newPlatform(x,y,w,h,name) {
var img = newimage[done[4].indexOf(name)];
ctx.drawImage(img,x,y,w,h);
if(player.y+player.height >= y && player.y+player.height <= y+(h/2) && player.x < x+w-2 && player.x+player.width > x+2) {
player.y = y-player.height;
player.velY = 0;
player.jumping = false;
}
if(player.y <= y+h && player.y >= y+(h/2) && player.x < x+w-2 && player.x+player.width > x+2) {
player.y = y+h;
player.velY = 0;
}
if (player.y >= y+1 && player.y <= y+h && player.x+player.width<x+2 && player.x+player.width>x-5) {
player.x = player.x-2;
movex = false;
player.velX = 0;
} else if (player.y >= y+1 && player.y <= y+h && player.x<x+w+5 && player.x>x+w-10) {
player.x = player.x+2;
movex = false;
player.velX = 0;
} else {
movex = true;
}
}
// Make a spike
function newSpike(x,y,w,h,id) {
var img = newimage[done[4].indexOf(id)];
ctx.drawImage(img,x,y,w,h);
if(player.y+player.height >= y && player.y+player.height <= y+(h/2) && player.x < x+w-2 && player.x+player.width > x+2) {
player.y = done[3]["Level"+level][0]["y"];
player.x = done[3]["Level"+level][0]["x"];
player.velY = 0;
player.velX = 0;
player.jumping = false;
lives--;
}
if (player.x+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
player.y = done[3]["Level"+level][0]["y"];
player.x = done[3]["Level"+level][0]["x"];
player.velY = 0;
player.velX = 0;
lives--;
}
}
// Make a gem
function newGem(x,y,w,h,i,id) {
var img = newimage[done[4].indexOf(id)];
if (typeof gems[i] == 'undefined') {
gems.push("1");
ctx.drawImage(img,x,y,w,h);
} else {
if (gems[i] == "1") {
ctx.drawImage(img,x,y,w,h);
}
}
if(gems[i] == "1" && player.x+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
gems[i] = "0";
collected++;
}
}
// Spawn the exit
function spawnExit(x,y,w,h,id) {
if (key == 0) {
var img = newimage[done[4].indexOf(id)];
} else if (key == 1) {
var img = newimage[done[4].indexOf("Exit")];
}
ctx.drawImage(img,x,y,w,h);
if (id.includes("Locked")) {
if (key == 1) {
if(player.x+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
level++;
gems = [];
enemyY = [];
enemyX = [];
hearts = [];
frame = 0;
key = 0;
if (level<=Object.keys(done[2]).length) {
player.x = done[3]["Level"+level][0]["x"];
player.y = done[3]["Level"+level][0]["y"];
} else {
level = 1;
completegame = 1;
}
}
}
} else {
if(player.x+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
level++;
key = 0;
gems = [];
enemyY = [];
enemyX = [];
hearts = [];
frame = 0;
if (level<=Object.keys(done[2]).length) {
player.x = done[3]["Level"+level][0]["x"];
player.y = done[3]["Level"+level][0]["y"];
} else {
level = 1;
completegame = 1;
}
}
}
}
// Spawn key
function spawnKey(x,y,w,h,id) {
var img = newimage[done[4].indexOf(id)];
if (key == 0) {
ctx.drawImage(img,x,y,w,h);
}
if(player.x+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
key = 1;
}
}
// Spawn heart
function newHeart(x,y,w,h,id,i) {
var img = newimage[done[4].indexOf(id)];
if (typeof hearts[i] == 'undefined') {
hearts.push("1");
ctx.drawImage(img,x,y,w,h);
} else {
if (hearts[i] == "1") {
ctx.drawImage(img,x,y,w,h);
}
}
if(hearts[i] == "1" && playerx+player.width>=x && player.x <=x+w && player.y<=y+h && player.y+player.height>=y) {
hearts[i] = "0";
if (lives < 3) {
lives++;
}
}
}
// Show GUI
function makeGUI() {
var img = newimage[done[4].indexOf("Heart")];
if (lives >= 1) {
ctx.drawImage(img,width-70,10,60,60);
}
if (lives >= 2) {
ctx.drawImage(img,width-110,10,60,60);
}
if (lives >= 3) {
ctx.drawImage(img,width-150,10,60,60);
}
var img2 = newimage[done[4].indexOf("Container")];
ctx.drawImage(img2, 0,0,179,68);
var img3 = newimage[done[4].indexOf("Gem")];
ctx.drawImage(img3,25,20,24,22);
ctx.font = "30px Ilisarniq Black";
ctx.fillStyle = "#FFF";
ctx.fillText(collected,60,42);
}
// Game over text
function gameOver() {
var img = newimage[done[4].indexOf("Game Over")];
ctx.drawImage(img,0,0,width,height);
}
// Next level
function complete() {
var img = newimage[done[4].indexOf("Complete")];
ctx.drawImage(img,0,0,width,height);
}
ctx.clearRect(0,0,width,height);
if (lives == 0) {
ctx.fillStyle = "#54447B";
ctx.fillRect(0,0,width, height);
gameOver();
} else if (completegame == 1) {
ctx.fillStyle = "#54447B";
ctx.fillRect(0,0,width, height);
complete();
} else {
background(done[2]["Level"+level][(done[2]["Level"+level].findIndex(element => element["name"].includes("Background")))]["name"]);
var o = 0;
var u = 0;
var d = 0;
var w = 0;
frame++;
for (var i = 0; i < done[2]["Level"+level].length; i++) {
if (completegame != 1) {
if (done[2]["Level"+level][i]["name"].includes("Tile")) {
newPlatform(done[2]["Level"+level][i]["x"],done[2]["Level"+level][i]["y"],58,58, done[2]["Level"+level][i]["name"]);
} else if (done[2]["Level"+level][i]["name"].includes("Spikes") || done[2]["Level"+level][i]["name"].includes("Fluid")) {
newSpike(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"]);
} else if (done[2]["Level"+level][i]["name"].includes("Gem")) {
newGem(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"], o,done[2]["Level"+level][i]["name"]);
o++;
} else if (done[2]["Level"+level][i]["name"].includes("Exit") || done[2]["Level"+level][i]["name"].includes("Locked")) {
spawnExit(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"]);
} else if (done[2]["Level"+level][i]["name"].includes("Key")) {
spawnKey(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"]);
} else if (done[2]["Level"+level][i]["name"].includes("EnemyFloat")) {
spawnFloat(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"],u);
u++;
} else if (done[2]["Level"+level][i]["name"].includes("EnemyFly")) {
spawnFly(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"],d, frame);
d++;
} else if (done[2]["Level"+level][i]["name"].includes("HeartItem")) {
newHeart(done[2]["Level"+level][i]["x"], done[2]["Level"+level][i]["y"], done[2]["Level"+level][i]["width"], done[2]["Level"+level][i]["height"],done[2]["Level"+level][i]["name"],w);
w++;
}
} else {
break;
}
}
if (completegame != 1) {
newPlayer(player.x, player.y, player.width, player.height);
makeGUI();
}
requestAnimationFrame(update);
}
}
document.body.addEventListener("keydown", function(e) {
keys[e.keyCode] = true;
});
document.body.addEventListener("keyup", function(e) {
keys[e.keyCode] = false;
});
update();
}
$(document).ready(function(){
if ($("#code").val() != "0" && $("#state").val() != "0") {
var code = $("#code").val();
var state = $("#state").val();
$.ajax({
url: "request.php",
type:"post",
data:{code:code, state:state},
success: function(result){
console.log(result.toString());
done = JSON.parse(result);
for (var i = 0; i<done[1].length; i++) {
newimage[i] = new Image();
newimage[i].onload = function (e) {
arr.push(e.target);
if (arr.length == done[1].length) {
run = true;
$("#canvas").addClass("running");
runCanvas();
}
}
newimage[i].src = done[0][done[1][i]];
};
}
});
}
});
| {
"pile_set_name": "Github"
} |
_id: 9b21c2a0-68c0-11e7-89b4-79fbd5ed8e2b
message: "@Albus - To start I would follow that repo's instructions. It should be as simple as adding they're JavaScript as instructed. I haven't used that lightbox gallery so don't have experience with it.\r\n\r\nMM ships with [MagnificPopup's lightbox gallery](http://dimsemenov.com/plugins/magnific-popup/) so you might have to rip that out to use lightGallery."
name: Michael Rose
email: 1ce71bc10b86565464b612093d89707e
url: 'https://mademistakes.com'
hidden: ''
date: '2017-07-14T18:16:50.776Z'
| {
"pile_set_name": "Github"
} |
package com.kelvinapps.rxfirebase.exceptions;
import android.support.annotation.NonNull;
import com.google.firebase.database.DatabaseError;
/**
* Created by Nick Moskalenko on 16/05/2016.
*/
public class RxFirebaseDataException extends Exception {
protected DatabaseError error;
public RxFirebaseDataException(@NonNull DatabaseError error) {
this.error = error;
}
public DatabaseError getError() {
return error;
}
@Override
public String toString() {
return "RxFirebaseDataException{" +
"error=" + error +
'}';
}
}
| {
"pile_set_name": "Github"
} |
System.register([], function () {
'use strict';
return {
execute: function () {
}
};
});
| {
"pile_set_name": "Github"
} |
{
"name": "hubot-test-helper",
"description": "Helper for testing hubot script",
"main": "./src/index.js",
"scripts": {
"test": "mocha --compilers coffee:coffee-script/register test",
"autotest": "mocha --compilers coffee:coffee-script/register test -w",
"semantic-release": "semantic-release"
},
"keywords": [
"hubot"
],
"dependencies": {
"hubot": ">=3.0.0 <10 || 0.0.0-development"
},
"devDependencies": {
"chai": "latest",
"co": "latest",
"mocha": "latest",
"coffee-script": "latest",
"semantic-release": "latest"
},
"author": "Fumiaki MATSUSHIMA",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/mtsmfm/hubot-test-helper"
}
}
| {
"pile_set_name": "Github"
} |
" last night " could have an " aka " tagged on the end of it that says , " it's the end of the world , and we feel fine " .
" last night " is about just that .
the last night of planet earth's existence .
the world is going to end at precisely midnight , january 1st , 2000 .
how scientists were so accurately able to predict when this catastrophic event would occur is never explained .
nor is the event itself .
throughout the entire movie , no explanation is given as to why this is happening .
in doing this , mckellar ( who also wrote and directed the film ) seems to be trying to transcend the genre .
he's attempted to make a film about people , and how each of them deals with their last night of life .
the film contains no science fiction elements , which allows the viewer to concentrate on the feelings and actions of the characters .
mckellar is largely successful in his attempt to rise above his material .
the film is set in toronto , and follows several people in their various escapades on the last night of existence .
there's mckellar , who plays a man who just wants to be alone when the moment occurs , but keeps getting interrupted by a woman ( sandra oh ) in search of her husband .
her husband , played by david cronenberg , is spending the day working at the gas company , informing people that they will attempt to keep providing gas right up until the end .
finally , there's mckellar's friend , played by callum keith rennie , a man who has spent his final months trying out every sexual perversion you can think of .
mckellar has crafted a gang of the some of the most interesting characters i've seen in a film in months .
these people all felt real to me , especially the character played by sandra oh .
we can see her desperation , her utter need to be with her husband in the final hours .
rennie is good , too , as the man who wants to literally try everything before he dies .
he exudes such charm and likeablity , it's not surprising that we're rooting for him to get what he wants .
but for me , the biggest surprise in the movie is mckellar .
i knew that he was a talented director , but i had no idea he could act as well .
his insistence to be alone , much to the dismay of his parents , is something that most people would find difficult to understand , but as played by mckellar , we can understand his reasons .
he doesn't buy into the whole notion that just because everyone's going to die , he should immediately find some companionship .
he realizes the relationship would be forced , and they would only be together for the sake of being together .
mckellar gives a brilliant performance , and i hope he alternates between directing and acting .
my only quibble with the film is that we never find out why the world is ending .
i really wanted to know , and my need to know was hanging over the film at all times .
i appreciate the fact that mckellar didn't want the material to rule the film , and that he wanted the movie to be more of a study in human behaviour .
however , in not telling the audience what the cause of earth's destruction is , he's left a big question that is in their minds throughout the film .
another problem is we never see anybody panicking .
i would imagine if the world was ending , and there was nothing to be done about it , a large segment of the population would be going out of their minds .
instead , we are treated to shots of a giant crowd partying like it's new year's eve .
i just didn't buy the fact that everyone had completely accepted their fate .
no one had the urge to " rage against the dying of the light " .
despite these small problems , " last night " is one of the best movies of the year .
as opposed to the moronic " armageddon " , " last night " treats the subject at hand with maturity and believabilty .
and bruce willis never shows up to save the day .
| {
"pile_set_name": "Github"
} |
;; Test interesting integer "expressions". These tests contain code
;; patterns which tempt common value-changing optimizations.
;; Test that x+1<y+1 is not folded to x<y.
(module
(func (export "i32.no_fold_cmp_s_offset") (param $x i32) (param $y i32) (result i32)
(i32.lt_s (i32.add (get_local $x) (i32.const 1)) (i32.add (get_local $y) (i32.const 1))))
(func (export "i32.no_fold_cmp_u_offset") (param $x i32) (param $y i32) (result i32)
(i32.lt_u (i32.add (get_local $x) (i32.const 1)) (i32.add (get_local $y) (i32.const 1))))
(func (export "i64.no_fold_cmp_s_offset") (param $x i64) (param $y i64) (result i32)
(i64.lt_s (i64.add (get_local $x) (i64.const 1)) (i64.add (get_local $y) (i64.const 1))))
(func (export "i64.no_fold_cmp_u_offset") (param $x i64) (param $y i64) (result i32)
(i64.lt_u (i64.add (get_local $x) (i64.const 1)) (i64.add (get_local $y) (i64.const 1))))
)
(assert_return (invoke "i32.no_fold_cmp_s_offset" (i32.const 0x7fffffff) (i32.const 0)) (i32.const 1))
(assert_return (invoke "i32.no_fold_cmp_u_offset" (i32.const 0xffffffff) (i32.const 0)) (i32.const 1))
(assert_return (invoke "i64.no_fold_cmp_s_offset" (i64.const 0x7fffffffffffffff) (i64.const 0)) (i32.const 1))
(assert_return (invoke "i64.no_fold_cmp_u_offset" (i64.const 0xffffffffffffffff) (i64.const 0)) (i32.const 1))
;; Test that wrap(extend_s(x)) is not folded to x.
(module
(func (export "i64.no_fold_wrap_extend_s") (param $x i64) (result i64)
(i64.extend_s/i32 (i32.wrap/i64 (get_local $x))))
)
(assert_return (invoke "i64.no_fold_wrap_extend_s" (i64.const 0x0010203040506070)) (i64.const 0x0000000040506070))
(assert_return (invoke "i64.no_fold_wrap_extend_s" (i64.const 0x00a0b0c0d0e0f0a0)) (i64.const 0xffffffffd0e0f0a0))
;; Test that wrap(extend_u(x)) is not folded to x.
(module
(func (export "i64.no_fold_wrap_extend_u") (param $x i64) (result i64)
(i64.extend_u/i32 (i32.wrap/i64 (get_local $x))))
)
(assert_return (invoke "i64.no_fold_wrap_extend_u" (i64.const 0x0010203040506070)) (i64.const 0x0000000040506070))
;; Test that x<<n>>n is not folded to x.
(module
(func (export "i32.no_fold_shl_shr_s") (param $x i32) (result i32)
(i32.shr_s (i32.shl (get_local $x) (i32.const 1)) (i32.const 1)))
(func (export "i32.no_fold_shl_shr_u") (param $x i32) (result i32)
(i32.shr_u (i32.shl (get_local $x) (i32.const 1)) (i32.const 1)))
(func (export "i64.no_fold_shl_shr_s") (param $x i64) (result i64)
(i64.shr_s (i64.shl (get_local $x) (i64.const 1)) (i64.const 1)))
(func (export "i64.no_fold_shl_shr_u") (param $x i64) (result i64)
(i64.shr_u (i64.shl (get_local $x) (i64.const 1)) (i64.const 1)))
)
(assert_return (invoke "i32.no_fold_shl_shr_s" (i32.const 0x80000000)) (i32.const 0))
(assert_return (invoke "i32.no_fold_shl_shr_u" (i32.const 0x80000000)) (i32.const 0))
(assert_return (invoke "i64.no_fold_shl_shr_s" (i64.const 0x8000000000000000)) (i64.const 0))
(assert_return (invoke "i64.no_fold_shl_shr_u" (i64.const 0x8000000000000000)) (i64.const 0))
;; Test that x>>n<<n is not folded to x.
(module
(func (export "i32.no_fold_shr_s_shl") (param $x i32) (result i32)
(i32.shl (i32.shr_s (get_local $x) (i32.const 1)) (i32.const 1)))
(func (export "i32.no_fold_shr_u_shl") (param $x i32) (result i32)
(i32.shl (i32.shr_u (get_local $x) (i32.const 1)) (i32.const 1)))
(func (export "i64.no_fold_shr_s_shl") (param $x i64) (result i64)
(i64.shl (i64.shr_s (get_local $x) (i64.const 1)) (i64.const 1)))
(func (export "i64.no_fold_shr_u_shl") (param $x i64) (result i64)
(i64.shl (i64.shr_u (get_local $x) (i64.const 1)) (i64.const 1)))
)
(assert_return (invoke "i32.no_fold_shr_s_shl" (i32.const 1)) (i32.const 0))
(assert_return (invoke "i32.no_fold_shr_u_shl" (i32.const 1)) (i32.const 0))
(assert_return (invoke "i64.no_fold_shr_s_shl" (i64.const 1)) (i64.const 0))
(assert_return (invoke "i64.no_fold_shr_u_shl" (i64.const 1)) (i64.const 0))
;; Test that x/n*n is not folded to x.
(module
(func (export "i32.no_fold_div_s_mul") (param $x i32) (result i32)
(i32.mul (i32.div_s (get_local $x) (i32.const 6)) (i32.const 6)))
(func (export "i32.no_fold_div_u_mul") (param $x i32) (result i32)
(i32.mul (i32.div_u (get_local $x) (i32.const 6)) (i32.const 6)))
(func (export "i64.no_fold_div_s_mul") (param $x i64) (result i64)
(i64.mul (i64.div_s (get_local $x) (i64.const 6)) (i64.const 6)))
(func (export "i64.no_fold_div_u_mul") (param $x i64) (result i64)
(i64.mul (i64.div_u (get_local $x) (i64.const 6)) (i64.const 6)))
)
(assert_return (invoke "i32.no_fold_div_s_mul" (i32.const 1)) (i32.const 0))
(assert_return (invoke "i32.no_fold_div_u_mul" (i32.const 1)) (i32.const 0))
(assert_return (invoke "i64.no_fold_div_s_mul" (i64.const 1)) (i64.const 0))
(assert_return (invoke "i64.no_fold_div_u_mul" (i64.const 1)) (i64.const 0))
;; Test that x/x is not folded to 1.
(module
(func (export "i32.no_fold_div_s_self") (param $x i32) (result i32)
(i32.div_s (get_local $x) (get_local $x)))
(func (export "i32.no_fold_div_u_self") (param $x i32) (result i32)
(i32.div_u (get_local $x) (get_local $x)))
(func (export "i64.no_fold_div_s_self") (param $x i64) (result i64)
(i64.div_s (get_local $x) (get_local $x)))
(func (export "i64.no_fold_div_u_self") (param $x i64) (result i64)
(i64.div_u (get_local $x) (get_local $x)))
)
(assert_trap (invoke "i32.no_fold_div_s_self" (i32.const 0)) "integer divide by zero")
(assert_trap (invoke "i32.no_fold_div_u_self" (i32.const 0)) "integer divide by zero")
(assert_trap (invoke "i64.no_fold_div_s_self" (i64.const 0)) "integer divide by zero")
(assert_trap (invoke "i64.no_fold_div_u_self" (i64.const 0)) "integer divide by zero")
;; Test that x%x is not folded to 0.
(module
(func (export "i32.no_fold_rem_s_self") (param $x i32) (result i32)
(i32.rem_s (get_local $x) (get_local $x)))
(func (export "i32.no_fold_rem_u_self") (param $x i32) (result i32)
(i32.rem_u (get_local $x) (get_local $x)))
(func (export "i64.no_fold_rem_s_self") (param $x i64) (result i64)
(i64.rem_s (get_local $x) (get_local $x)))
(func (export "i64.no_fold_rem_u_self") (param $x i64) (result i64)
(i64.rem_u (get_local $x) (get_local $x)))
)
(assert_trap (invoke "i32.no_fold_rem_s_self" (i32.const 0)) "integer divide by zero")
(assert_trap (invoke "i32.no_fold_rem_u_self" (i32.const 0)) "integer divide by zero")
(assert_trap (invoke "i64.no_fold_rem_s_self" (i64.const 0)) "integer divide by zero")
(assert_trap (invoke "i64.no_fold_rem_u_self" (i64.const 0)) "integer divide by zero")
;; Test that x*n/n is not folded to x.
(module
(func (export "i32.no_fold_mul_div_s") (param $x i32) (result i32)
(i32.div_s (i32.mul (get_local $x) (i32.const 6)) (i32.const 6)))
(func (export "i32.no_fold_mul_div_u") (param $x i32) (result i32)
(i32.div_u (i32.mul (get_local $x) (i32.const 6)) (i32.const 6)))
(func (export "i64.no_fold_mul_div_s") (param $x i64) (result i64)
(i64.div_s (i64.mul (get_local $x) (i64.const 6)) (i64.const 6)))
(func (export "i64.no_fold_mul_div_u") (param $x i64) (result i64)
(i64.div_u (i64.mul (get_local $x) (i64.const 6)) (i64.const 6)))
)
(assert_return (invoke "i32.no_fold_mul_div_s" (i32.const 0x80000000)) (i32.const 0))
(assert_return (invoke "i32.no_fold_mul_div_u" (i32.const 0x80000000)) (i32.const 0))
(assert_return (invoke "i64.no_fold_mul_div_s" (i64.const 0x8000000000000000)) (i64.const 0))
(assert_return (invoke "i64.no_fold_mul_div_u" (i64.const 0x8000000000000000)) (i64.const 0))
;; Test that x/n where n is a known power of 2 is not folded to shr_s.
(module
(func (export "i32.no_fold_div_s_2") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const 2)))
(func (export "i64.no_fold_div_s_2") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const 2)))
)
(assert_return (invoke "i32.no_fold_div_s_2" (i32.const -11)) (i32.const -5))
(assert_return (invoke "i64.no_fold_div_s_2" (i64.const -11)) (i64.const -5))
;; Test that x%n where n is a known power of 2 is not folded to and.
(module
(func (export "i32.no_fold_rem_s_2") (param $x i32) (result i32)
(i32.rem_s (get_local $x) (i32.const 2)))
(func (export "i64.no_fold_rem_s_2") (param $x i64) (result i64)
(i64.rem_s (get_local $x) (i64.const 2)))
)
(assert_return (invoke "i32.no_fold_rem_s_2" (i32.const -11)) (i32.const -1))
(assert_return (invoke "i64.no_fold_rem_s_2" (i64.const -11)) (i64.const -1))
;; Test that x/0 traps.
(module
(func (export "i32.div_s_0") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const 0)))
(func (export "i32.div_u_0") (param $x i32) (result i32)
(i32.div_u (get_local $x) (i32.const 0)))
(func (export "i64.div_s_0") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const 0)))
(func (export "i64.div_u_0") (param $x i64) (result i64)
(i64.div_u (get_local $x) (i64.const 0)))
)
(assert_trap (invoke "i32.div_s_0" (i32.const 71)) "integer divide by zero")
(assert_trap (invoke "i32.div_u_0" (i32.const 71)) "integer divide by zero")
(assert_trap (invoke "i64.div_s_0" (i64.const 71)) "integer divide by zero")
(assert_trap (invoke "i64.div_u_0" (i64.const 71)) "integer divide by zero")
;; Test that x/3 works.
(module
(func (export "i32.div_s_3") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const 3)))
(func (export "i32.div_u_3") (param $x i32) (result i32)
(i32.div_u (get_local $x) (i32.const 3)))
(func (export "i64.div_s_3") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const 3)))
(func (export "i64.div_u_3") (param $x i64) (result i64)
(i64.div_u (get_local $x) (i64.const 3)))
)
(assert_return (invoke "i32.div_s_3" (i32.const 71)) (i32.const 23))
(assert_return (invoke "i32.div_s_3" (i32.const 0x60000000)) (i32.const 0x20000000))
(assert_return (invoke "i32.div_u_3" (i32.const 71)) (i32.const 23))
(assert_return (invoke "i32.div_u_3" (i32.const 0xc0000000)) (i32.const 0x40000000))
(assert_return (invoke "i64.div_s_3" (i64.const 71)) (i64.const 23))
(assert_return (invoke "i64.div_s_3" (i64.const 0x3000000000000000)) (i64.const 0x1000000000000000))
(assert_return (invoke "i64.div_u_3" (i64.const 71)) (i64.const 23))
(assert_return (invoke "i64.div_u_3" (i64.const 0xc000000000000000)) (i64.const 0x4000000000000000))
;; Test that x/5 works.
(module
(func (export "i32.div_s_5") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const 5)))
(func (export "i32.div_u_5") (param $x i32) (result i32)
(i32.div_u (get_local $x) (i32.const 5)))
(func (export "i64.div_s_5") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const 5)))
(func (export "i64.div_u_5") (param $x i64) (result i64)
(i64.div_u (get_local $x) (i64.const 5)))
)
(assert_return (invoke "i32.div_s_5" (i32.const 71)) (i32.const 14))
(assert_return (invoke "i32.div_s_5" (i32.const 0x50000000)) (i32.const 0x10000000))
(assert_return (invoke "i32.div_u_5" (i32.const 71)) (i32.const 14))
(assert_return (invoke "i32.div_u_5" (i32.const 0xa0000000)) (i32.const 0x20000000))
(assert_return (invoke "i64.div_s_5" (i64.const 71)) (i64.const 14))
(assert_return (invoke "i64.div_s_5" (i64.const 0x5000000000000000)) (i64.const 0x1000000000000000))
(assert_return (invoke "i64.div_u_5" (i64.const 71)) (i64.const 14))
(assert_return (invoke "i64.div_u_5" (i64.const 0xa000000000000000)) (i64.const 0x2000000000000000))
;; Test that x/7 works.
(module
(func (export "i32.div_s_7") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const 7)))
(func (export "i32.div_u_7") (param $x i32) (result i32)
(i32.div_u (get_local $x) (i32.const 7)))
(func (export "i64.div_s_7") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const 7)))
(func (export "i64.div_u_7") (param $x i64) (result i64)
(i64.div_u (get_local $x) (i64.const 7)))
)
(assert_return (invoke "i32.div_s_7" (i32.const 71)) (i32.const 10))
(assert_return (invoke "i32.div_s_7" (i32.const 0x70000000)) (i32.const 0x10000000))
(assert_return (invoke "i32.div_u_7" (i32.const 71)) (i32.const 10))
(assert_return (invoke "i32.div_u_7" (i32.const 0xe0000000)) (i32.const 0x20000000))
(assert_return (invoke "i64.div_s_7" (i64.const 71)) (i64.const 10))
(assert_return (invoke "i64.div_s_7" (i64.const 0x7000000000000000)) (i64.const 0x1000000000000000))
(assert_return (invoke "i64.div_u_7" (i64.const 71)) (i64.const 10))
(assert_return (invoke "i64.div_u_7" (i64.const 0xe000000000000000)) (i64.const 0x2000000000000000))
;; Test that x%3 works.
(module
(func (export "i32.rem_s_3") (param $x i32) (result i32)
(i32.rem_s (get_local $x) (i32.const 3)))
(func (export "i32.rem_u_3") (param $x i32) (result i32)
(i32.rem_u (get_local $x) (i32.const 3)))
(func (export "i64.rem_s_3") (param $x i64) (result i64)
(i64.rem_s (get_local $x) (i64.const 3)))
(func (export "i64.rem_u_3") (param $x i64) (result i64)
(i64.rem_u (get_local $x) (i64.const 3)))
)
(assert_return (invoke "i32.rem_s_3" (i32.const 71)) (i32.const 2))
(assert_return (invoke "i32.rem_s_3" (i32.const 0x60000000)) (i32.const 0))
(assert_return (invoke "i32.rem_u_3" (i32.const 71)) (i32.const 2))
(assert_return (invoke "i32.rem_u_3" (i32.const 0xc0000000)) (i32.const 0))
(assert_return (invoke "i64.rem_s_3" (i64.const 71)) (i64.const 2))
(assert_return (invoke "i64.rem_s_3" (i64.const 0x3000000000000000)) (i64.const 0))
(assert_return (invoke "i64.rem_u_3" (i64.const 71)) (i64.const 2))
(assert_return (invoke "i64.rem_u_3" (i64.const 0xc000000000000000)) (i64.const 0))
;; Test that x%5 works.
(module
(func (export "i32.rem_s_5") (param $x i32) (result i32)
(i32.rem_s (get_local $x) (i32.const 5)))
(func (export "i32.rem_u_5") (param $x i32) (result i32)
(i32.rem_u (get_local $x) (i32.const 5)))
(func (export "i64.rem_s_5") (param $x i64) (result i64)
(i64.rem_s (get_local $x) (i64.const 5)))
(func (export "i64.rem_u_5") (param $x i64) (result i64)
(i64.rem_u (get_local $x) (i64.const 5)))
)
(assert_return (invoke "i32.rem_s_5" (i32.const 71)) (i32.const 1))
(assert_return (invoke "i32.rem_s_5" (i32.const 0x50000000)) (i32.const 0))
(assert_return (invoke "i32.rem_u_5" (i32.const 71)) (i32.const 1))
(assert_return (invoke "i32.rem_u_5" (i32.const 0xa0000000)) (i32.const 0))
(assert_return (invoke "i64.rem_s_5" (i64.const 71)) (i64.const 1))
(assert_return (invoke "i64.rem_s_5" (i64.const 0x5000000000000000)) (i64.const 0))
(assert_return (invoke "i64.rem_u_5" (i64.const 71)) (i64.const 1))
(assert_return (invoke "i64.rem_u_5" (i64.const 0xa000000000000000)) (i64.const 0))
;; Test that x%7 works.
(module
(func (export "i32.rem_s_7") (param $x i32) (result i32)
(i32.rem_s (get_local $x) (i32.const 7)))
(func (export "i32.rem_u_7") (param $x i32) (result i32)
(i32.rem_u (get_local $x) (i32.const 7)))
(func (export "i64.rem_s_7") (param $x i64) (result i64)
(i64.rem_s (get_local $x) (i64.const 7)))
(func (export "i64.rem_u_7") (param $x i64) (result i64)
(i64.rem_u (get_local $x) (i64.const 7)))
)
(assert_return (invoke "i32.rem_s_7" (i32.const 71)) (i32.const 1))
(assert_return (invoke "i32.rem_s_7" (i32.const 0x70000000)) (i32.const 0))
(assert_return (invoke "i32.rem_u_7" (i32.const 71)) (i32.const 1))
(assert_return (invoke "i32.rem_u_7" (i32.const 0xe0000000)) (i32.const 0))
(assert_return (invoke "i64.rem_s_7" (i64.const 71)) (i64.const 1))
(assert_return (invoke "i64.rem_s_7" (i64.const 0x7000000000000000)) (i64.const 0))
(assert_return (invoke "i64.rem_u_7" (i64.const 71)) (i64.const 1))
(assert_return (invoke "i64.rem_u_7" (i64.const 0xe000000000000000)) (i64.const 0))
;; Test that x/-1 is not folded to -x.
(module
(func (export "i32.no_fold_div_neg1") (param $x i32) (result i32)
(i32.div_s (get_local $x) (i32.const -1)))
(func (export "i64.no_fold_div_neg1") (param $x i64) (result i64)
(i64.div_s (get_local $x) (i64.const -1)))
)
(assert_trap (invoke "i32.no_fold_div_neg1" (i32.const 0x80000000)) "integer overflow")
(assert_trap (invoke "i64.no_fold_div_neg1" (i64.const 0x8000000000000000)) "integer overflow")
| {
"pile_set_name": "Github"
} |
#!/usr/bin/perl
# This file is part of Koha.
#
# Koha is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Koha is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Koha; if not, see <http://www.gnu.org/licenses>.
use Modern::Perl;
use Test::More tests => 1;
use t::lib::TestBuilder;
use Koha::Database;
use Koha::Tags::Indexes;
my $schema = Koha::Database->schema;
my $builder = t::lib::TestBuilder->new;
subtest 'search() tests' => sub {
plan tests => 1;
$schema->storage->txn_begin;
my $current_count = Koha::Tags::Indexes->search->count;
my $index_1 = $builder->build_object({ class => 'Koha::Tags::Indexes' });
my $index_2 = $builder->build_object({ class => 'Koha::Tags::Indexes' });
is( Koha::Tags::Indexes->search->count, $current_count + 2, 'Indexes count consistent' );
$schema->storage->txn_rollback;
};
| {
"pile_set_name": "Github"
} |
class Range
include Enumerable
def member?(val)
# implementation to conform to rubyspecs, Pickaxe docs incorrect
fr = @_st_from
if fr._isNumeric && val._isNumeric
if @_st_excludeEnd
val >= fr && val < @_st_to
else
val >= fr && val <= @_st_to
end
else
self.step(1) { |i|
if i == val
return true
end
}
false
end
end
alias include? ===
end
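# A small illustration of the two paths in Range#member? above (the numeric
# fast path vs. the step-based fallback); values are illustrative only:
#
#   (1...5).member?(5)       # => false -- numeric path, excluded end
#   ('a'..'e').member?('c')  # => true  -- found via the step fallback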
| {
"pile_set_name": "Github"
} |
#pragma once
#include <filesystem>
#include <fstream>
#include <boost/asio.hpp>
namespace opossum {
using AsioStreamDescriptor = boost::asio::posix::stream_descriptor;
// Instead of writing data to the network device this class puts the information into a file. This allows an easier
// verification of input and output and works independently from the network.
class MockSocket {
public:
static constexpr char filename[] = "socket_file";
MockSocket() : _path(test_data_path + filename) {
_file_descriptor = open(_path.c_str(), O_RDWR | O_CREAT | O_APPEND, 0755);
_stream = std::make_shared<AsioStreamDescriptor>(_io_service, _file_descriptor);
_io_service.run();
}
~MockSocket() {
close(_file_descriptor);
std::filesystem::remove(std::filesystem::path{_path});
}
std::shared_ptr<AsioStreamDescriptor> get_socket() { return _stream; }
void write(const std::string& value) { std::ofstream(std::filesystem::path{_path}, std::ios_base::app) << value; }
std::string read() {
// local renamed from "AsioStreamDescriptor", which shadowed the type alias above
std::ifstream file_stream(_path);
return {std::istreambuf_iterator<char>(file_stream), std::istreambuf_iterator<char>()};
}
bool empty() { return std::ifstream(std::filesystem::path{_path}).peek() == std::ifstream::traits_type::eof(); }
private:
const std::string _path;
int _file_descriptor;
boost::asio::io_service _io_service;
std::shared_ptr<AsioStreamDescriptor> _stream;
};
// Helper class to convert integers from network byte order to host byte order.
class NetworkConversionHelper {
public:
static uint32_t get_message_length(std::string::const_iterator start) {
uint32_t network_value = 0;
std::copy_n(start, sizeof(uint32_t), reinterpret_cast<char*>(&network_value));
return ntohl(network_value);
}
static uint16_t get_small_int(std::string::const_iterator start) {
uint16_t network_value = 0;
std::copy_n(start, sizeof(uint16_t), reinterpret_cast<char*>(&network_value));
return ntohs(network_value);
}
};
} // namespace opossum
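// A minimal usage sketch (assuming GoogleTest and the test_data_path global
// from the surrounding test suite; illustrative, not part of the original
// file): write through the fake socket file and read the bytes back.
//
//   TEST(MockSocketTest, RoundTrip) {
//     opossum::MockSocket socket{};
//     socket.write("hello");
//     EXPECT_FALSE(socket.empty());
//     EXPECT_EQ(socket.read(), "hello");
//   }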
| {
"pile_set_name": "Github"
} |
using System;
using System.Threading.Tasks;
using Marten;
using PricingService.Domain;
namespace PricingService.DataAccess.Marten
{
public class MartenDataStore : IDataStore
{
private readonly IDocumentSession session;
private readonly ITariffRepository tariffs;
public MartenDataStore(IDocumentStore documentStore)
{
session = documentStore.LightweightSession();
tariffs = new MartenTariffRepository(session);
}
public ITariffRepository Tariffs => tariffs;
public async Task CommitChanges()
{
await session.SaveChangesAsync();
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
session.Dispose();
}
}
}
}
| {
"pile_set_name": "Github"
} |
oro:
rfp:
requestproduct:
product:
blank: 'Product cannot be empty.'
request_product_items:
blank: 'Please add one or more requests.'
| {
"pile_set_name": "Github"
} |
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
static int next;
void
f00 (void)
{
puts ("f00");
if (next-- != 0)
_exit (1);
}
void
f01 (void)
{
puts ("f01");
if (next-- != 1)
_exit (1);
}
void
f02 (void)
{
puts ("f02");
if (next-- != 2)
_exit (1);
}
void
f03 (void)
{
puts ("f03");
if (next-- != 3)
_exit (1);
}
void
f04 (void)
{
puts ("f04");
if (next-- != 4)
_exit (1);
}
void
f05 (void)
{
puts ("f05");
if (next-- != 5)
_exit (1);
}
void
f06 (void)
{
puts ("f06");
if (next-- != 6)
_exit (1);
}
void
f07 (void)
{
puts ("f07");
if (next-- != 7)
_exit (1);
}
void
f08 (void)
{
puts ("f08");
if (next-- != 8)
_exit (1);
}
void
f09 (void)
{
puts ("f09");
if (next-- != 9)
_exit (1);
}
void
f10 (void)
{
puts ("f10");
if (next-- != 10)
_exit (1);
}
void
f11 (void)
{
puts ("f11");
if (next-- != 11)
_exit (1);
}
void
f12 (void)
{
puts ("f12");
if (next-- != 12)
_exit (1);
}
void
f13 (void)
{
puts ("f13");
if (next-- != 13)
_exit (1);
}
void
f14 (void)
{
puts ("f14");
if (next-- != 14)
_exit (1);
}
void
f15 (void)
{
puts ("f15");
if (next-- != 15)
_exit (1);
}
void
f16 (void)
{
puts ("f16");
if (next-- != 16)
_exit (1);
}
void
f17 (void)
{
puts ("f17");
if (next-- != 17)
_exit (1);
}
void
f18 (void)
{
puts ("f18");
if (next-- != 18)
_exit (1);
}
void
f19 (void)
{
puts ("f19");
if (next-- != 19)
_exit (1);
}
void
f20 (void)
{
puts ("f20");
if (next-- != 20)
_exit (1);
}
void
f21 (void)
{
puts ("f21");
if (next-- != 21)
_exit (1);
}
void
f22 (void)
{
puts ("f22");
if (next-- != 22)
_exit (1);
}
void
f23 (void)
{
puts ("f23");
if (next-- != 23)
_exit (1);
}
void
f24 (void)
{
puts ("f24");
if (next-- != 24)
_exit (1);
}
void
f25 (void)
{
puts ("f25");
if (next-- != 25)
_exit (1);
}
void
f26 (void)
{
puts ("f26");
if (next-- != 26)
_exit (1);
}
void
f27 (void)
{
puts ("f27");
if (next-- != 27)
_exit (1);
}
void
f28 (void)
{
puts ("f28");
if (next-- != 28)
_exit (1);
}
void
f29 (void)
{
puts ("f29");
if (next-- != 29)
_exit (1);
}
void
f30 (void)
{
puts ("f30");
if (next-- != 30)
_exit (1);
}
void
f31 (void)
{
puts ("f31");
if (next-- != 31)
_exit (1);
}
void
f32 (void)
{
puts ("f32");
if (next-- != 32)
_exit (1);
}
void
f33 (void)
{
puts ("f33");
if (next-- != 33)
_exit (1);
}
void
f34 (void)
{
puts ("f34");
if (next-- != 34)
_exit (1);
}
void
f35 (void)
{
puts ("f35");
if (next-- != 35)
_exit (1);
}
void
f36 (void)
{
puts ("f36");
if (next-- != 36)
_exit (1);
}
void
f37 (void)
{
puts ("f37");
if (next-- != 37)
_exit (1);
}
void
f38 (void)
{
puts ("f38");
if (next-- != 38)
_exit (1);
}
void
f39 (void)
{
puts ("f39");
if (next-- != 39)
_exit (1);
}
void
foo (void)
{
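/* atexit handlers run in LIFO order, i.e. the reverse of registration,
   so f39 (registered last) must run first; `next` counts down from 39
   to 0 to verify that ordering. */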
atexit (f00);
atexit (f01);
atexit (f02);
atexit (f03);
atexit (f04);
atexit (f05);
atexit (f06);
atexit (f07);
atexit (f08);
atexit (f09);
atexit (f10);
atexit (f11);
atexit (f12);
atexit (f13);
atexit (f14);
atexit (f15);
atexit (f16);
atexit (f17);
atexit (f18);
atexit (f19);
atexit (f20);
atexit (f21);
atexit (f22);
atexit (f23);
atexit (f24);
atexit (f25);
atexit (f26);
atexit (f27);
atexit (f28);
atexit (f29);
atexit (f30);
atexit (f31);
atexit (f32);
atexit (f33);
atexit (f34);
atexit (f35);
atexit (f36);
atexit (f37);
atexit (f38);
atexit (f39);
next = 39;
}
| {
"pile_set_name": "Github"
} |
/*
* This file is part of the UCB release of Plan 9. It is subject to the license
* terms in the LICENSE file found in the top-level directory of this
* distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
* part of the UCB release of Plan 9, including this file, may be copied,
* modified, propagated, or distributed except according to the terms contained
* in the LICENSE file.
*/
#include <u.h>
#include <libc.h>
#include <bio.h>
#include <ip.h>
#include <ndb.h>
/*
 * Look for a pair with the given attribute. Look first on the same line,
* then in the whole entry.
*/
Ndbtuple*
ndbfindattr(Ndbtuple *entry, Ndbtuple *line, char *attr)
{
Ndbtuple *nt;
/* first look on same line (closer binding) */
for(nt = line; nt;){
if(strcmp(attr, nt->attr) == 0)
return nt;
nt = nt->line;
if(nt == line)
break;
}
/* search whole tuple */
for(nt = entry; nt; nt = nt->entry)
if(strcmp(attr, nt->attr) == 0)
return nt;
return nil;
}
Ndbtuple*
ndblookval(Ndbtuple *entry, Ndbtuple *line, char *attr, char *to)
{
Ndbtuple *t;
t = ndbfindattr(entry, line, attr);
if(t != nil){
strncpy(to, t->val, Ndbvlen-1);
to[Ndbvlen-1] = 0;
}
return t;
}
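/*
 * A minimal usage sketch (illustrative; assumes an entry/line pair obtained
 * from this library's query routines, e.g. ndbsearch):
 *
 *	Ndbtuple *ip = ndbfindattr(entry, line, "ip");
 *	if(ip != nil)
 *		print("ip=%s\n", ip->val);
 */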
| {
"pile_set_name": "Github"
} |
<!--- Provide a short summary of your issue in the Title above. -->
### New Issue Checklist
<!-- Before you submit your issue, please make sure to check the following boxes by putting an x in the [ ] -->
- [ ] I updated Haptica to the latest version.
- [ ] I read the [Contribution Guidelines](https://github.com/efremidze/Haptica/blob/master/.github/CONTRIBUTING.md).
- [ ] I read the [documentation](https://github.com/efremidze/Haptica).
- [ ] I searched for [existing GitHub issues](https://github.com/efremidze/Haptica/issues).
### Issue Description
<!--- Describe your issue in detail. -->
<!--- Do not hesitate to attach screenshots if they can be helpful. -->
### Environment
- **iOS Version**: [INSERT iOS VERSION HERE]
- **Device(s)**: [INSERT DEVICE(S) HERE]
| {
"pile_set_name": "Github"
} |
<!DOCTYPE qhelp PUBLIC
"-//Semmle//qhelp//EN"
"qhelp.dtd">
<qhelp>
<overview>
<p>
Code should not synchronize on a variable or field of a boxed type (for example
<code>Integer</code>, <code>Boolean</code>) or of type <code>String</code> since
it is likely to contain an object that is used throughout the program. For example,
<code>Boolean.TRUE</code> holds a single instance that will be used in many places
throughout the program: whenever <code>true</code> is autoboxed or a call to
<code>Boolean.valueOf</code> is made with <code>true</code> as an argument the
same instance of <code>Boolean</code> is returned. It is therefore likely that
two classes synchronizing on a field of type <code>Boolean</code> will end up
synchronizing on the same object. This may lead to deadlock or threads being
blocked unnecessarily.
</p>
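<p>
For instance, the following two expressions are guaranteed to yield the very
same <code>Boolean</code> instance, so any locks taken on them would collide
(a minimal illustration, separate from the samples referenced below):
</p>
<sample language="java">
Boolean b1 = Boolean.valueOf(true); // returns the cached Boolean.TRUE
Boolean b2 = true;                  // autoboxing also returns Boolean.TRUE
assert b1 == b2;                    // same instance -- a shared lock object
</sample>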
</overview>
<recommendation>
<p>
Synchronize on a specific lock object instead of using an object with a boxed type.
</p>
</recommendation>
<example>
<p>
In the following example, the intention is to allow <code>ThreadA</code> and
<code>ThreadB</code> to run at the same time. Unfortunately,
<code>ThreadA.lock</code> and <code>ThreadB.lock</code> both refer to the same
object (that is, the interned value of the <code>String</code>
<code>"lock"</code>) so the synchronized blocks in their run methods can not be
executed concurrently.
</p>
<sample src="SynchOnBoxedType.java" />
<p>
In the following example, the approach recommended above is shown. A separate
lock object is created for each thread allowing them to execute concurrently.
</p>
<sample src="SynchOnBoxedTypeGood.java" />
</example>
<references>
<li>
The CERT Oracle Secure Coding Standard for Java:
<a href="https://www.securecoding.cert.org/confluence/display/java/LCK01-J.+Do+not+synchronize+on+objects+that+may+be+reused">LCK01-J. Do not synchronize on objects that may be reused</a>.
</li>
</references>
</qhelp>
| {
"pile_set_name": "Github"
} |
//
// PSMTabDragWindow.m
// PSMTabBarControl
//
// Created by Kent Sutherland on 6/1/06.
// Copyright 2006 Kent Sutherland. All rights reserved.
//
#import "PSMTabDragWindow.h"
#import "PSMTabDragView.h"
@implementation PSMTabDragWindow
+ (PSMTabDragWindow *)dragWindowWithImage:(NSImage *)image styleMask:(NSUInteger)styleMask {
return [[PSMTabDragWindow alloc] initWithImage:image styleMask:styleMask];
}
- (id)initWithImage:(NSImage *)image styleMask:(NSUInteger)styleMask {
NSSize size = [image size];
if((self = [super initWithContentRect:NSMakeRect(0, 0, size.width, size.height) styleMask:styleMask backing:NSBackingStoreBuffered defer:NO])) {
_dragView = [[PSMTabDragView alloc] initWithFrame:NSMakeRect(0, 0, size.width, size.height)];
[self setContentView:_dragView];
[self setLevel:NSStatusWindowLevel];
[self setIgnoresMouseEvents:YES];
[self setOpaque:NO];
[_dragView setImage:image];
//Set the size of the window to be the exact size of the drag image
NSRect windowFrame = [self frame];
windowFrame.origin.y += windowFrame.size.height - size.height;
windowFrame.size = size;
// NSBorderlessWindowMask is 0, so the original test (styleMask | NSBorderlessWindowMask)
// was always true; check for a title bar before adding its height.
if(styleMask & NSTitledWindowMask) {
windowFrame.size.height += 22;
}
[self setFrame:windowFrame display:YES];
}
return self;
}
- (PSMTabDragView *)dragView {
return _dragView;
}
@end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2010 WiYun Inc.
* Author: luma([email protected])
*
* For all entities this program is free software; you can redistribute
* it and/or modify it under the terms of the 'WiEngine' license with
* the additional provision that 'WiEngine' must be credited in a manner
* that can be be observed by end users, for example, in the credits or during
* start up. (please find WiEngine logo in sdk's logo folder)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.wiyun.engine.tests.lua;
import com.wiyun.engine.WiEngineTestActivity;
public class SplitWordsTest extends WiEngineTestActivity {
private native void nativeStart();
@Override
protected void runDemo() {
nativeStart();
}
}
| {
"pile_set_name": "Github"
} |
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#include "cbase.h"
#include "basehlcombatweapon.h"
#include "engine/IEngineSound.h"
#include "npcevent.h"
#include "in_buttons.h"
#include "antlion_maker.h"
#include "grenade_bugbait.h"
#include "gamestats.h"
// memdbgon must be the last include file in a .cpp file!!!
#include "tier0/memdbgon.h"
//
// Bug Bait Weapon
//
class CWeaponBugBait : public CBaseHLCombatWeapon
{
DECLARE_CLASS( CWeaponBugBait, CBaseHLCombatWeapon );
public:
DECLARE_SERVERCLASS();
CWeaponBugBait( void );
void Spawn( void );
void FallInit( void );
int CapabilitiesGet( void ) { return bits_CAP_WEAPON_RANGE_ATTACK1; }
void Operator_HandleAnimEvent( animevent_t *pEvent, CBaseCombatCharacter *pOperator );
void Drop( const Vector &vecVelocity );
void BugbaitStickyTouch( CBaseEntity *pOther );
void OnPickedUp( CBaseCombatCharacter *pNewOwner );
bool Deploy( void );
bool Holster( CBaseCombatWeapon *pSwitchingTo );
void ItemPostFrame( void );
void Precache( void );
void PrimaryAttack( void );
void SecondaryAttack( void );
void ThrowGrenade( CBasePlayer *pPlayer );
bool HasAnyAmmo( void ) { return true; }
bool Reload( void );
void SetSporeEmitterState( bool state = true );
bool ShouldDisplayHUDHint() { return true; }
DECLARE_DATADESC();
protected:
bool m_bDrawBackFinished;
bool m_bRedraw;
bool m_bEmitSpores;
EHANDLE m_hSporeTrail;
};
IMPLEMENT_SERVERCLASS_ST(CWeaponBugBait, DT_WeaponBugBait)
END_SEND_TABLE()
LINK_ENTITY_TO_CLASS( weapon_bugbait, CWeaponBugBait );
#ifndef HL2MP
PRECACHE_WEAPON_REGISTER( weapon_bugbait );
#endif
BEGIN_DATADESC( CWeaponBugBait )
DEFINE_FIELD( m_hSporeTrail, FIELD_EHANDLE ),
DEFINE_FIELD( m_bRedraw, FIELD_BOOLEAN ),
DEFINE_FIELD( m_bEmitSpores, FIELD_BOOLEAN ),
DEFINE_FIELD( m_bDrawBackFinished, FIELD_BOOLEAN ),
DEFINE_FUNCTION( BugbaitStickyTouch ),
END_DATADESC()
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
CWeaponBugBait::CWeaponBugBait( void )
{
m_bDrawBackFinished = false;
m_bRedraw = false;
m_hSporeTrail = NULL;
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::Spawn( void )
{
BaseClass::Spawn();
// Increase the bugbait's pickup volume. It spawns inside the antlion guard's body,
// and playtesters seem to be wary about moving into the body.
SetSize( Vector( -4, -4, -4), Vector(4, 4, 4) );
CollisionProp()->UseTriggerBounds( true, 100 );
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::FallInit( void )
{
// Bugbait shouldn't be physics, because it mustn't roll/move away from its spawnpoint.
// The game will break if the player can't pick it up, so it must stay still.
SetModel( GetWorldModel() );
VPhysicsDestroyObject();
SetMoveType( MOVETYPE_FLYGRAVITY );
SetSolid( SOLID_BBOX );
AddSolidFlags( FSOLID_TRIGGER );
SetPickupTouch();
SetThink( &CBaseCombatWeapon::FallThink );
SetNextThink( gpGlobals->curtime + 0.1f );
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::Precache( void )
{
BaseClass::Precache();
UTIL_PrecacheOther( "npc_grenade_bugbait" );
PrecacheScriptSound( "Weapon_Bugbait.Splat" );
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::Drop( const Vector &vecVelocity )
{
BaseClass::Drop( vecVelocity );
// On touch, stick & stop moving. Increase our thinktime a bit so we don't stomp the touch for a bit
SetNextThink( gpGlobals->curtime + 3.0 );
SetTouch( &CWeaponBugBait::BugbaitStickyTouch );
m_hSporeTrail = SporeExplosion::CreateSporeExplosion();
if ( m_hSporeTrail )
{
SporeExplosion *pSporeExplosion = (SporeExplosion *)m_hSporeTrail.Get();
QAngle angles;
VectorAngles( Vector(0,0,1), angles );
pSporeExplosion->SetAbsAngles( angles );
pSporeExplosion->SetAbsOrigin( GetAbsOrigin() );
pSporeExplosion->SetParent( this );
pSporeExplosion->m_flSpawnRate = 16.0f;
pSporeExplosion->m_flParticleLifetime = 0.5f;
pSporeExplosion->SetRenderColor( 0.0f, 0.5f, 0.25f, 0.15f );
pSporeExplosion->m_flStartSize = 32;
pSporeExplosion->m_flEndSize = 48;
pSporeExplosion->m_flSpawnRadius = 4;
pSporeExplosion->SetLifetime( 9999 );
}
}
//-----------------------------------------------------------------------------
// Purpose: Stick to the world when we touch it
//-----------------------------------------------------------------------------
void CWeaponBugBait::BugbaitStickyTouch( CBaseEntity *pOther )
{
if ( !pOther->IsWorld() )
return;
// Stop moving, wait for pickup
SetMoveType( MOVETYPE_NONE );
SetThink( NULL );
SetPickupTouch();
}
//-----------------------------------------------------------------------------
// Purpose:
// Input : *pPicker -
//-----------------------------------------------------------------------------
void CWeaponBugBait::OnPickedUp( CBaseCombatCharacter *pNewOwner )
{
BaseClass::OnPickedUp( pNewOwner );
if ( m_hSporeTrail )
{
UTIL_Remove( m_hSporeTrail );
}
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::PrimaryAttack( void )
{
if ( m_bRedraw )
return;
CBaseCombatCharacter *pOwner = GetOwner();
if ( pOwner == NULL )
return;
CBasePlayer *pPlayer = ToBasePlayer( GetOwner() );
if ( pPlayer == NULL )
return;
SendWeaponAnim( ACT_VM_HAULBACK );
m_flTimeWeaponIdle = FLT_MAX;
m_flNextPrimaryAttack = FLT_MAX;
m_iPrimaryAttacks++;
gamestats->Event_WeaponFired( pPlayer, true, GetClassname() );
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::SecondaryAttack( void )
{
// Squeeze!
CPASAttenuationFilter filter( this );
EmitSound( filter, entindex(), "Weapon_Bugbait.Splat" );
if ( CGrenadeBugBait::ActivateBugbaitTargets( GetOwner(), GetAbsOrigin(), true ) == false )
{
g_AntlionMakerManager.BroadcastFollowGoal( GetOwner() );
}
SendWeaponAnim( ACT_VM_SECONDARYATTACK );
m_flNextSecondaryAttack = gpGlobals->curtime + SequenceDuration();
CBasePlayer *pOwner = ToBasePlayer( GetOwner() );
if ( pOwner )
{
m_iSecondaryAttacks++;
gamestats->Event_WeaponFired( pOwner, false, GetClassname() );
}
}
//-----------------------------------------------------------------------------
// Purpose:
// Input : *pPlayer -
//-----------------------------------------------------------------------------
void CWeaponBugBait::ThrowGrenade( CBasePlayer *pPlayer )
{
Vector vForward, vRight, vUp, vThrowPos, vThrowVel;
pPlayer->EyeVectors( &vForward, &vRight, &vUp );
vThrowPos = pPlayer->EyePosition();
vThrowPos += vForward * 18.0f;
vThrowPos += vRight * 12.0f;
pPlayer->GetVelocity( &vThrowVel, NULL );
vThrowVel += vForward * 1000;
CGrenadeBugBait *pGrenade = BugBaitGrenade_Create( vThrowPos, vec3_angle, vThrowVel, QAngle(600,random->RandomInt(-1200,1200),0), pPlayer );
if ( pGrenade != NULL )
{
// If the shot is clear to the player, give the missile a grace period
trace_t tr;
UTIL_TraceLine( pPlayer->EyePosition(), pPlayer->EyePosition() + ( vForward * 128 ), MASK_SHOT, this, COLLISION_GROUP_NONE, &tr );
if ( tr.fraction == 1.0 )
{
pGrenade->SetGracePeriod( 0.1f );
}
}
m_bRedraw = true;
}
//-----------------------------------------------------------------------------
// Purpose:
// Input : *pEvent -
// *pOperator -
//-----------------------------------------------------------------------------
void CWeaponBugBait::Operator_HandleAnimEvent( animevent_t *pEvent, CBaseCombatCharacter *pOperator )
{
CBasePlayer *pOwner = ToBasePlayer( GetOwner() );
switch( pEvent->event )
{
case EVENT_WEAPON_SEQUENCE_FINISHED:
m_bDrawBackFinished = true;
break;
case EVENT_WEAPON_THROW:
ThrowGrenade( pOwner );
break;
default:
BaseClass::Operator_HandleAnimEvent( pEvent, pOperator );
break;
}
}
//-----------------------------------------------------------------------------
// Purpose:
// Output : Returns true on success, false on failure.
//-----------------------------------------------------------------------------
bool CWeaponBugBait::Reload( void )
{
if ( ( m_bRedraw ) && ( m_flNextPrimaryAttack <= gpGlobals->curtime ) )
{
//Redraw the weapon
SendWeaponAnim( ACT_VM_DRAW );
//Update our times
m_flNextPrimaryAttack = gpGlobals->curtime + SequenceDuration();
//Mark this as done
m_bRedraw = false;
}
return true;
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
void CWeaponBugBait::ItemPostFrame( void )
{
CBasePlayer *pOwner = ToBasePlayer( GetOwner() );
if ( pOwner == NULL )
return;
// See if we're cocked and ready to throw
if ( m_bDrawBackFinished )
{
if ( ( pOwner->m_nButtons & IN_ATTACK ) == false )
{
SendWeaponAnim( ACT_VM_THROW );
m_flNextPrimaryAttack = gpGlobals->curtime + SequenceDuration();
m_bDrawBackFinished = false;
}
}
else
{
//See if we're attacking
if ( ( pOwner->m_nButtons & IN_ATTACK ) && ( m_flNextPrimaryAttack < gpGlobals->curtime ) )
{
PrimaryAttack();
}
else if ( ( pOwner->m_nButtons & IN_ATTACK2 ) && ( m_flNextSecondaryAttack < gpGlobals->curtime ) )
{
SecondaryAttack();
}
}
if ( m_bRedraw )
{
if ( IsViewModelSequenceFinished() )
{
Reload();
}
}
WeaponIdle();
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
bool CWeaponBugBait::Deploy( void )
{
CBasePlayer *pOwner = ToBasePlayer( GetOwner() );
if ( pOwner == NULL )
return false;
/*
if ( m_hSporeTrail == NULL )
{
m_hSporeTrail = SporeTrail::CreateSporeTrail();
m_hSporeTrail->m_bEmit = true;
m_hSporeTrail->m_flSpawnRate = 100.0f;
m_hSporeTrail->m_flParticleLifetime = 2.0f;
m_hSporeTrail->m_flStartSize = 1.0f;
m_hSporeTrail->m_flEndSize = 4.0f;
m_hSporeTrail->m_flSpawnRadius = 8.0f;
m_hSporeTrail->m_vecEndColor = Vector( 0, 0, 0 );
CBaseViewModel *vm = pOwner->GetViewModel();
if ( vm != NULL )
{
m_hSporeTrail->FollowEntity( vm );
}
}
*/
m_bRedraw = false;
m_bDrawBackFinished = false;
return BaseClass::Deploy();
}
//-----------------------------------------------------------------------------
// Purpose:
//-----------------------------------------------------------------------------
bool CWeaponBugBait::Holster( CBaseCombatWeapon *pSwitchingTo )
{
m_bRedraw = false;
m_bDrawBackFinished = false;
return BaseClass::Holster( pSwitchingTo );
}
//-----------------------------------------------------------------------------
// Purpose:
// Input : true -
//-----------------------------------------------------------------------------
void CWeaponBugBait::SetSporeEmitterState( bool state )
{
m_bEmitSpores = state;
}
| {
"pile_set_name": "Github"
} |
"""Generic interface to all dbm clones.
Instead of
import dbm
d = dbm.open(file, 'w', 0666)
use
import anydbm
d = anydbm.open(file, 'w')
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
dependent on the type of database being opened (determined by whichdb
module) in the case of an existing dbm. If the dbm does not exist and
the create or new flag ('c' or 'n') was specified, the dbm type will
be determined by the availability of the modules (tested in the above
order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, or add interfaces to other dbm-like
implementations.
The open function has an optional second argument. This can be 'r',
for read-only access, 'w', for read-write access of an existing
database, 'c' for read-write access to a new or existing database, and
'n' for read-write access to a new database. The default is 'r'.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
"""
class error(Exception):
pass
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
_errors = [error]
_defaultmod = None
for _name in _names:
try:
_mod = __import__(_name)
except ImportError:
continue
if not _defaultmod:
_defaultmod = _mod
_errors.append(_mod.error)
if not _defaultmod:
raise ImportError, "no dbm clone found; tried %s" % _names
error = tuple(_errors)
def open(file, flag = 'r', mode = 0666):
# guess the type of an existing database
from whichdb import whichdb
result=whichdb(file)
if result is None:
# db doesn't exist
if 'c' in flag or 'n' in flag:
# file doesn't exist and the new
# flag was used so use default type
mod = _defaultmod
else:
raise error, "need 'c' or 'n' flag to open new db"
elif result == "":
# db type cannot be determined
raise error, "db type could not be determined"
else:
mod = __import__(result)
return mod.open(file, flag, mode)
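# A minimal usage sketch (illustrative addition; the backing implementation
# depends on which dbm module is installed):
if __name__ == '__main__':
    import os, tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo')
    d = open(path, 'c')             # create the db if it doesn't exist
    d['key'] = 'value'              # keys and values are strings
    d.close()
    d = open(path, 'r')             # read-only access to the existing db
    print d['key']                  # prints: value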
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lettuce.core.cluster.api.sync;
/**
* Synchronous executed commands on a node selection for HyperLogLog (PF* commands).
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
* @since 3.0
* @generated by io.lettuce.apigenerator.CreateSyncNodeSelectionClusterApi
*/
public interface NodeSelectionHLLCommands<K, V> {
/**
* Adds the specified elements to the specified HyperLogLog.
*
* @param key the key
* @param values the values
*
* @return Long integer-reply specifically:
*
* 1 if at least 1 HyperLogLog internal register was altered. 0 otherwise.
*/
Executions<Long> pfadd(K key, V... values);
/**
* Merge N different HyperLogLogs into a single one.
*
* @param destkey the destination key
* @param sourcekeys the source key
*
* @return String simple-string-reply The command just returns {@code OK}.
*/
Executions<String> pfmerge(K destkey, K... sourcekeys);
/**
* Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).
*
* @param keys the keys
*
* @return Long integer-reply specifically:
*
* The approximated number of unique elements observed via {@code PFADD}.
*/
Executions<Long> pfcount(K... keys);
}
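// A minimal usage sketch (illustrative; assumes `hll` was obtained from a
// cluster node selection via the NodeSelection API -- acquisition omitted):
//
//   hll.pfadd("visitors", "alice", "bob");
//   Executions<Long> counts = hll.pfcount("visitors");
//   counts.forEach(count -> System.out.println(count));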
| {
"pile_set_name": "Github"
} |
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on Steven Toth <[email protected]> cx23885 driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx25821-video.h"
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
struct cx25821_buffer *prev;
struct cx25821_fh *fh = vq->priv_data;
struct cx25821_dev *dev = fh->dev;
struct cx25821_dmaqueue *q = &dev->vidq[SRAM_CH05];
/* add jump to stopper */
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]);
if (!list_empty(&q->queued)) {
list_add_tail(&buf->vb.queue, &q->queued);
buf->vb.state = VIDEOBUF_QUEUED;
dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf,
buf->vb.i);
} else if (list_empty(&q->active)) {
list_add_tail(&buf->vb.queue, &q->active);
cx25821_start_video_dma(dev, q, buf,
&dev->sram_channels[SRAM_CH05]);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
dprintk(2,
"[%p/%d] buffer_queue - first active, buf cnt = %d, q->count = %d\n",
buf, buf->vb.i, buf->count, q->count);
} else {
prev =
list_entry(q->active.prev, struct cx25821_buffer, vb.queue);
if (prev->vb.width == buf->vb.width
&& prev->vb.height == buf->vb.height
&& prev->fmt == buf->fmt) {
list_add_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
/* 64 bit bits 63-32 */
prev->risc.jmp[2] = cpu_to_le32(0);
dprintk(2,
"[%p/%d] buffer_queue - append to active, buf->count=%d\n",
buf, buf->vb.i, buf->count);
} else {
list_add_tail(&buf->vb.queue, &q->queued);
buf->vb.state = VIDEOBUF_QUEUED;
dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf,
buf->vb.i);
}
}
if (list_empty(&q->active)) {
dprintk(2, "active queue empty!\n");
}
}
static struct videobuf_queue_ops cx25821_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
static int video_open(struct file *file)
{
int minor = video_devdata(file)->minor;
struct cx25821_dev *h, *dev = NULL;
struct cx25821_fh *fh;
struct list_head *list;
enum v4l2_buf_type type = 0;
u32 pix_format;
lock_kernel();
list_for_each(list, &cx25821_devlist) {
h = list_entry(list, struct cx25821_dev, devlist);
if (h->video_dev[SRAM_CH05]
&& h->video_dev[SRAM_CH05]->minor == minor) {
dev = h;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
}
}
if (NULL == dev) {
unlock_kernel();
return -ENODEV;
}
printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh) {
unlock_kernel();
return -ENOMEM;
}
file->private_data = fh;
fh->dev = dev;
fh->type = type;
fh->width = 720;
if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
fh->height = 576;
else
fh->height = 480;
dev->channel_opened = SRAM_CH05;
pix_format =
(dev->pixel_formats[dev->channel_opened] ==
PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV;
fh->fmt = format_by_fourcc(pix_format);
v4l2_prio_open(&dev->prio, &fh->prio);
videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx25821_buffer), fh);
dprintk(1, "post videobuf_queue_init()\n");
unlock_kernel();
return 0;
}
static ssize_t video_read(struct file *file, char __user * data, size_t count,
loff_t * ppos)
{
struct cx25821_fh *fh = file->private_data;
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (res_locked(fh->dev, RESOURCE_VIDEO5))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
default:
BUG();
return 0;
}
}
static unsigned int video_poll(struct file *file,
struct poll_table_struct *wait)
{
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
if (res_check(fh, RESOURCE_VIDEO5)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
buf = list_entry(fh->vidq.stream.next,
struct cx25821_buffer, vb.stream);
} else {
/* read() capture */
buf = (struct cx25821_buffer *)fh->vidq.read_buf;
if (NULL == buf)
return POLLERR;
}
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) {
if (buf->vb.state == VIDEOBUF_DONE) {
struct cx25821_dev *dev = fh->dev;
if (dev && dev->use_cif_resolution[SRAM_CH05]) {
u8 cam_id = *((char *)buf->vb.baddr + 3);
memcpy((char *)buf->vb.baddr,
(char *)buf->vb.baddr + (fh->width * 2),
(fh->width * 2));
*((char *)buf->vb.baddr + 3) = cam_id;
}
}
return POLLIN | POLLRDNORM;
}
return 0;
}
static int video_release(struct file *file)
{
struct cx25821_fh *fh = file->private_data;
struct cx25821_dev *dev = fh->dev;
// stop the RISC engine and FIFO
cx_write(channel5->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO5)) {
videobuf_queue_cancel(&fh->vidq);
res_free(dev, fh, RESOURCE_VIDEO5);
}
if (fh->vidq.read_buf) {
buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
v4l2_prio_close(&dev->prio, &fh->prio);
file->private_data = NULL;
kfree(fh);
return 0;
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = fh->dev;
if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
return -EINVAL;
}
if (unlikely(i != fh->type)) {
return -EINVAL;
}
if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO5)))) {
return -EBUSY;
}
return videobuf_streamon(get_queue(fh));
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = fh->dev;
int err, res;
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (i != fh->type)
return -EINVAL;
res = get_resource(fh, RESOURCE_VIDEO5);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
res_free(dev, fh, res);
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
int err;
int pix_format = 0;
if (fh) {
err = v4l2_prio_check(&dev->prio, &fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
err = vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
pix_format = PIXEL_FRMT_411;
else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
pix_format = PIXEL_FRMT_422;
else
return -EINVAL;
cx25821_set_pixel_format(dev, SRAM_CH05, pix_format);
// check if cif resolution
if (fh->width == 320 || fh->width == 352) {
dev->use_cif_resolution[SRAM_CH05] = 1;
} else {
dev->use_cif_resolution[SRAM_CH05] = 0;
}
dev->cif_width[SRAM_CH05] = fh->width;
medusa_set_resolution(dev, fh->width, SRAM_CH05);
dprintk(2, "%s() width=%d height=%d field=%d\n", __func__, fh->width,
fh->height, fh->vidq.field);
cx25821_call_all(dev, video, s_fmt, f);
return 0;
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int ret_val = 0;
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
ret_val = videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK);
p->sequence = dev->vidq[SRAM_CH05].count;
return ret_val;
}
static int vidioc_log_status(struct file *file, void *priv)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
char name[32 + 2];
struct sram_channel *sram_ch = &dev->sram_channels[SRAM_CH05];
u32 tmp = 0;
snprintf(name, sizeof(name), "%s/2", dev->name);
printk(KERN_INFO "%s/2: ============ START LOG STATUS ============\n",
dev->name);
cx25821_call_all(dev, core, log_status);
tmp = cx_read(sram_ch->dma_ctl);
printk(KERN_INFO "Video input 5 is %s\n",
(tmp & 0x11) ? "streaming" : "stopped");
printk(KERN_INFO "%s/2: ============= END LOG STATUS =============\n",
dev->name);
return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
int err;
if (fh) {
err = v4l2_prio_check(&dev->prio, &fh->prio);
if (0 != err)
return err;
}
return cx25821_set_control(dev, ctl, SRAM_CH05);
}
// exported stuff
static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = video_open,
.release = video_release,
.read = video_read,
.poll = video_poll,
.mmap = video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
.vidioc_s_std = vidioc_s_std,
.vidioc_querystd = vidioc_querystd,
#endif
.vidioc_cropcap = vidioc_cropcap,
.vidioc_s_crop = vidioc_s_crop,
.vidioc_g_crop = vidioc_g_crop,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
.vidioc_g_priority = vidioc_g_priority,
.vidioc_s_priority = vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
};
struct video_device cx25821_video_template5 = {
.name = "cx25821-video",
.fops = &video_fops,
.minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
| {
"pile_set_name": "Github"
} |
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
make
| {
"pile_set_name": "Github"
} |
#!/usr/local/bin/ruby -w
$TESTING = true
require 'minitest/autorun'
require 'sexp_processor'
require 'stringio'
require 'pp'
class SexpTestCase < MiniTest::Unit::TestCase
# KEY for regex tests
# :a == no change
# :b == will change (but sometimes ONLY once)
# :c == change to
include SexpMatchSpecials
def util_equals(x, y)
result = x == y
assert result, "#{x.inspect} does not == #{y.inspect}"
end
def util_equals3(x, y)
result = x === y
refute_nil result, "#{x.inspect} does not === #{y.inspect}"
end
def setup
@any = ANY()
end
def test_stupid
# shuts up test/unit
end
end
class TestSexp < SexpTestCase # ZenTest FULL
class SexpFor
def method
1
end
end
def util_pretty_print(expect, input)
io = StringIO.new
PP.pp(input, io)
io.rewind
assert_equal(expect, io.read.chomp)
end
def setup
super
@sexp_class = Object.const_get(self.class.name[4..-1])
@processor = SexpProcessor.new
@sexp = @sexp_class.new(1, 2, 3)
@basic_sexp = s(:lasgn, :var, s(:lit, 42))
@re = s(:lit, 42)
@bad1 = s(:lit, 24)
@bad1 = s(:blah, 42)
end
def test_class_from_array
# raise NotImplementedError, 'Need to write test_class_from_array'
end
def test_class_index
# raise NotImplementedError, 'Need to write test_class_index'
end
def test_array_type_eh
assert_equal false, @sexp.array_type?
@sexp.unshift :array
assert_equal true, @sexp.array_type?
end
def test_each_of_type
# TODO: huh... this test fails if the top-level sexp :b is removed
@sexp = s(:b, s(:a, s(:b, s(:a), :a, s(:b, :a), s(:b, s(:a)))))
count = 0
@sexp.each_of_type(:a) do |exp|
count += 1
end
assert_equal(3, count, "must find 3 a's in #{@sexp.inspect}")
end
def test_equals2_array
# can't use assert_equals because it uses array as receiver
refute_equal(@sexp, [1, 2, 3],
"Sexp must not be equal to equivalent array")
# both directions just in case
# HACK - this seems to be a bug in ruby as far as I'm concerned
# assert_not_equal([1, 2, 3], @sexp,
# "Sexp must not be equal to equivalent array")
end
def test_equals2_not_body
sexp2 = s(1, 2, 5)
refute_equal(@sexp, sexp2)
end
def test_equals2_sexp
sexp2 = s(1, 2, 3)
unless @sexp.class == Sexp then
refute_equal(@sexp, sexp2)
end
end
def test_equals3_any
util_equals3 @any, s()
util_equals3 @any, s(:a)
util_equals3 @any, s(:a, :b, s(:c))
end
def test_equals3_full_match
util_equals3 s(), s() # 0
util_equals3 s(:blah), s(:blah) # 1
util_equals3 s(:a, :b), s(:a, :b) # 2
util_equals3 @basic_sexp, @basic_sexp.dup # deeper structure
end
def test_equals3_mismatch
assert_nil s() === s(:a)
assert_nil s(:a) === s()
assert_nil s(:blah1) === s(:blah2)
assert_nil s(:a) === s(:a, :b)
assert_nil s(:a, :b) === s(:a)
assert_nil s(:a1, :b) === s(:a2, :b)
assert_nil s(:a, :b1) === s(:a, :b2)
assert_nil @basic_sexp === @basic_sexp.dup.push(42)
assert_nil @basic_sexp.dup.push(42) === @basic_sexp
end
def test_equals3_subset_match
util_equals3 s(:a), s(s(:a), s(:b)) # left
util_equals3 s(:a), s(:blah, s(:a ), s(:b)) # mid 1
util_equals3 s(:a, 1), s(:blah, s(:a, 1), s(:b)) # mid 2
util_equals3 @basic_sexp, s(:blah, @basic_sexp.dup, s(:b)) # mid deeper
util_equals3 @basic_sexp, s(@basic_sexp.dup, s(:a), s(:b)) # left deeper
util_equals3 s(:a), s(:blah, s(:blah, s(:a))) # left deeper
end
# def test_equalstilde_any
# result = @basic_sexp =~ s(:lit, ANY())
# p result
# assert result
# end
def test_equalstilde_fancy
assert_nil s(:b) =~ s(:a, s(:b), :c)
refute_nil s(:a, s(:b), :c) =~ s(:b)
end
def test_equalstilde_plain
result = @basic_sexp =~ @re
assert result
end
def test_find_and_replace_all
@sexp = s(:a, s(:b, s(:a), s(:b), s(:b, s(:a))))
expected = s(:a, s(:a, s(:a), s(:a), s(:a, s(:a))))
@sexp.find_and_replace_all(:b, :a)
assert_equal(expected, @sexp)
end
def test_gsub
assert_equal s(:c), s().gsub(s(), s(:c))
assert_equal s(:c), s(:b).gsub(s(:b), s(:c))
assert_equal s(:a), s(:a).gsub(s(:b), s(:c))
assert_equal s(:a, s(:c)), s(:a, s(:b)).gsub(s(:b), s(:c))
assert_equal(s(:a, s(:c), s(:c)),
s(:a, s(:b), s(:b)).gsub(s(:b), s(:c)))
assert_equal(s(:a, s(:c), s(:a, s(:c))),
s(:a, s(:b), s(:a, s(:b))).gsub(s(:b), s(:c)))
end
def test_inspect
k = @sexp_class
n = k.name[0].chr.downcase
assert_equal("#{n}()",
k.new().inspect)
assert_equal("#{n}(:a)",
k.new(:a).inspect)
assert_equal("#{n}(:a, :b)",
k.new(:a, :b).inspect)
assert_equal("#{n}(:a, #{n}(:b))",
k.new(:a, k.new(:b)).inspect)
end
def test_mass
assert_equal 1, s(:a).mass
assert_equal 3, s(:a, s(:b), s(:c)).mass
s = s(:iter,
s(:call, nil, :a, s(:arglist, s(:lit, 1))),
s(:lasgn, :c),
s(:call, nil, :d, s(:arglist)))
assert_equal 7, s.mass
end
def test_method_missing
assert_nil @sexp.not_there
assert_equal s(:lit, 42), @basic_sexp.lit
end
def test_method_missing_ambigious
assert_raises NoMethodError do
pirate = s(:says, s(:arrr!), s(:arrr!), s(:arrr!))
pirate.arrr!
end
end
def test_method_missing_deep
sexp = s(:blah, s(:a, s(:b, s(:c, :yay!))))
assert_equal(s(:c, :yay!), sexp.a.b.c)
end
def test_method_missing_delete
sexp = s(:blah, s(:a, s(:b, s(:c, :yay!))))
assert_equal(s(:c, :yay!), sexp.a.b.c(true))
assert_equal(s(:blah, s(:a, s(:b))), sexp)
end
def test_pretty_print
util_pretty_print("s()",
s())
util_pretty_print("s(:a)",
s(:a))
util_pretty_print("s(:a, :b)",
s(:a, :b))
util_pretty_print("s(:a, s(:b))",
s(:a, s(:b)))
end
def test_sexp_body
assert_equal [2, 3], @sexp.sexp_body
end
def test_shift
assert_equal(1, @sexp.shift)
assert_equal(2, @sexp.shift)
assert_equal(3, @sexp.shift)
assert_raises(RuntimeError) do
@sexp.shift
end
end
def test_structure
@sexp = s(:a, 1, 2, s(:b, 3, 4), 5, 6)
backup = @sexp.deep_clone
expected = s(:a, s(:b))
assert_equal(expected, @sexp.structure)
assert_equal(backup, @sexp)
end
def test_sub
assert_equal s(:c), s().sub(s(), s(:c))
assert_equal s(:c), s(:b).sub(s(:b), s(:c))
assert_equal s(:a), s(:a).sub(s(:b), s(:c))
assert_equal s(:a, s(:c)), s(:a, s(:c)).sub(s(:b), s(:c))
assert_equal s(:a, s(:c), s(:b)), s(:a, s(:b), s(:b)).sub(s(:b), s(:c))
assert_equal(s(:a, s(:c), s(:a)),
s(:a, s(:b), s(:a)).sub(s(:b), s(:c)))
assert_equal(s(:a, s(:c), s(:a, s(:a))),
s(:a, s(:b), s(:a, s(:a))).sub(s(:b), s(:c)))
assert_equal(s(:a, s(:a), s(:a, s(:c), s(:b))),
s(:a, s(:a), s(:a, s(:b), s(:b))).sub(s(:b), s(:c)))
assert_equal(s(:a, s(:c, s(:b))),
s(:a, s(:b)).sub(s(:b), s(:c, s(:b))))
end
def test_to_a
assert_equal([1, 2, 3], @sexp.to_a)
end
def test_to_s
test_inspect
end
end
class TestSexpAny < SexpTestCase
def setup
super
end
def test_equals
util_equals @any, s()
util_equals @any, s(:a)
util_equals @any, s(:a, :b, s(:c))
end
def test_equals3
util_equals3 @any, s()
util_equals3 @any, s(:a)
util_equals3 @any, s(:a, :b, s(:c))
end
end
| {
"pile_set_name": "Github"
} |
jvm.Region = function(config){
var bbox,
text,
offsets,
labelDx,
labelDy;
this.config = config;
this.map = this.config.map;
this.shape = config.canvas.addPath({
d: config.path,
'data-code': config.code
}, config.style, config.canvas.rootElement);
this.shape.addClass('jvectormap-region jvectormap-element');
bbox = this.shape.getBBox();
text = this.getLabelText(config.code);
if (this.config.label && text) {
offsets = this.getLabelOffsets(config.code);
this.labelX = bbox.x + bbox.width / 2 + offsets[0];
this.labelY = bbox.y + bbox.height / 2 + offsets[1];
this.label = config.canvas.addText({
text: text,
'text-anchor': 'middle',
'alignment-baseline': 'central',
x: this.labelX,
y: this.labelY,
'data-code': config.code
}, config.labelStyle, config.labelsGroup);
this.label.addClass('jvectormap-region jvectormap-element');
}
};
jvm.inherits(jvm.Region, jvm.MapObject);
jvm.Region.prototype.updateLabelPosition = function(){
if (this.label) {
this.label.set({
x: this.labelX * this.map.scale + this.map.transX * this.map.scale,
y: this.labelY * this.map.scale + this.map.transY * this.map.scale
});
}
}; | {
"pile_set_name": "Github"
} |
#include <SmingCore.h>
// If you want, you can define WiFi settings globally in Eclipse Environment Variables
#ifndef WIFI_SSID
#define WIFI_SSID "PleaseEnterSSID" // Put you SSID and Password here
#define WIFI_PWD "PleaseEnterPass"
#endif
FtpServer ftp;
void gotIP(IpAddress ip, IpAddress netmask, IpAddress gateway)
{
Serial.print("IP: ");
Serial.println(ip);
// Start FTP server
ftp.listen(21);
ftp.addUser("me", "123"); // FTP account
// You can also use the special FTP command "fsformat" for clearing the file system (for example from TotalCMD)
}
// Will be called when WiFi station timeout was reached
void connectFail(const String& ssid, MacAddress bssid, WifiDisconnectReason reason)
{
Serial.println("I'm NOT CONNECTED. Need help!!! :(");
// .. some of your code for configuration ..
}
void init()
{
spiffs_mount(); // Mount file system, in order to work with files
Serial.begin(SERIAL_BAUD_RATE); // 115200 by default
Serial.systemDebugOutput(true); // Enable debug output to serial
fileSetContent("example.txt", "hello world!");
fileSetContent("data.bin", "\1\2\3\4\5");
WifiStation.enable(true);
WifiStation.config(WIFI_SSID, WIFI_PWD);
WifiAccessPoint.enable(false);
// Run our method when station was connected to AP (or not connected)
WifiEvents.onStationDisconnect(connectFail);
WifiEvents.onStationGotIP(gotIP);
}
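// Usage sketch (illustrative): once the board joins WIFI_SSID and gotIP()
// runs, point any FTP client at port 21 with user "me" / password "123"
// and fetch example.txt or data.bin created above.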
| {
"pile_set_name": "Github"
} |
---
name: Viet-Tam Luu
link: https://plus.google.com/+VietTamLuu
organization: Google
occupation_title: Software Engineer
github: viettaml
---
| {
"pile_set_name": "Github"
} |
var convert = require('./convert'),
func = convert('invokeArgsMap', require('../invokeMap'));
func.placeholder = require('./placeholder');
module.exports = func;
| {
"pile_set_name": "Github"
} |
package main
import (
"fmt"
"github.com/blang/semver"
"github.com/rhysd/dotfiles/src"
"github.com/rhysd/go-github-selfupdate/selfupdate"
"gopkg.in/alecthomas/kingpin.v2"
"os"
)
var (
cli = kingpin.New("dotfiles", "A dotfiles symlinks manager")
clone = cli.Command("clone", "Clone remote repository")
cloneRepo = clone.Arg("repository", "Repository. Format: 'user', 'user/repo-name', 'git@github.com:repo.git', 'https://somewhere.com/repo.git'").Required().String()
clonePath = clone.Arg("path", "Path where repository cloned").String()
cloneHTTPS = clone.Flag("https", "Use https:// instead of git@ protocol for `git clone`.").Short('h').Bool()
link = cli.Command("link", "Put symlinks to setup your configurations")
linkDryRun = link.Flag("dry", "Show what happens only").Bool()
linkRepo = link.Arg("repo", "Path to your dotfiles repository. If omitted, $DOTFILES_REPO_PATH is searched, falling back to the current directory.").String()
linkSpecified = link.Arg("files", "Files to link. If you specify no file, all will be linked.").Strings()
// TODO link_no_default = link.Flag("no-default", "Link files specified by mappings.json and mappings_*.json")
list = cli.Command("list", "Show a list of symbolic link put by this command")
listRepo = list.Arg("repo", "Path to your dotfiles repository. If omitted, $DOTFILES_REPO_PATH is searched, falling back to the current directory.").String()
clean = cli.Command("clean", "Remove all symbolic links put by this command")
cleanRepo = clean.Arg("repo", "Path to your dotfiles repository. If omitted, $DOTFILES_REPO_PATH is searched, falling back to the current directory.").String()
update = cli.Command("update", "Update your dotfiles repository")
updateRepo = update.Arg("repo", "Path to your dotfiles repository. If omitted, $DOTFILES_REPO_PATH is searched, falling back to the current directory.").String()
version = cli.Command("version", "Show version")
updateSelf = cli.Command("selfupdate", "Update this command itself to the latest version")
)
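// Example invocations (repository names and paths are illustrative):
//
//	dotfiles clone rhysd      // clone the remote dotfiles repository
//	dotfiles link ~/dotfiles  // put symlinks for your configurations
//	dotfiles clean            // remove the symlinks put by 'link'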
func unimplemented(cmd string) {
fmt.Fprintf(os.Stderr, "Command '%s' is not implemented yet!\n", cmd)
}
func exit(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
// Note: Exit code is determined by looking at http://tldp.org/LDP/abs/html/exitcodes.html
os.Exit(113)
} else {
os.Exit(0)
}
}
func selfUpdate() {
v := semver.MustParse(dotfiles.Version())
latest, err := selfupdate.UpdateSelf(v, "rhysd/dotfiles")
if err != nil {
exit(err)
}
if v.Equals(latest.Version) {
fmt.Println("Current version", v, "is the latest")
} else {
fmt.Println("Successfully updated to version", v)
fmt.Println("Release Note:\n", latest.ReleaseNotes)
}
}
func main() {
switch kingpin.MustParse(cli.Parse(os.Args[1:])) {
case clone.FullCommand():
exit(dotfiles.Clone(*cloneRepo, *clonePath, *cloneHTTPS))
case link.FullCommand():
exit(dotfiles.Link(*linkRepo, *linkSpecified, *linkDryRun))
case list.FullCommand():
exit(dotfiles.List(*listRepo))
case clean.FullCommand():
exit(dotfiles.Clean(*cleanRepo))
case update.FullCommand():
exit(dotfiles.Update(*updateRepo))
case version.FullCommand():
fmt.Println(dotfiles.Version())
case updateSelf.FullCommand():
selfUpdate()
default:
panic("Internal error: Unreachable! Please report this to https://github.com/rhysd/dotfiles/issues")
}
}
| {
"pile_set_name": "Github"
} |
/* apps/ec.c */
/*
* Written by Nils Larsch for the OpenSSL project.
*/
/* ====================================================================
* Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
 * openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
*
*/
#include <openssl/opensslconf.h>
#ifndef OPENSSL_NO_EC
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include "apps.h"
# include <openssl/bio.h>
# include <openssl/err.h>
# include <openssl/evp.h>
# include <openssl/pem.h>
# undef PROG
# define PROG ec_main
/*-
* -inform arg - input format - default PEM (one of DER, NET or PEM)
* -outform arg - output format - default PEM
* -in arg - input file - default stdin
* -out arg - output file - default stdout
* -des - encrypt output if PEM format with DES in cbc mode
* -text - print a text version
* -param_out - print the elliptic curve parameters
* -conv_form arg - specifies the point encoding form
* -param_enc arg - specifies the parameter encoding
*/
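/*
 * Illustrative invocations (key.pem is a hypothetical input file):
 *   openssl ec -in key.pem -text -noout
 *   openssl ec -in key.pem -outform DER -out key.der
 */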
int MAIN(int, char **);
int MAIN(int argc, char **argv)
{
int ret = 1;
EC_KEY *eckey = NULL;
const EC_GROUP *group;
int i, badops = 0;
const EVP_CIPHER *enc = NULL;
BIO *in = NULL, *out = NULL;
int informat, outformat, text = 0, noout = 0;
int pubin = 0, pubout = 0, param_out = 0;
char *infile, *outfile, *prog, *engine;
ENGINE *e = NULL;
char *passargin = NULL, *passargout = NULL;
char *passin = NULL, *passout = NULL;
point_conversion_form_t form = POINT_CONVERSION_UNCOMPRESSED;
int new_form = 0;
int asn1_flag = OPENSSL_EC_NAMED_CURVE;
int new_asn1_flag = 0;
apps_startup();
if (bio_err == NULL)
if ((bio_err = BIO_new(BIO_s_file())) != NULL)
BIO_set_fp(bio_err, stderr, BIO_NOCLOSE | BIO_FP_TEXT);
if (!load_config(bio_err, NULL))
goto end;
engine = NULL;
infile = NULL;
outfile = NULL;
informat = FORMAT_PEM;
outformat = FORMAT_PEM;
prog = argv[0];
argc--;
argv++;
while (argc >= 1) {
if (strcmp(*argv, "-inform") == 0) {
if (--argc < 1)
goto bad;
informat = str2fmt(*(++argv));
} else if (strcmp(*argv, "-outform") == 0) {
if (--argc < 1)
goto bad;
outformat = str2fmt(*(++argv));
} else if (strcmp(*argv, "-in") == 0) {
if (--argc < 1)
goto bad;
infile = *(++argv);
} else if (strcmp(*argv, "-out") == 0) {
if (--argc < 1)
goto bad;
outfile = *(++argv);
} else if (strcmp(*argv, "-passin") == 0) {
if (--argc < 1)
goto bad;
passargin = *(++argv);
} else if (strcmp(*argv, "-passout") == 0) {
if (--argc < 1)
goto bad;
passargout = *(++argv);
} else if (strcmp(*argv, "-engine") == 0) {
if (--argc < 1)
goto bad;
engine = *(++argv);
} else if (strcmp(*argv, "-noout") == 0)
noout = 1;
else if (strcmp(*argv, "-text") == 0)
text = 1;
else if (strcmp(*argv, "-conv_form") == 0) {
if (--argc < 1)
goto bad;
++argv;
new_form = 1;
if (strcmp(*argv, "compressed") == 0)
form = POINT_CONVERSION_COMPRESSED;
else if (strcmp(*argv, "uncompressed") == 0)
form = POINT_CONVERSION_UNCOMPRESSED;
else if (strcmp(*argv, "hybrid") == 0)
form = POINT_CONVERSION_HYBRID;
else
goto bad;
} else if (strcmp(*argv, "-param_enc") == 0) {
if (--argc < 1)
goto bad;
++argv;
new_asn1_flag = 1;
if (strcmp(*argv, "named_curve") == 0)
asn1_flag = OPENSSL_EC_NAMED_CURVE;
else if (strcmp(*argv, "explicit") == 0)
asn1_flag = 0;
else
goto bad;
} else if (strcmp(*argv, "-param_out") == 0)
param_out = 1;
else if (strcmp(*argv, "-pubin") == 0)
pubin = 1;
else if (strcmp(*argv, "-pubout") == 0)
pubout = 1;
else if ((enc = EVP_get_cipherbyname(&(argv[0][1]))) == NULL) {
BIO_printf(bio_err, "unknown option %s\n", *argv);
badops = 1;
break;
}
argc--;
argv++;
}
if (badops) {
bad:
BIO_printf(bio_err, "%s [options] <infile >outfile\n", prog);
BIO_printf(bio_err, "where options are\n");
BIO_printf(bio_err, " -inform arg input format - "
"DER or PEM\n");
BIO_printf(bio_err, " -outform arg output format - "
"DER or PEM\n");
BIO_printf(bio_err, " -in arg input file\n");
BIO_printf(bio_err, " -passin arg input file pass "
"phrase source\n");
BIO_printf(bio_err, " -out arg output file\n");
BIO_printf(bio_err, " -passout arg output file pass "
"phrase source\n");
BIO_printf(bio_err, " -engine e use engine e, "
"possibly a hardware device.\n");
BIO_printf(bio_err, " -des encrypt PEM output, "
"instead of 'des' every other \n"
" cipher "
"supported by OpenSSL can be used\n");
BIO_printf(bio_err, " -text print the key\n");
BIO_printf(bio_err, " -noout don't print key out\n");
BIO_printf(bio_err, " -param_out print the elliptic "
"curve parameters\n");
BIO_printf(bio_err, " -conv_form arg specifies the "
"point conversion form \n");
BIO_printf(bio_err, " possible values:"
" compressed\n");
BIO_printf(bio_err, " "
" uncompressed (default)\n");
BIO_printf(bio_err, " " " hybrid\n");
BIO_printf(bio_err, " -param_enc arg specifies the way"
" the ec parameters are encoded\n");
BIO_printf(bio_err, " in the asn1 der " "encoding\n");
BIO_printf(bio_err, " possible values:"
" named_curve (default)\n");
BIO_printf(bio_err, " "
"explicit\n");
goto end;
}
ERR_load_crypto_strings();
e = setup_engine(bio_err, engine, 0);
if (!app_passwd(bio_err, passargin, passargout, &passin, &passout)) {
BIO_printf(bio_err, "Error getting passwords\n");
goto end;
}
in = BIO_new(BIO_s_file());
out = BIO_new(BIO_s_file());
if ((in == NULL) || (out == NULL)) {
ERR_print_errors(bio_err);
goto end;
}
if (infile == NULL)
BIO_set_fp(in, stdin, BIO_NOCLOSE);
else {
if (BIO_read_filename(in, infile) <= 0) {
perror(infile);
goto end;
}
}
BIO_printf(bio_err, "read EC key\n");
if (informat == FORMAT_ASN1) {
if (pubin)
eckey = d2i_EC_PUBKEY_bio(in, NULL);
else
eckey = d2i_ECPrivateKey_bio(in, NULL);
} else if (informat == FORMAT_PEM) {
if (pubin)
eckey = PEM_read_bio_EC_PUBKEY(in, NULL, NULL, NULL);
else
eckey = PEM_read_bio_ECPrivateKey(in, NULL, NULL, passin);
} else {
BIO_printf(bio_err, "bad input format specified for key\n");
goto end;
}
if (eckey == NULL) {
BIO_printf(bio_err, "unable to load Key\n");
ERR_print_errors(bio_err);
goto end;
}
if (outfile == NULL) {
BIO_set_fp(out, stdout, BIO_NOCLOSE);
# ifdef OPENSSL_SYS_VMS
{
BIO *tmpbio = BIO_new(BIO_f_linebuffer());
out = BIO_push(tmpbio, out);
}
# endif
} else {
if (BIO_write_filename(out, outfile) <= 0) {
perror(outfile);
goto end;
}
}
group = EC_KEY_get0_group(eckey);
if (new_form)
EC_KEY_set_conv_form(eckey, form);
if (new_asn1_flag)
EC_KEY_set_asn1_flag(eckey, asn1_flag);
if (text)
if (!EC_KEY_print(out, eckey, 0)) {
perror(outfile);
ERR_print_errors(bio_err);
goto end;
}
if (noout) {
ret = 0;
goto end;
}
BIO_printf(bio_err, "writing EC key\n");
if (outformat == FORMAT_ASN1) {
if (param_out)
i = i2d_ECPKParameters_bio(out, group);
else if (pubin || pubout)
i = i2d_EC_PUBKEY_bio(out, eckey);
else
i = i2d_ECPrivateKey_bio(out, eckey);
} else if (outformat == FORMAT_PEM) {
if (param_out)
i = PEM_write_bio_ECPKParameters(out, group);
else if (pubin || pubout)
i = PEM_write_bio_EC_PUBKEY(out, eckey);
else
i = PEM_write_bio_ECPrivateKey(out, eckey, enc,
NULL, 0, NULL, passout);
} else {
BIO_printf(bio_err, "bad output format specified for " "outfile\n");
goto end;
}
if (!i) {
BIO_printf(bio_err, "unable to write private key\n");
ERR_print_errors(bio_err);
} else
ret = 0;
end:
if (in)
BIO_free(in);
if (out)
BIO_free_all(out);
if (eckey)
EC_KEY_free(eckey);
release_engine(e);
if (passin)
OPENSSL_free(passin);
if (passout)
OPENSSL_free(passout);
apps_shutdown();
OPENSSL_EXIT(ret);
}
#else /* !OPENSSL_NO_EC */
# if PEDANTIC
static void *dummy = &dummy;
# endif
#endif
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(ForeignDir)\cml-1_0_2;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup />
</Project> | {
"pile_set_name": "Github"
} |
/*
* Q40 I/O port IDE Driver
*
* (c) Richard Zidlicky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
*
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/module.h>
#include <asm/ide.h>
/*
* Bases of the IDE interfaces
*/
#define Q40IDE_NUM_HWIFS 2
#define PCIDE_BASE1 0x1f0
#define PCIDE_BASE2 0x170
#define PCIDE_BASE3 0x1e8
#define PCIDE_BASE4 0x168
#define PCIDE_BASE5 0x1e0
#define PCIDE_BASE6 0x160
static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
PCIDE_BASE1, PCIDE_BASE2, /* PCIDE_BASE3, PCIDE_BASE4 , PCIDE_BASE5,
PCIDE_BASE6 */
};
static int q40ide_default_irq(unsigned long base)
{
switch (base) {
case 0x1f0: return 14;
case 0x170: return 15;
case 0x1e8: return 11;
default:
return 0;
}
}
/*
* Addresses are pretranslated for Q40 ISA access.
*/
static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, int irq)
{
memset(hw, 0, sizeof(*hw));
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
hw->io_ports.data_addr = Q40_ISA_IO_W(base);
hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);
hw->irq = irq;
}
static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
unsigned long data_addr = drive->hwif->io_ports.data_addr;
if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
__ide_mm_insw(data_addr, buf, (len + 1) / 2);
return;
}
raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
}
static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
unsigned long data_addr = drive->hwif->io_ports.data_addr;
if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
__ide_mm_outsw(data_addr, buf, (len + 1) / 2);
return;
}
raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
}
/* Q40 has a byte-swapped IDE interface */
static const struct ide_tp_ops q40ide_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = ide_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = q40ide_input_data,
.output_data = q40ide_output_data,
};
static const struct ide_port_info q40ide_port_info = {
.tp_ops = &q40ide_tp_ops,
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
.chipset = ide_generic,
};
/*
* the static array is needed to have the name reported in /proc/ioports,
* hwif->name unfortunately isn't available yet
*/
static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
"ide0", "ide1"
};
/*
* Probe for Q40 IDE interfaces
*/
static int __init q40ide_init(void)
{
int i;
struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
if (!MACH_IS_Q40)
return -ENODEV;
printk(KERN_INFO "ide: Q40 IDE controller\n");
for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
const char *name = q40_ide_names[i];
if (!request_region(pcide_bases[i], 8, name)) {
printk("could not reserve ports %lx-%lx for %s\n",
pcide_bases[i],pcide_bases[i]+8,name);
continue;
}
if (!request_region(pcide_bases[i]+0x206, 1, name)) {
printk("could not reserve port %lx for %s\n",
pcide_bases[i]+0x206,name);
release_region(pcide_bases[i], 8);
continue;
}
q40_ide_setup_ports(&hw[i], pcide_bases[i],
q40ide_default_irq(pcide_bases[i]));
hws[i] = &hw[i];
}
return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
}
module_init(q40ide_init);
MODULE_LICENSE("GPL");
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_xpb.h
* Author: Jason McMullan <[email protected]>
*/
#ifndef NFP6000_XPB_H
#define NFP6000_XPB_H
/* For use with NFP6000 Databook "XPB Addressing" section
*/
#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter
*/
#define NFP_XPB_DEVICE(island, slave, device) \
(NFP_XPB_OVERLAY(island) | \
(((slave) & 3) << 22) | \
(((device) & 0x3f) << 16))
#endif /* NFP6000_XPB_H */
| {
"pile_set_name": "Github"
} |
package org.concordion.internal;
import org.concordion.api.Fixture;
public class SimpleEvaluator extends OgnlEvaluator {
public SimpleEvaluator(Fixture fixture) {
super(fixture);
}
@Override
public Object evaluate(String expression) {
validateEvaluationExpression(expression);
return super.evaluate(expression);
}
@Override
public void setVariable(String expression, Object value) {
validateSetVariableExpression(expression);
super.setVariable(expression, value);
}
public static void validateEvaluationExpression(String expression) {
if (!EVALUATION_PATTERNS.matches(expression)) {
throw new RuntimeException("Invalid expression [" + expression + "]");
}
}
public static void validateSetVariableExpression(String expression) {
if (!SET_VARIABLE_PATTERNS.matches(expression)) {
throw new RuntimeException("Invalid 'set' expression [" + expression + "]");
}
}
private static final String METHOD_NAME_PATTERN = "[a-z][a-zA-Z0-9_]*";
private static final String PROPERTY_NAME_PATTERN = "[a-z][a-zA-Z0-9_]*";
private static final String STRING_PATTERN = "'[^']+'";
private static final String LHS_VARIABLE_PATTERN = "#" + METHOD_NAME_PATTERN;
private static final String RHS_VARIABLE_PATTERN = "(" + LHS_VARIABLE_PATTERN + "|#TEXT|#HREF|#LEVEL)";
private static final String METHOD_CALL_PARAMS = METHOD_NAME_PATTERN + " *\\( *" + RHS_VARIABLE_PATTERN + "(, *" + RHS_VARIABLE_PATTERN + " *)*\\)";
private static final String METHOD_CALL_NO_PARAMS = METHOD_NAME_PATTERN + " *\\( *\\)";
private static final String TERNARY_STRING_RESULT = " \\? " + STRING_PATTERN + " : " + STRING_PATTERN;
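// Examples matched by EVALUATION_PATTERNS (names are illustrative):
// myProp PROPERTY
// myMethod() METHOD
// myMethod(#a, #b) METHOD_WITH_PARAMS
// #var ? 'yes' : 'no' VARIABLE + TERNARY_STRING_RESULT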
private static final MultiPattern EVALUATION_PATTERNS = MultiPattern.fromRegularExpressions(
PROPERTY_NAME_PATTERN,
METHOD_CALL_NO_PARAMS,
METHOD_CALL_PARAMS,
RHS_VARIABLE_PATTERN,
LHS_VARIABLE_PATTERN + "(\\." + PROPERTY_NAME_PATTERN + ")+",
LHS_VARIABLE_PATTERN + " *= *" + PROPERTY_NAME_PATTERN,
LHS_VARIABLE_PATTERN + " *= *" + METHOD_CALL_NO_PARAMS,
LHS_VARIABLE_PATTERN + " *= *" + METHOD_CALL_PARAMS,
LHS_VARIABLE_PATTERN + TERNARY_STRING_RESULT,
PROPERTY_NAME_PATTERN + TERNARY_STRING_RESULT,
METHOD_CALL_NO_PARAMS + TERNARY_STRING_RESULT,
METHOD_CALL_PARAMS + TERNARY_STRING_RESULT,
LHS_VARIABLE_PATTERN + "\\." + METHOD_CALL_NO_PARAMS,
LHS_VARIABLE_PATTERN + "\\." + METHOD_CALL_PARAMS);
// #var VARIABLE
// #var = myProp VARIABLE = PROPERTY
// #var = myMethod() VARIABLE = METHOD
// #var = myMethod(var1) VARIABLE = METHOD_WITH_PARAM
// #var = myMethod(var1, var2) VARIABLE = METHOD_WITH_MULTIPLE_PARAMS
private static final MultiPattern SET_VARIABLE_PATTERNS = MultiPattern.fromRegularExpressions(
RHS_VARIABLE_PATTERN,
LHS_VARIABLE_PATTERN + "\\." + PROPERTY_NAME_PATTERN,
LHS_VARIABLE_PATTERN + " *= *" + PROPERTY_NAME_PATTERN,
LHS_VARIABLE_PATTERN + " *= *" + METHOD_NAME_PATTERN + " *\\( *\\)",
LHS_VARIABLE_PATTERN + " *= *" + METHOD_NAME_PATTERN + " *\\( *" + RHS_VARIABLE_PATTERN + "(, *" + RHS_VARIABLE_PATTERN + " *)*\\)");
}
| {
"pile_set_name": "Github"
} |
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
//=============================================================================//
#ifndef BONE_MERGE_CACHE_H
#define BONE_MERGE_CACHE_H
#ifdef _WIN32
#pragma once
#endif
class C_BaseAnimating;
class CStudioHdr;
#include "mathlib/vector.h"
class CBoneMergeCache
{
public:
CBoneMergeCache();
void Init( C_BaseAnimating *pOwner );
// Updates the lookups that let it merge bones quickly.
void UpdateCache();
// This copies the transform from all bones in the followed entity that have
// names that match our bones.
void MergeMatchingBones( int boneMask );
// copy bones instead of matrices
void CopyParentToChild( const Vector parentPos[], const Quaternion parentQ[], Vector childPos[], Quaternion childQ[], int boneMask );
void CopyChildToParent( const Vector childPos[], const Quaternion childQ[], Vector parentPos[], Quaternion parentQ[], int boneMask );
// Returns true if the specified bone is one that gets merged in MergeMatchingBones.
int IsBoneMerged( int iBone ) const;
// Gets the origin for the first merge bone on the parent.
bool GetAimEntOrigin( Vector *pAbsOrigin, QAngle *pAbsAngles );
bool GetRootBone( matrix3x4_t &rootBone );
private:
// This is the entity that we're keeping the cache updated for.
C_BaseAnimating *m_pOwner;
// All the cache data is based off these. When they change, the cache data is regenerated.
// These are either all valid pointers or all NULL.
C_BaseAnimating *m_pFollow;
CStudioHdr *m_pFollowHdr;
const studiohdr_t *m_pFollowRenderHdr;
CStudioHdr *m_pOwnerHdr;
// This is the mask we need to use to set up bones on the followed entity to do the bone merge
int m_nFollowBoneSetupMask;
// Cache data.
class CMergedBone
{
public:
unsigned short m_iMyBone;
unsigned short m_iParentBone;
};
CUtlVector<CMergedBone> m_MergedBones;
CUtlVector<unsigned char> m_BoneMergeBits; // One bit for each bone. The bit is set if the bone gets merged.
};
inline int CBoneMergeCache::IsBoneMerged( int iBone ) const
{
if ( m_pOwnerHdr )
return m_BoneMergeBits[iBone >> 3] & ( 1 << ( iBone & 7 ) );
else
return 0;
}
#endif // BONE_MERGE_CACHE_H
| {
"pile_set_name": "Github"
} |
#!/bin/bash
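# Stream the contents of people.json to the /persons/stream endpoint via HTTPie (the 'http' client)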
http -v POST :8080/persons/stream < people.json | {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
@interface ChatRoomUtils : NSObject
{
}
+ (void)onAddChatroomMemberTooMuch:(id)arg1 delegate:(id)arg2;
+ (void)onNeedUpgradeChatRoom:(id)arg1 delegate:(id)arg2 tag:(int)arg3;
+ (void)bindCardUpgradeChatRoom:(id)arg1 ViewController:(id)arg2 forScene:(int)arg3;
+ (void)openVerifyContactProfile:(id)arg1 forChatRoom:(id)arg2 inViewController:(id)arg3 delegate:(id)arg4;
+ (void)genFildsOfContact:(id)arg1 toContact:(id)arg2;
+ (void)openContactProfile:(id)arg1 ofChatRoomContact:(id)arg2 inViewController:(id)arg3 delegate:(id)arg4 searchScene:(int)arg5;
+ (void)openContactProfile:(id)arg1 ofChatRoomContact:(id)arg2 inViewController:(id)arg3 delegate:(id)arg4;
+ (void)openAddContactWithMemberList:(id)arg1 inViewController:(id)arg2 delegate:(id)arg3;
@end
| {
"pile_set_name": "Github"
} |
.cm-s-elegant span.cm-number, .cm-s-elegant span.cm-string, .cm-s-elegant span.cm-atom {color: #762;}
.cm-s-elegant span.cm-comment {color: #262; font-style: italic; line-height: 1em;}
.cm-s-elegant span.cm-meta {color: #555; font-style: italic; line-height: 1em;}
.cm-s-elegant span.cm-variable {color: black;}
.cm-s-elegant span.cm-variable-2 {color: #b11;}
.cm-s-elegant span.cm-qualifier {color: #555;}
.cm-s-elegant span.cm-keyword {color: #730;}
.cm-s-elegant span.cm-builtin {color: #30a;}
.cm-s-elegant span.cm-link {color: #762;}
.cm-s-elegant span.cm-error {background-color: #fdd;}
.cm-s-elegant .CodeMirror-activeline-background {background: #e8f2ff !important;}
.cm-s-elegant .CodeMirror-matchingbracket {outline:1px solid grey; color:black !important;}
| {
"pile_set_name": "Github"
} |
'use strict';
var ctx = require('./_ctx')
, $export = require('./_export')
, toObject = require('./_to-object')
, call = require('./_iter-call')
, isArrayIter = require('./_is-array-iter')
, toLength = require('./_to-length')
, createProperty = require('./_create-property')
, getIterFn = require('./core.get-iterator-method');
$export($export.S + $export.F * !require('./_iter-detect')(function(iter){ Array.from(iter); }), 'Array', {
// 22.1.2.1 Array.from(arrayLike, mapfn = undefined, thisArg = undefined)
from: function from(arrayLike/*, mapfn = undefined, thisArg = undefined*/){
var O = toObject(arrayLike)
, C = typeof this == 'function' ? this : Array
, aLen = arguments.length
, mapfn = aLen > 1 ? arguments[1] : undefined
, mapping = mapfn !== undefined
, index = 0
, iterFn = getIterFn(O)
, length, result, step, iterator;
if(mapping)mapfn = ctx(mapfn, aLen > 2 ? arguments[2] : undefined, 2);
// if object isn't iterable or it's array with default iterator - use simple case
if(iterFn != undefined && !(C == Array && isArrayIter(iterFn))){
for(iterator = iterFn.call(O), result = new C; !(step = iterator.next()).done; index++){
createProperty(result, index, mapping ? call(iterator, mapfn, [step.value, index], true) : step.value);
}
} else {
length = toLength(O.length);
for(result = new C(length); length > index; index++){
createProperty(result, index, mapping ? mapfn(O[index], index) : O[index]);
}
}
result.length = index;
return result;
}
});
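// Behaviour supplied by this polyfill, per the ES2015 spec:
//   Array.from('abc');                                      // ['a', 'b', 'c']
//   Array.from([1, 2, 3], function (x) { return x * 2; });  // [2, 4, 6]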
| {
"pile_set_name": "Github"
} |
package org.support.project.di;
import org.support.project.di.DI;
import org.support.project.di.Instance;
@DI(instance = Instance.Prototype)
public class TestNoAspectObject {
private String hoge;
/**
 * Gets hoge.
 *
 * @return hoge
 */
public String getHoge() {
return hoge;
}
/**
 * Sets hoge.
 *
 * @param hoge
 *            hoge
 */
public TestNoAspectObject setHoge(String hoge) {
this.hoge = hoge;
return this;
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.executor.cdi.impl.jpa;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;
import javax.inject.Inject;
import javax.persistence.EntityManagerFactory;
import javax.persistence.PersistenceUnit;
import org.jbpm.executor.ExecutorServiceFactory;
import org.jbpm.executor.impl.ExecutorServiceImpl;
import org.jbpm.executor.impl.event.ExecutorEventSupportImpl;
import org.jbpm.executor.impl.jpa.ExecutorQueryServiceImpl;
import org.jbpm.executor.impl.jpa.ExecutorRequestAdminServiceImpl;
import org.jbpm.executor.impl.jpa.JPAExecutorStoreService;
import org.jbpm.shared.services.impl.TransactionalCommandService;
import org.kie.api.executor.Executor;
import org.kie.api.executor.ExecutorAdminService;
import org.kie.api.executor.ExecutorQueryService;
import org.kie.api.executor.ExecutorService;
import org.kie.api.executor.ExecutorStoreService;
@ApplicationScoped
public class JPAExecutorServiceProducer {
@Inject
@PersistenceUnit(unitName = "org.jbpm.domain")
private EntityManagerFactory emf;
private ExecutorEventSupportImpl eventSupport = new ExecutorEventSupportImpl();
private ExecutorService service;
@PostConstruct
public void setup() {
service = ExecutorServiceFactory.newExecutorService(emf, eventSupport);
}
@Produces
public ExecutorService produceExecutorService() {
return service;
}
@Produces
public Executor produceExecutor() {
return ((ExecutorServiceImpl)service).getExecutor();
}
@Produces
public ExecutorEventSupportImpl produceExecutorEventSupport() {
return eventSupport;
}
@Produces
public ExecutorStoreService produceStoreService() {
ExecutorStoreService storeService = new JPAExecutorStoreService(true);
TransactionalCommandService commandService = new TransactionalCommandService(emf);
((JPAExecutorStoreService) storeService).setCommandService(commandService);
((JPAExecutorStoreService) storeService).setEmf(emf);
return storeService;
}
@Produces
public ExecutorAdminService produceAdminService() {
ExecutorAdminService adminService = new ExecutorRequestAdminServiceImpl();
TransactionalCommandService commandService = new TransactionalCommandService(emf);
((ExecutorRequestAdminServiceImpl) adminService).setCommandService(commandService);
return adminService;
}
@Produces
public ExecutorQueryService produceQueryService() {
ExecutorQueryService queryService = new ExecutorQueryServiceImpl(true);
TransactionalCommandService commandService = new TransactionalCommandService(emf);
((ExecutorQueryServiceImpl) queryService).setCommandService(commandService);
return queryService;
}
}
| {
"pile_set_name": "Github"
} |
<?php
/**
* Laravel - A PHP Framework For Web Artisans
*
* @package Laravel
 * @author Taylor Otwell <taylor@laravel.com>
*/
/*
|--------------------------------------------------------------------------
| Register The Auto Loader
|--------------------------------------------------------------------------
|
| Composer provides a convenient, automatically generated class loader for
| our application. We just need to utilize it! We'll simply require it
| into the script here so that we don't have to worry about manual
| loading any of our classes later on. It feels great to relax.
|
*/
require __DIR__.'/../bootstrap/autoload.php';
/*
|--------------------------------------------------------------------------
| Turn On The Lights
|--------------------------------------------------------------------------
|
| We need to illuminate PHP development, so let us turn on the lights.
| This bootstraps the framework and gets it ready for use, then it
| will load up this application so that we can run it and send
| the responses back to the browser and delight our users.
|
*/
$app = require_once __DIR__.'/../bootstrap/app.php';
/*
|--------------------------------------------------------------------------
| Run The Application
|--------------------------------------------------------------------------
|
| Once we have the application, we can handle the incoming request
| through the kernel, and send the associated response back to
| the client's browser allowing them to enjoy the creative
| and wonderful application we have prepared for them.
|
*/
$kernel = $app->make(Illuminate\Contracts\Http\Kernel::class);
$response = $kernel->handle(
$request = Illuminate\Http\Request::capture()
);
$response->send();
$kernel->terminate($request, $response);
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<DDDefinition xmlns="http://www.cern.ch/cms/DDL" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.cern.ch/cms/DDL ../../../DetectorDescription/Schema/DDLSchema.xsd">
<SolidSection label="ebapd.xml">
<Box name="ECAL" dx="10*cm" dy="10*cm" dz="1*cm"/>
</SolidSection>
<LogicalPartSection label="ebapd.xml">
<LogicalPart name="ECAL" category="unspecified">
<rSolid name="ECAL"/>
<rMaterial name="materials:Air"/>
</LogicalPart>
</LogicalPartSection>
<Algorithm name="ecal:DDEcalAPDAlgo">
<rParent name="ebapd:ECAL"/>
<!-- APD assembly has capsule, ceramic, bulk silicon, epoxy, sensitive silicon-->
<Vector name="CerPos" type="numeric" nEntries="3">
0*mm, 0*mm, 0*mm
</Vector>
<Numeric name="APDHere" value="1"/>
<String name="CapName" value="ECAP"/>
<String name="CapMat" value="materials:Air"/>
<Numeric name="CapXSize" value=" 23*mm"/>
<Numeric name="CapYSize" value=" 20*mm"/>
<Numeric name="CapThick" value=" 4*mm"/>
<String name="SGLName" value="ESGL"/>
<String name="SGLMat" value="materials:Silicone_Gel"/>
<Numeric name="SGLThick" value=" 0.030*mm"/>
<String name="CerName" value="ECER"/>
<String name="CerMat" value="materials:Borosilicate_Glass"/>
<Numeric name="CerXSize" value=" 9.1*mm"/>
<Numeric name="CerYSize" value="10.5*mm"/>
<Numeric name="CerThick" value=" 1.55*mm + 0.575*mm - 0.350*mm"/>
<String name="AGLName" value="EAGL"/>
<String name="AGLMat" value="materials:E_Epoxy"/>
<Numeric name="AGLThick" value=" 0.575*mm"/>
<String name="BSiName" value="EBSI"/>
<String name="BSiMat" value="materials:E_Silicon"/>
<Numeric name="BSiXSize" value=" 7.5*mm"/>
<Numeric name="BSiYSize" value=" 7.5*mm"/>
<Numeric name="BSiThick" value=" 0.400*mm + 0.575*mm"/>
<String name="APDName" value="EAPD"/>
<String name="APDMat" value="materials:E_Silicon"/>
<Numeric name="APDSide" value=" 5*mm"/>
<Numeric name="APDThick" value=" 0.005*mm"/>
<Numeric name="APDZ" value="12.5*mm"/>
<Numeric name="APDX1" value=" 7.2*mm"/>
<Numeric name="APDX2" value="16.8*mm"/>
<String name="ATJName" value="EATJ"/>
<String name="ATJMat" value="materials:E_Silicon"/>
<Numeric name="ATJThick" value=" 0.0449*mm"/>
<String name="ANDName" value="EAND"/>
<String name="ANDMat" value="materials:E_Silicon"/>
<Numeric name="ANDThick" value=" 0.0001*mm"/>
</Algorithm>
</DDDefinition>
| {
"pile_set_name": "Github"
} |
# copystructure
copystructure is a Go library for deep copying values in Go.
This allows you to copy Go values that may contain reference values
such as maps, slices, or pointers, and copy their data as well instead
of just their references.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/copystructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
The `Copy` function has examples associated with it there.
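As a quick sketch (the `Copy` function below is the library's documented entry point; the data values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	original := map[string]interface{}{
		"tags": []string{"a", "b"},
	}

	// Copy returns a deep copy as interface{}, so assert back to the concrete type.
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}
	copied := dup.(map[string]interface{})

	// Mutating the copy does not touch the original's underlying slice.
	copied["tags"].([]string)[0] = "changed"
	fmt.Println(original["tags"]) // [a b]
	fmt.Println(copied["tags"])   // [changed b]
}
```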
| {
"pile_set_name": "Github"
} |