text
stringlengths 2
100k
| meta
dict |
---|---|
package com.orientechnologies.orient.setup;
import com.orientechnologies.orient.core.db.OrientDB;
import com.orientechnologies.orient.core.db.OrientDBConfig;
import java.util.Collection;
/**
 * A setup allows creating and managing a cluster of OrientDB servers for integration tests.
 *
 * <p>Implementations provision the servers (e.g. locally or on a remote environment),
 * expose their addresses, and hand out remote {@link OrientDB} connections to them.
 */
public interface TestSetup {
  /** Provisions and starts all servers of the cluster. */
  void setup() throws TestSetupException;

  /** Stops all servers and releases every resource held by this setup. */
  void teardown() throws TestSetupException;

  /** Starts (or restarts) the server identified by {@code serverId}. */
  void startServer(String serverId) throws TestSetupException;

  /** Shuts down the server identified by {@code serverId}. */
  void shutdownServer(String serverId) throws TestSetupException;

  /**
   * Returns the address of the given server for the requested port type
   * (HTTP or binary protocol).
   */
  String getAddress(String serverId, PortType port);

  /** Opens a remote {@link OrientDB} connection to the given server. */
  OrientDB createRemote(String serverId, OrientDBConfig config);

  /** Opens a remote {@link OrientDB} connection to the given server with explicit credentials. */
  OrientDB createRemote(
      String serverId, String serverUser, String serverPassword, OrientDBConfig config);

  /**
   * Opens a remote {@link OrientDB} connection configured with the addresses of all the given
   * servers, using the provided credentials.
   */
  OrientDB createRemote(
      Collection<String> serverIds,
      String serverUser,
      String serverPassword,
      OrientDBConfig config);

  /** Kinds of ports a server exposes. */
  enum PortType {
    HTTP,
    BINARY
  }
}
| {
"pile_set_name": "Github"
} |
/**
 * Typography
 * --------------------------------------------------
 */

// NOTE(review): the `/` divisions below use legacy Sass slash-division;
// newer Dart Sass requires math.div() — confirm the Sass version before migrating.

// Body text
// -------------------------

p {
  margin: 0 0 ($line-height-computed / 2);
}

// Emphasis & misc
// -------------------------

small { font-size: 85%; }
cite { font-style: normal; }

// Alignment
// -------------------------

.text-left { text-align: left; }
.text-right { text-align: right; }
.text-center { text-align: center; }

// Headings
// -------------------------

h1, h2, h3, h4, h5, h6,
.h1, .h2, .h3, .h4, .h5, .h6 {
  color: $base-color;
  font-weight: $headings-font-weight;
  font-family: $headings-font-family;
  line-height: $headings-line-height;

  // nested <small> inherits the heading size but is visually de-emphasized
  small {
    font-weight: normal;
    line-height: 1;
  }
}

h1, .h1,
h2, .h2,
h3, .h3 {
  margin-top: $line-height-computed;
  margin-bottom: ($line-height-computed / 2);

  &:first-child {
    margin-top: 0;
  }

  // halve the gap between two directly adjacent large headings
  + h1, + .h1,
  + h2, + .h2,
  + h3, + .h3 {
    margin-top: ($line-height-computed / 2);
  }
}

h4, .h4,
h5, .h5,
h6, .h6 {
  margin-top: ($line-height-computed / 2);
  margin-bottom: ($line-height-computed / 2);
}

h1, .h1 { font-size: floor($font-size-base * 2.60); } // ~36px
h2, .h2 { font-size: floor($font-size-base * 2.15); } // ~30px
h3, .h3 { font-size: ceil($font-size-base * 1.70); } // ~24px
h4, .h4 { font-size: ceil($font-size-base * 1.25); } // ~18px
h5, .h5 { font-size: $font-size-base; }
h6, .h6 { font-size: ceil($font-size-base * 0.85); } // ~12px

h1 small, .h1 small { font-size: ceil($font-size-base * 1.70); } // ~24px
h2 small, .h2 small { font-size: ceil($font-size-base * 1.25); } // ~18px
h3 small, .h3 small,
h4 small, .h4 small { font-size: $font-size-base; }

// Description Lists
// -------------------------

dl {
  margin-bottom: $line-height-computed;
}
dt,
dd {
  line-height: $line-height-base;
}
dt {
  font-weight: bold;
}

// Blockquotes
// -------------------------

blockquote {
  margin: 0 0 $line-height-computed;
  padding: ($line-height-computed / 2) $line-height-computed;
  border-left: 5px solid gray;

  p {
    font-weight: 300;
    font-size: ($font-size-base * 1.25);
    line-height: 1.25;
  }

  p:last-child {
    margin-bottom: 0;
  }

  // attribution line, prefixed with an em dash
  small {
    display: block;
    line-height: $line-height-base;

    &:before {
      content: '\2014 \00A0'; // EM DASH, NBSP
    }
  }
}

// Quotes
// -------------------------

// suppress the browser's automatic quotation marks
q:before,
q:after,
blockquote:before,
blockquote:after {
  content: "";
}

// Addresses
// -------------------------

address {
  display: block;
  margin-bottom: $line-height-computed;
  font-style: normal;
  line-height: $line-height-base;
}

// Links
// -------------------------

a {
  color: $link-color;
}

// de-emphasized inline link (secondary actions)
a.subdued {
  padding-right: 10px;
  color: #888;
  text-decoration: none;

  &:hover {
    text-decoration: none;
  }
  &:last-child {
    padding-right: 0;
  }
}
| {
"pile_set_name": "Github"
} |
/////////////////////////////////////////////////////////////////////////////
// Name: wx/univ/toplevel.h
// Purpose: Top level window, abstraction of wxFrame and wxDialog
// Author: Vaclav Slavik
// Copyright: (c) 2001-2002 SciTech Software, Inc. (www.scitechsoft.com)
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
#ifndef __WX_UNIV_TOPLEVEL_H__
#define __WX_UNIV_TOPLEVEL_H__
#include "wx/univ/inpcons.h"
#include "wx/univ/inphand.h"
#include "wx/icon.h"
// ----------------------------------------------------------------------------
// constants
// ----------------------------------------------------------------------------

// frame decorations type flags used in wxRenderer and wxColourScheme
enum
{
    wxTOPLEVEL_ACTIVE          = 0x00000001,
    wxTOPLEVEL_MAXIMIZED       = 0x00000002,
    wxTOPLEVEL_TITLEBAR        = 0x00000004,
    wxTOPLEVEL_ICON            = 0x00000008,
    wxTOPLEVEL_RESIZEABLE      = 0x00000010,
    wxTOPLEVEL_BORDER          = 0x00000020,
    wxTOPLEVEL_BUTTON_CLOSE    = 0x01000000,
    wxTOPLEVEL_BUTTON_MAXIMIZE = 0x02000000,
    wxTOPLEVEL_BUTTON_ICONIZE  = 0x04000000,
    wxTOPLEVEL_BUTTON_RESTORE  = 0x08000000,
    wxTOPLEVEL_BUTTON_HELP     = 0x10000000
};

// frame hit test return values:
enum
{
    wxHT_TOPLEVEL_NOWHERE         = 0x00000000,
    wxHT_TOPLEVEL_CLIENT_AREA     = 0x00000001,
    wxHT_TOPLEVEL_ICON            = 0x00000002,
    wxHT_TOPLEVEL_TITLEBAR        = 0x00000004,
    // the border values are single bits so that corners combine two edges
    wxHT_TOPLEVEL_BORDER_N        = 0x00000010,
    wxHT_TOPLEVEL_BORDER_S        = 0x00000020,
    wxHT_TOPLEVEL_BORDER_E        = 0x00000040,
    wxHT_TOPLEVEL_BORDER_W        = 0x00000080,
    wxHT_TOPLEVEL_BORDER_NE       = wxHT_TOPLEVEL_BORDER_N | wxHT_TOPLEVEL_BORDER_E,
    wxHT_TOPLEVEL_BORDER_SE       = wxHT_TOPLEVEL_BORDER_S | wxHT_TOPLEVEL_BORDER_E,
    wxHT_TOPLEVEL_BORDER_NW       = wxHT_TOPLEVEL_BORDER_N | wxHT_TOPLEVEL_BORDER_W,
    wxHT_TOPLEVEL_BORDER_SW       = wxHT_TOPLEVEL_BORDER_S | wxHT_TOPLEVEL_BORDER_W,
    wxHT_TOPLEVEL_ANY_BORDER      = 0x000000F0,
    // the button hit-test values deliberately reuse the wxTOPLEVEL_BUTTON_*
    // bits (shown in the comments) so the two flag sets can be mixed directly
    wxHT_TOPLEVEL_BUTTON_CLOSE    = /*0x01000000*/ wxTOPLEVEL_BUTTON_CLOSE,
    wxHT_TOPLEVEL_BUTTON_MAXIMIZE = /*0x02000000*/ wxTOPLEVEL_BUTTON_MAXIMIZE,
    wxHT_TOPLEVEL_BUTTON_ICONIZE  = /*0x04000000*/ wxTOPLEVEL_BUTTON_ICONIZE,
    wxHT_TOPLEVEL_BUTTON_RESTORE  = /*0x08000000*/ wxTOPLEVEL_BUTTON_RESTORE,
    wxHT_TOPLEVEL_BUTTON_HELP     = /*0x10000000*/ wxTOPLEVEL_BUTTON_HELP,
    wxHT_TOPLEVEL_ANY_BUTTON      = 0x1F000000
};

// Flags for interactive frame manipulation functions (only in wxUniversal):
enum
{
    wxINTERACTIVE_MOVE           = 0x00000001,
    wxINTERACTIVE_RESIZE         = 0x00000002,
    wxINTERACTIVE_RESIZE_S       = 0x00000010,
    wxINTERACTIVE_RESIZE_N       = 0x00000020,
    wxINTERACTIVE_RESIZE_W       = 0x00000040,
    wxINTERACTIVE_RESIZE_E       = 0x00000080,
    wxINTERACTIVE_WAIT_FOR_INPUT = 0x10000000
};

// ----------------------------------------------------------------------------
// the actions supported by this control
// ----------------------------------------------------------------------------

#define wxACTION_TOPLEVEL_ACTIVATE       wxT("activate")   // (de)activate the frame
#define wxACTION_TOPLEVEL_BUTTON_PRESS   wxT("pressbtn")   // press titlebar btn
#define wxACTION_TOPLEVEL_BUTTON_RELEASE wxT("releasebtn") // release titlebar btn
#define wxACTION_TOPLEVEL_BUTTON_CLICK   wxT("clickbtn")   // click titlebar btn
#define wxACTION_TOPLEVEL_MOVE           wxT("move")       // move the frame
#define wxACTION_TOPLEVEL_RESIZE         wxT("resize")     // resize the frame
//-----------------------------------------------------------------------------
// wxTopLevelWindow
//-----------------------------------------------------------------------------

class WXDLLIMPEXP_CORE wxTopLevelWindow : public wxTopLevelWindowNative,
                                          public wxInputConsumer
{
public:
    // construction
    wxTopLevelWindow() { Init(); }
    wxTopLevelWindow(wxWindow *parent,
                     wxWindowID id,
                     const wxString& title,
                     const wxPoint& pos = wxDefaultPosition,
                     const wxSize& size = wxDefaultSize,
                     long style = wxDEFAULT_FRAME_STYLE,
                     const wxString& name = wxFrameNameStr)
    {
        Init();
        Create(parent, id, title, pos, size, style, name);
    }

    bool Create(wxWindow *parent,
                wxWindowID id,
                const wxString& title,
                const wxPoint& pos = wxDefaultPosition,
                const wxSize& size = wxDefaultSize,
                long style = wxDEFAULT_FRAME_STYLE,
                const wxString& name = wxFrameNameStr);

    // wxUniv-specific methods: do [not] use native decorations for this (or
    // all) window(s)
    //
    // notice that this has no effect if the system doesn't support any native
    // decorations anyhow and that by default native decorations are used
    //
    // if UseNativeDecorations() is used, it must be called before Create()
    static void UseNativeDecorationsByDefault(bool native = true);
    void UseNativeDecorations(bool native = true);
    bool IsUsingNativeDecorations() const;

    // implement base class pure virtuals
    virtual bool ShowFullScreen(bool show, long style = wxFULLSCREEN_ALL);
    virtual wxPoint GetClientAreaOrigin() const;
    virtual void SetIcons(const wxIconBundle& icons);

    // implementation from now on
    // --------------------------

    // tests for frame's part at given point; returns a wxHT_TOPLEVEL_xxx value
    long HitTest(const wxPoint& pt) const;

    virtual bool PerformAction(const wxControlAction& action,
                               long numArg = -1,
                               const wxString& strArg = wxEmptyString);

    static wxInputHandler *GetStdInputHandler(wxInputHandler *handlerDef);
    virtual wxInputHandler *DoGetStdInputHandler(wxInputHandler *handlerDef)
    {
        return GetStdInputHandler(handlerDef);
    }

    // move/resize the frame interactively, i.e. let the user do it
    virtual void InteractiveMove(int flags = wxINTERACTIVE_MOVE);

    virtual wxSize GetMinSize() const;

    virtual wxWindow *GetInputWindow() const { return const_cast<wxTopLevelWindow*>(this); }

protected:
    virtual void DoGetClientSize(int *width, int *height) const;
    virtual void DoSetClientSize(int width, int height);

    // handle titlebar button click event
    virtual void ClickTitleBarButton(long button);

    // return wxTOPLEVEL_xxx combination based on current state of the frame
    long GetDecorationsStyle() const;

    // common part of all ctors
    void Init();

    // refresh the titlebar (presumably triggers a non-client repaint — confirm)
    void RefreshTitleBar();

    // event handlers (wired through the event table declared below)
    void OnNcPaint(wxNcPaintEvent& event);
    void OnSystemMenu(wxCommandEvent& event);

    // true if wxTLW should render decorations (aka titlebar) itself
    // NOTE(review): declared int, not bool — presumably a third value means
    // "not yet determined"; confirm before changing the type
    static int ms_drawDecorations;

    // true if wxTLW can be iconized
    static int ms_canIconize;

    // true if we're using native decorations
    bool m_usingNativeDecorations;

    // true for currently active frame
    bool m_isActive;

    // version of icon for titlebar (16x16)
    wxIcon m_titlebarIcon;

    // saved window style in fullscreen mode
    long m_fsSavedStyle;

    // currently pressed titlebar button
    long m_pressedButton;

    DECLARE_DYNAMIC_CLASS(wxTopLevelWindow)
    DECLARE_EVENT_TABLE()
    WX_DECLARE_INPUT_CONSUMER()
};
#endif // __WX_UNIV_TOPLEVEL_H__
| {
"pile_set_name": "Github"
} |
# $ReOpenLDAP$
This directory contains slapd overlays specific to samba4 LDAP backend:
- pguid (not used)
- rdnval (under evaluation)
- vernum (under evaluation)
- PGUID
This overlay maintains the operational attribute "parentUUID". It contains
the entryUUID of the parent entry. This overlay is not being considered
right now.
- RDNVAL
This overlay maintains the operational attribute "rdnValue". It contains
the value of the entry's RDN. This attribute is defined by the overlay
itself as
( 1.3.6.1.4.1.4203.666.1.58
NAME 'rdnValue'
DESC 'the value of the naming attributes'
SYNTAX '1.3.6.1.4.1.1466.115.121.1.15'
EQUALITY caseIgnoreMatch
USAGE dSAOperation
NO-USER-MODIFICATION )
under ReOpenLDAP's development OID arc. This OID is temporary.
To use the overlay, add:
moduleload contrib-rdnval.la
...
database <whatever>
...
overlay rdnval
to your slapd configuration file. An instance is required for each database
that needs to maintain this attribute.
- VERNUM
This overlay increments a counter any time an attribute is modified.
It is intended to increment the counter 'msDS-KeyVersionNumber' when
the attribute 'unicodePwd' is modified.
These overlays are only set up to be built as dynamically loaded modules.
On most platforms, in order for the modules to be usable, all of the
library dependencies must also be available as shared libraries.
If you need to build the overlays statically, you will have to move them
into the slapd/overlays directory and edit the Makefile and overlays.c
to reference them.
---
Copyright 1992-2018 ReOpenLDAP AUTHORS: please see AUTHORS file.
All rights reserved.
This file is part of ReOpenLDAP.
Redistribution and use in source and binary forms, with or without
modification, are permitted only as authorized by the OpenLDAP
Public License.
A copy of this license is available in the file LICENSE in the
top-level directory of the distribution or, alternatively, at
<http://www.OpenLDAP.org/license.html>.
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 1986 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
*/
#define BSD 211 /* 2.11 * 100, as cpp doesn't do floats */

#ifndef offsetof
#define offsetof(type, member) ((size_t)(&((type *)0)->member))
#endif

/*
 * Machine type dependent parameters.
 */
#include <machine/machparam.h>

/*
 * Machine-independent constants
 */
#ifndef NMOUNT
#define NMOUNT 2 /* number of mountable file systems */
#endif
#define MAXUPRC 20 /* max processes per user */
#define NOFILE 30 /* max open files per process */
#define NCARGS 5120 /* # characters in exec arglist */
#define NGROUPS 16 /* max number groups */
#define NOGROUP 65535 /* marker for empty group set member */

/*
 * Priorities: lower numeric value means higher scheduling priority.
 * NOTE(review): in traditional BSD kernels, sleeps at priority < PZERO are
 * not interruptible by signals — confirm against the rest of this kernel.
 */
#define PSWP 0
#define PINOD 10
#define PRIBIO 20
#define PRIUBA 24
#define PZERO 25
#define PPIPE 26
#define PSOCK 26
#define PWAIT 30
#define PLOCK 35
#define PPAUSE 40
#define PUSER 50
#define NZERO 0
#define PRIMASK 0xff /* mask of the priority bits */
#define PCATCH 0x100 /* OR'ed into the priority: catch signals during sleep — confirm */

/*
 * Signals
 */
#include <sys/signal.h>

#define NBPW sizeof(int) /* number of bytes in an integer */

#ifndef NULL
#define NULL 0
#endif
#define CMASK 026 /* default mask for file creation */
#define NODEV (dev_t)(-1)

/* CBLOCK is the size of a clist block, must be power of 2 */
#define CBLOCK 32
#define CBSIZE (CBLOCK - sizeof(struct cblock *)) /* data chars/clist */
#define CROUND (CBLOCK - 1) /* clist rounding */

#include <sys/types.h>

/*
 * File system parameters and macros.
 *
 * The file system is made out of blocks of at most MAXBSIZE units.
 */
#define MAXBSIZE 1024

/*
 * MAXPATHLEN defines the longest permissible path length
 * after expanding symbolic links. It is used to allocate
 * a temporary buffer from the buffer pool in which to do the
 * name expansion, hence should be a power of two, and must
 * be less than or equal to MAXBSIZE.
 * MAXSYMLINKS defines the maximum number of symbolic links
 * that may be expanded in a path name. It should be set high
 * enough to allow all legitimate uses, but halt infinite loops
 * reasonably quickly.
 */
#define MAXPATHLEN 256
#define MAXSYMLINKS 8

/*
 * Macros for fast min/max.
 * NOTE(review): arguments are evaluated more than once — do not pass
 * expressions with side effects.
 */
#define MIN(a,b) (((a)<(b))?(a):(b))
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/*
 * Macros for counting and rounding (integer division rounding up).
 */
#ifndef howmany
# define howmany(x,y) (((x)+((y)-1))/(y))
#endif
#define roundup(x,y) ((((x)+((y)-1))/(y))*(y))

/*
 * Maximum size of hostname recognized and stored in the kernel.
 */
#define MAXHOSTNAMELEN 64

#if defined(KERNEL) && defined(INET)
# include <machine/net_mac.h>
#endif

/*
 * MAXMEM is the maximum amount of core a process is allowed. First number is Kb.
 */
#define MAXMEM (96*1024)

/*
 * Max length of a user login name.
 */
#define MAXLOGNAME 16
| {
"pile_set_name": "Github"
} |
'use strict';
module.exports.definition = {
set: function (v) {
this._setProperty('-webkit-locale', v);
},
get: function () {
return this.getPropertyValue('-webkit-locale');
},
enumerable: true,
configurable: true
};
| {
"pile_set_name": "Github"
} |
; RUN: llc -march=mips -mcpu=mips32r2 -O0 -filetype=obj -fast-isel=0 <%s | \
; RUN: llvm-dwarfdump -v -all - | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
declare void @foo(i32*)
; void foo(int *);
;
; int f2(int a, int b) {
; int c __attribute__((aligned(16))) = a + b;
; foo(&c);
; return c;
; }
;
; int *f3(int a, int b) {
; int c __attribute__((aligned(16))) = a + b;
; int *w = alloca(c);
; foo(&c);
; return w;
; }
; CHECK: DW_TAG_subprogram
; CHECK: DW_AT_location [DW_FORM_exprloc] (DW_OP_breg29 SP_64+36)
; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000065] = "c")
; Function Attrs: nounwind
; f2: stores a+b into the 16-byte-aligned local `c`, passes &c to foo and
; returns c. The over-alignment forces a realigned stack object so the CHECK
; lines above can verify c's SP-relative debug location.
define i32 @f2(i32 signext %a, i32 signext %b) !dbg !4 {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca i32, align 4
  %c = alloca i32, align 16
  store i32 %a, i32* %a.addr, align 4
  call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !15, metadata !16), !dbg !17
  store i32 %b, i32* %b.addr, align 4
  call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !18, metadata !16), !dbg !19
  call void @llvm.dbg.declare(metadata i32* %c, metadata !20, metadata !16), !dbg !21
  %0 = load i32, i32* %a.addr, align 4, !dbg !22
  %1 = load i32, i32* %b.addr, align 4, !dbg !23
  %add = add nsw i32 %0, %1, !dbg !24
  store i32 %add, i32* %c, align 16, !dbg !21
  call void @foo(i32* %c), !dbg !25
  %2 = load i32, i32* %c, align 16, !dbg !26
  ret i32 %2, !dbg !27
}
; CHECK: DW_TAG_subprogram
; CHECK: DW_AT_location [DW_FORM_exprloc] (DW_OP_breg23 S7_64+32)
; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000065] = "c")
; f3: like f2, but additionally performs a dynamic alloca of c bytes, so the
; frame needs a frame pointer; the CHECK lines above verify that c's debug
; location is then emitted relative to a callee-saved register, not SP.
define i32* @f3(i32 signext %a, i32 signext %b) !dbg !8 {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca i32, align 4
  %c = alloca i32, align 16
  %w = alloca i32*, align 4
  store i32 %a, i32* %a.addr, align 4
  call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !28, metadata !16), !dbg !29
  store i32 %b, i32* %b.addr, align 4
  call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !30, metadata !16), !dbg !31
  call void @llvm.dbg.declare(metadata i32* %c, metadata !32, metadata !16), !dbg !33
  %0 = load i32, i32* %a.addr, align 4, !dbg !34
  %1 = load i32, i32* %b.addr, align 4, !dbg !35
  %add = add nsw i32 %0, %1, !dbg !36
  store i32 %add, i32* %c, align 16, !dbg !33
  call void @llvm.dbg.declare(metadata i32** %w, metadata !37, metadata !DIExpression(DW_OP_deref)), !dbg !38
  %2 = load i32, i32* %c, align 16, !dbg !39
  %3 = alloca i8, i32 %2, !dbg !40
  %4 = bitcast i8* %3 to i32*, !dbg !40
  store i32* %4, i32** %w, align 4, !dbg !38
  call void @foo(i32* %c), !dbg !41
  %5 = load i32*, i32** %w, align 4, !dbg !42
  ret i32* %5, !dbg !43
}
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!12, !13}
!llvm.ident = !{!14}
!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0 (trunk 251783) (llvm/trunk 251781)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
!1 = !DIFile(filename: "test.c", directory: "/home/vk/repos/tmp/dwarf")
!2 = !{}
!4 = distinct !DISubprogram(name: "f2", scope: !1, file: !1, line: 20, type: !5, isLocal: false, isDefinition: true, scopeLine: 20, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
!5 = !DISubroutineType(types: !6)
!6 = !{!7, !7, !7}
!7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
!8 = distinct !DISubprogram(name: "f3", scope: !1, file: !1, line: 27, type: !9, isLocal: false, isDefinition: true, scopeLine: 27, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
!9 = !DISubroutineType(types: !10)
!10 = !{!11, !7, !7}
!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 32, align: 32)
!12 = !{i32 2, !"Dwarf Version", i32 4}
!13 = !{i32 2, !"Debug Info Version", i32 3}
!14 = !{!"clang version 3.8.0 (trunk 251783) (llvm/trunk 251781)"}
!15 = !DILocalVariable(name: "a", arg: 1, scope: !4, file: !1, line: 20, type: !7)
!16 = !DIExpression()
!17 = !DILocation(line: 20, column: 12, scope: !4)
!18 = !DILocalVariable(name: "b", arg: 2, scope: !4, file: !1, line: 20, type: !7)
!19 = !DILocation(line: 20, column: 19, scope: !4)
!20 = !DILocalVariable(name: "c", scope: !4, file: !1, line: 21, type: !7)
!21 = !DILocation(line: 21, column: 7, scope: !4)
!22 = !DILocation(line: 21, column: 40, scope: !4)
!23 = !DILocation(line: 21, column: 44, scope: !4)
!24 = !DILocation(line: 21, column: 42, scope: !4)
!25 = !DILocation(line: 22, column: 3, scope: !4)
!26 = !DILocation(line: 23, column: 10, scope: !4)
!27 = !DILocation(line: 23, column: 3, scope: !4)
!28 = !DILocalVariable(name: "a", arg: 1, scope: !8, file: !1, line: 27, type: !7)
!29 = !DILocation(line: 27, column: 13, scope: !8)
!30 = !DILocalVariable(name: "b", arg: 2, scope: !8, file: !1, line: 27, type: !7)
!31 = !DILocation(line: 27, column: 20, scope: !8)
!32 = !DILocalVariable(name: "c", scope: !8, file: !1, line: 28, type: !7)
!33 = !DILocation(line: 28, column: 7, scope: !8)
!34 = !DILocation(line: 28, column: 40, scope: !8)
!35 = !DILocation(line: 28, column: 44, scope: !8)
!36 = !DILocation(line: 28, column: 42, scope: !8)
!37 = !DILocalVariable(name: "w", scope: !8, file: !1, line: 29, type: !11)
!38 = !DILocation(line: 29, column: 8, scope: !8)
!39 = !DILocation(line: 29, column: 19, scope: !8)
!40 = !DILocation(line: 29, column: 12, scope: !8)
!41 = !DILocation(line: 30, column: 3, scope: !8)
!42 = !DILocation(line: 31, column: 10, scope: !8)
!43 = !DILocation(line: 31, column: 3, scope: !8)
| {
"pile_set_name": "Github"
} |
# referer
修改请求头的referer字段,有些服务器会校验请求头的referer字段,这个协议可以用来绕过这个检测或者测试后台的功能,配置方式:
pattern referer://url
pattern参见[匹配模式](../pattern.html),更多模式请参考[配置方式](../mode.html)。
例子:
如果我们在www.test.com域名的页面中发www.aliexpress.com的请求,则请求头的referer为www.test.com域名下的url或为空,这样可能请求到后台会返回403,可以这么修改referer:
www.aliexpress.com referer://http://www.aliexpress.com
把www.aliexpress.com域名下的请求都加上`http://www.aliexpress.com`这个referer。
#### 过滤规则
需要确保whistle是最新版本:[更新whistle](../update.html)
如果要过滤指定请求或指定协议的规则匹配,可以用如下协议:
1. [ignore](./ignore.html):忽略指定规则
2. [filter](./filter.html):过滤指定pattern,支持根据请求方法、请求头、请求客户端IP过滤
例子:
```
# 下面表示匹配pattern的同时不能为post请求且请求头里面的cookie字段必须包含test(忽略大小写)、url里面必须包含 cgi-bin 的请求
# 即:过滤掉匹配filter里面的请求
pattern operator1 operator2 excludeFilter://m:post includeFilter://h:cookie=test includeFilter:///cgi-bin/i
# 下面表示匹配pattern1、pattern2的请求方法为post、或请求头里面的cookie字段不能包含类似 `uin=123123` 且url里面必须包含 cgi-bin 的请求
operator pattern1 pattern2 includeFilter://m:post excludeFilter://h:cookie=/uin=o\d+/i excludeFilter:///cgi-bin/i
# 下面表示匹配pattern的请求忽略除了host以外的所有规则
pattern ignore://*|!host
# 下面表示匹配pattern的请求忽略file和host协议的规则
pattern ignore://file|host
```
| {
"pile_set_name": "Github"
} |
import chalk from 'chalk';
import address from 'address';
import webpack from 'webpack';
import WebpackDevServer from 'webpack-dev-server';
import { get } from 'lodash';
import { getPort } from 'portfinder';
import { GREEN } from '../common/constant';
import { getSiteDevConfig } from '../config/webpack.site.dev';
import { getSitePrdConfig } from '../config/webpack.site.prd';
// Print the local and LAN URLs where the dev site is reachable.
function logServerInfo(port: number) {
  const localUrl = `http://localhost:${port}/`;
  const networkUrl = `http://${address.ip()}:${port}/`;

  console.log('\n Site running at:\n');
  console.log(` ${chalk.bold('Local')}: ${chalk.hex(GREEN)(localUrl)} `);
  console.log(` ${chalk.bold('Network')}: ${chalk.hex(GREEN)(networkUrl)}`);
}
// Launch webpack-dev-server for the given dev-site config on `port`.
// Errors from `listen` are only logged; the process keeps running.
function runDevServer(
  port: number,
  config: ReturnType<typeof getSiteDevConfig>
) {
  const server = new WebpackDevServer(webpack(config), config.devServer);

  // this is a hack to disable wds status log
  (server as any).showStatus = function() {};

  // fall back to localhost when the dev-server config carries no host
  const host = get(config.devServer, 'host', 'localhost');
  server.listen(port, host, (err?: Error) => {
    if (err) {
      console.log(err);
    }
  });
}
// Start the dev site in watch mode: find a free port (starting from the
// configured one), print the reachable URLs, then launch the dev server.
function watch() {
  const config = getSiteDevConfig();

  getPort(
    {
      // preferred port; portfinder probes upwards from here if it is taken
      port: config.devServer!.port,
    },
    (err, port) => {
      if (err) {
        console.log(err);
        return;
      }

      logServerInfo(port);
      runDevServer(port, config);
    }
  );
}
// Build the production site bundle once.
// Resolves on success. Previously this rejected with `undefined`, hiding the
// cause; it now rejects with the fatal webpack error, or with an Error carrying
// the formatted compilation errors, so callers can log what went wrong.
function build() {
  return new Promise((resolve, reject) => {
    const config = getSitePrdConfig();

    webpack(config, (err, stats) => {
      if (err) {
        // fatal webpack error (bad config, missing loader, ...)
        reject(err);
      } else if (stats.hasErrors()) {
        // compilation finished but produced errors
        reject(new Error(stats.toString('errors-only')));
      } else {
        resolve();
      }
    });
  });
}
// Compile the documentation site.
// In production mode, run a one-shot build and resolve when it completes;
// otherwise start the watching dev server and return immediately.
export async function compileSite(production = false) {
  if (!production) {
    watch();
    return;
  }
  await build();
}
| {
"pile_set_name": "Github"
} |
-1070.239413 -2459.377273 1359.887719 -333637.877058
116.956405 1223.953920 2680.337160 -1250832.140695
-0.964137 0.188274 0.187063 539.552544
| {
"pile_set_name": "Github"
} |
;(***********************************************************************)
;(* *)
;(* OCaml *)
;(* *)
;(* Jacques Garrigue, Ian T Zimmerman, Damien Doligez *)
;(* *)
;(* Copyright 1997 Institut National de Recherche en Informatique et *)
;(* en Automatique. All rights reserved. This file is distributed *)
;(* under the terms of the GNU General Public License. *)
;(* *)
;(***********************************************************************)
;; caml-font: font-lock support for OCaml files
;; now with perfect parsing of comments and strings
(require 'font-lock)
;; Face for the special "(**/**)" stop comment: white text on red background.
(defvar caml-font-stop-face
  (progn
    (make-face 'caml-font-stop-face)
    (set-face-foreground 'caml-font-stop-face "White")
    (set-face-background 'caml-font-stop-face "Red")
    'caml-font-stop-face))

;; Face for documentation comments "(** ... *)": red foreground.
(defvar caml-font-doccomment-face
  (progn
    (make-face 'caml-font-doccomment-face)
    (set-face-foreground 'caml-font-doccomment-face "Red")
    'caml-font-doccomment-face))

;; Older Emacsen lack `font-lock-preprocessor-face'; define it as a copy of
;; the builtin face so the keyword table below always has it available.
(unless (facep 'font-lock-preprocessor-face)
  (defvar font-lock-preprocessor-face
    (copy-face 'font-lock-builtin-face
               'font-lock-preprocessor-face)))
;; Font-lock keyword table for OCaml: maps regexps for the various token
;; classes (constructors, definition keywords, operators, labels, ...) to
;; the faces used to display them.
(defconst caml-font-lock-keywords
  `(
    ;; modules and constructors
    ("`?\\<[A-Z][A-Za-z0-9_']*\\>" . font-lock-function-name-face)
    ;; definition
    (,(regexp-opt '("and" "as" "constraint" "class"
                    "exception" "external" "fun" "function" "functor"
                    "in" "inherit" "initializer" "let"
                    "method" "mutable" "module" "of" "private" "rec"
                    "type" "val" "virtual")
                  'words)
     . font-lock-type-face)
    ;; blocking
    (,(regexp-opt '("begin" "end" "object" "sig" "struct") 'words)
     . font-lock-keyword-face)
    ;; linenums
    ("# *[0-9]+" . font-lock-preprocessor-face)
    ;; infix operators
    (,(regexp-opt '("asr" "land" "lor" "lsl" "lsr" "lxor" "mod") 'words)
     . font-lock-builtin-face)
    ;; control
    (,(concat "[|#&]\\|->\\|"
              (regexp-opt '("do" "done" "downto" "else" "for" "if" "ignore"
                            "lazy" "match" "new" "or" "then" "to" "try"
                            "when" "while" "with")
                          'words))
     . font-lock-constant-face)
    ("\\<raise\\|failwith\\|invalid_arg\\>"
     . font-lock-comment-face)
    ;; labels (and open)
    ("\\(\\([~?]\\|\\<\\)[a-z][a-zA-Z0-9_']*:\\)[^:=]"
     1 font-lock-variable-name-face)
    ("\\<\\(assert\\|open\\|include\\)\\>\\|[~?][ (]*[a-z][a-zA-Z0-9_']*"
     . font-lock-variable-name-face)))
;; Pick the face for a syntactic (string/comment) region, given the parse
;; state S as produced by the syntax parser: strings get the string face;
;; comments get the stop face for "(**/**)", the doc-comment face for
;; "(**...", and the plain comment face otherwise.
(defun caml-font-syntactic-face (s)
  (let ((in-string (nth 3 s))
        (in-comment (nth 4 s))
        (start (nth 8 s)))
    (cond
     (in-string 'font-lock-string-face)
     (in-comment
      (save-excursion
        ;; inspect the first characters of the comment to classify it
        (goto-char start)
        (cond
         ((looking-at "(\\*\\*/\\*\\*)") 'caml-font-stop-face)
         ((looking-at "(\\*\\*[^*]") 'caml-font-doccomment-face)
         (t 'font-lock-comment-face)))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; In order to correctly fontify an OCaml buffer, it is necessary to
; lex the buffer to tell what is a comment and what is a string.
; We do this incrementally in a hook
; (font-lock-extend-after-change-region-function), which is called
; whenever the buffer changes. It sets the syntax-table property
; on each beginning and end of chars, strings, and comments.
; This mode handles correctly all the strange cases in the following
; OCaml code.
;
; let l' _ = ();;
; let _' _ = ();;
; let l' = ();;
; let b2_' = ();;
; let a'a' = ();;
; let f2 _ _ = ();;
; let f3 _ _ _ = ();;
; let f' _ _ _ _ _ = ();;
; let hello = ();;
;
; (* ==== easy stuff ==== *)
;
; (* a comment *)
; (* "a string" in a comment *)
; (* "another string *)" in a comment *)
; (* not a string '"' in a comment *)
; "a string";;
; '"';; (* not a string *)
;
; (* ==== hard stuff ==== *)
;
; l'"' not not a string ";;
; _'"' also not not a string";;
; f2 0l'"';; (* not not not a string *)
; f2 0_'"';; (* also not not not a string *)
; f3 0.0l'"' not not not not a string ";;
; f3 0.0_'"';; (* not not not not not a string *)
; f2 0b01_'"';; (* not not not a string *)
; f3 0b2_'"' not not not not a string ";;
; f3 0b02_'"';; (* not not not not not a string *)
; '\'';; (* a char *)
; '
; ';; (* a char *)
; '^M
; ';; (* also a char [replace ^M with one CR character] *)
; a'a';; (* not a char *)
; type '
; a' t = X;; (* also not a char *)
;
; (* ==== far-out stuff ==== *)
;
; f'"'" "*) print_endline "hello";;(* \"" ;;
; (* f'"'" "*) print_endline "hello";;(* \"" ;; *)
;; --- Token regexps used by the incremental lexer below. The \300-\377
;; ranges cover ISO Latin-1 letters accepted in OCaml identifiers. ---

;; match an identifier
(defconst caml-font-ident-re
  (concat "[A-Za-z_\300-\326\330-\366\370-\377]"
          "[A-Za-z_\300-\326\330-\366\370-\377'0-9]*")
  )
;; match a hex/octal/binary integer literal (with optional l/L/n suffix)
(defconst caml-font-int-re
  (concat "\\(0[xX][0-9A-Fa-f][0-9A-Fa-f_]*\\|0[oO][0-7][0-7_]*"
          "\\|0[bB][01][01_]*\\)[lLn]?")
  )
;; decimal integers are folded into the RE for floats to get longest-match
;; without using posix-looking-at
(defconst caml-font-decimal-re
  "[0-9][0-9_]*\\([lLn]\\|\\.[0-9_]*\\)?\\([eE][+-]?[0-9][0-9_]*\\)?"
  )
;; match any ident or numeral token
(defconst caml-font-ident-or-num-re
  (concat caml-font-ident-re "\\|" caml-font-int-re "\\|" caml-font-decimal-re)
  )
;; match any char token
(defconst caml-font-char-re
  (concat "'\\(\015\012\\|[^\\']\\|"
          "\\(\\\\\\([\\'\"ntbr ]\\|[0-9][0-9][0-9]"
          "\\|x[0-9A-Fa-f][0-9A-Fa-f]\\)\\)\\)'")
  )
;; match a quote followed by a newline
(defconst caml-font-quote-newline-re
  "'\\(\015\012\\|[\012\015]\\)"
  )
;; match any token or sequence of tokens that cannot contain a
;; quote, double quote, a start of comment, or a newline
;; note: this is only to go faster than one character at a time
(defconst caml-font-other-re
  "[^A-Za-z_0-9\012\015\300-\326\330-\366\370-\377'\"(]+"
  )
;; match any sequence of non-special characters in a comment
;; note: this is only to go faster than one character at a time
(defconst caml-font-other-comment-re
  "[^(*\"'\012\015]+"
  )
;; match any sequence of non-special characters in a string
;; note: this is only to go faster than one character at a time
(defconst caml-font-other-string-re
  "[^\\\"\012\015]"
  )
;; match a newline
(defconst caml-font-newline-re
  "\\(\015\012\\|[\012\015]\\)"
  )
;; Put the 'caml-font-state property with the given state on the
;; character before pos.  Return nil if the property was already there
;; with the same value (meaning the reparse can stop early), t otherwise.
(defun caml-font-put-state (pos state)
  (if (equal state (get-text-property (1- pos) 'caml-font-state))
      nil
    (put-text-property (1- pos) pos 'caml-font-state state)
    t))
;; Same as `looking-at', but erase the 'caml-font-state and 'syntax-table
;; properties from the matched range (they will be recomputed by the caller).
(defun caml-font-looking-at (re)
  (let ((result (looking-at re)))
    (when result
      (remove-text-properties (match-beginning 0) (match-end 0)
                              '(syntax-table nil caml-font-state nil)))
    result))
;; Annotate the buffer starting at point in state (st . depth).
;; Set the 'syntax-table property on beginnings and ends of:
;;  - strings
;;  - chars
;;  - comments
;; Also set the 'caml-font-state property on each LF character that is
;; not preceded by a single quote.  The property gives the state of the
;; lexer (nil or t) after reading that character.
;; Leave the point at a point where the pre-existing 'caml-font-state
;; property is consistent with the new parse, or at the end of the buffer.
;; depth is the depth of nested comments at this point;
;; it must be a non-negative integer.
;; st can be:
;;  nil -- we are in the base state
;;  t   -- we are within a string
(defun caml-font-annotate (st depth)
  (let ((continue t))
    (while (and continue (not (eobp)))
      (cond
       ;; --- base state, outside any comment ---
       ((and (equal st nil) (= depth 0))
        (cond
         ((caml-font-looking-at caml-font-ident-or-num-re)
          (goto-char (match-end 0)))
         ((caml-font-looking-at caml-font-char-re)
          ;; mark both delimiters of a char literal as string fences
          (put-text-property (point) (1+ (point))
                             'syntax-table (string-to-syntax "|"))
          (put-text-property (1- (match-end 0)) (match-end 0)
                             'syntax-table (string-to-syntax "|"))
          (goto-char (match-end 0)))
         ((caml-font-looking-at caml-font-quote-newline-re)
          (goto-char (match-end 0)))
         ((caml-font-looking-at "\"")
          ;; opening double quote: enter string state
          (put-text-property (point) (1+ (point))
                             'syntax-table (string-to-syntax "|"))
          (goto-char (match-end 0))
          (setq st t))
         ((caml-font-looking-at "(\\*")
          ;; comment opener: enter comment state at depth 1
          (put-text-property (point) (1+ (point))
                             'syntax-table (string-to-syntax "!"))
          (goto-char (match-end 0))
          (setq depth 1))
         ((looking-at caml-font-newline-re)
          ;; record the lexer state on the newline; stop once unchanged
          (goto-char (match-end 0))
          (setq continue (caml-font-put-state (match-end 0) '(nil . 0))))
         ((caml-font-looking-at caml-font-other-re)
          (goto-char (match-end 0)))
         (t
          (remove-text-properties (point) (1+ (point))
                                  '(syntax-table nil caml-font-state nil))
          (goto-char (1+ (point))))))
       ;; --- base state, inside a comment ---
       ((equal st nil)
        (cond
         ((caml-font-looking-at "(\\*")
          (goto-char (match-end 0))
          (setq depth (1+ depth)))
         ((caml-font-looking-at "\\*)")
          (goto-char (match-end 0))
          (setq depth (1- depth))
          (when (= depth 0)
            ;; outermost closer: mark the end of the whole comment
            (put-text-property (1- (point)) (point)
                               'syntax-table (string-to-syntax "!"))))
         ((caml-font-looking-at "\"")
          (goto-char (match-end 0))
          (setq st t))
         ((caml-font-looking-at caml-font-char-re)
          (goto-char (match-end 0)))
         ((caml-font-looking-at caml-font-quote-newline-re)
          (goto-char (match-end 0)))
         ((caml-font-looking-at "''")
          (goto-char (match-end 0)))
         ((looking-at caml-font-newline-re)
          (goto-char (match-end 0))
          (setq continue (caml-font-put-state (match-end 0) (cons nil depth))))
         ((caml-font-looking-at caml-font-other-comment-re)
          (goto-char (match-end 0)))
         (t
          (remove-text-properties (point) (1+ (point))
                                  '(syntax-table nil caml-font-state nil))
          (goto-char (1+ (point))))))
       ;; --- string state, inside or outside a comment ---
       (t
        (cond
         ((caml-font-looking-at "\"")
          ;; closing quote; only fence it when not buried inside a comment
          (when (= depth 0)
            (put-text-property (point) (1+ (point))
                               'syntax-table (string-to-syntax "|")))
          (goto-char (1+ (point)))
          (setq st nil))
         ((caml-font-looking-at "\\\\[\"\\]")
          ;; escaped quote or backslash inside the string
          (goto-char (match-end 0)))
         ((looking-at caml-font-newline-re)
          (goto-char (match-end 0))
          (setq continue (caml-font-put-state (match-end 0) (cons t depth))))
         ((caml-font-looking-at caml-font-other-string-re)
          (goto-char (match-end 0)))
         (t
          (remove-text-properties (point) (1+ (point))
                                  '(syntax-table nil caml-font-state nil))
          (goto-char (1+ (point))))))))))
; This is the hook function for font-lock-extend-after-change-function
; It finds the nearest saved state at the left of the changed text,
; calls caml-font-annotate to set the 'caml-font-state and 'syntax-table
; properties, then returns the range that was parsed by caml-font-annotate.
(defun caml-font-extend-after-change (beg end &optional old-len)
  ;; OLD-LEN is accepted for API compatibility with
  ;; font-lock-extend-after-change-region-function but is not needed here.
  (save-excursion
    (save-match-data
      (let ((caml-font-modified (buffer-modified-p))
            start-at
            end-at
            state)
        ;; Invalidate annotations inside the changed region.
        (remove-text-properties beg end '(syntax-table nil caml-font-state nil))
        ;; Find the closest position at or before BEG whose preceding
        ;; character carries a saved lexer state.
        (setq start-at
              (or (and (> beg (point-min))
                       (get-text-property (1- beg) 'caml-font-state)
                       beg)
                  (previous-single-property-change beg 'caml-font-state)
                  (point-min)))
        ;; Recover that saved (state . depth) pair, defaulting to the base
        ;; state at the beginning of the buffer.
        (setq state (or (and (> start-at (point-min))
                             (get-text-property (1- start-at) 'caml-font-state))
                        (cons nil 0)))
        (goto-char start-at)
        (caml-font-annotate (car state) (cdr state))
        (setq end-at (point))
        ;; Setting text properties marks the buffer modified; restore the
        ;; original flag so mere refontification never dirties the buffer.
        (restore-buffer-modified-p caml-font-modified)
        (cons start-at end-at))))
)
; We don't use the normal caml-mode syntax table because it contains an
; approximation of strings and comments that interferes with our
; annotations.
(defconst caml-font-syntax-table
  (let ((tbl (make-syntax-table)))
    ;; Quote and underscore are word constituents in OCaml identifiers.
    (modify-syntax-entry ?' "w" tbl)
    (modify-syntax-entry ?_ "w" tbl)
    ;; Double quote is plain punctuation here; string boundaries are
    ;; marked via 'syntax-table text properties instead.
    (modify-syntax-entry ?\" "." tbl)
    ;; Latin-1 accented letters count as word constituents, except the
    ;; multiplication (215) and division (247) signs.
    (dolist (i (number-sequence 192 255))
      (unless (memq i '(215 247))
        (modify-syntax-entry i "w" tbl)))
    tbl)
  "Syntax table used by caml-font instead of the regular caml-mode table.")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; font-lock commands are similar for caml-mode and inferior-caml-mode
(defun caml-font-set-font-lock ()
  ;; Install font-lock for caml-mode buffers.
  ;; parse-sexp-lookup-properties makes the sexp machinery honor the
  ;; 'syntax-table text properties installed by caml-font-annotate.
  (setq parse-sexp-lookup-properties t)
  (setq font-lock-defaults
        (list
         'caml-font-lock-keywords ; keywords
         nil ; keywords-only
         nil ; case-fold
         nil ; syntax-alist
         nil ; syntax-begin
         (cons 'font-lock-syntax-table caml-font-syntax-table)
         '(font-lock-extend-after-change-region-function
           . caml-font-extend-after-change)
         '(font-lock-syntactic-face-function . caml-font-syntactic-face)
         ))
  ;; Annotate the whole buffer once up front so the initial fontification
  ;; sees correct string/comment boundaries.
  (caml-font-extend-after-change (point-min) (point-max) 0)
  (font-lock-mode 1)
)
(add-hook 'caml-mode-hook 'caml-font-set-font-lock)
;; In the toplevel buffer, lines starting with "#" (input prompts) or "-"
;; (output markers) are dimmed like comments, on top of the regular
;; caml-mode keyword fontification.
(defconst inferior-caml-font-lock-keywords
  `(("^[#-]" . font-lock-comment-face)
    ,@caml-font-lock-keywords))
(defun inferior-caml-set-font-lock ()
  ;; Same setup as caml-font-set-font-lock, but for the inferior (toplevel)
  ;; buffer and with the prompt-aware keyword list.
  (setq parse-sexp-lookup-properties t)
  (setq font-lock-defaults
        (list
         'inferior-caml-font-lock-keywords ; keywords
         nil ; keywords-only
         nil ; case-fold
         nil ; syntax-alist
         nil ; syntax-begin
         (cons 'font-lock-syntax-table caml-font-syntax-table)
         '(font-lock-extend-after-change-region-function
           . caml-font-extend-after-change)
         '(font-lock-syntactic-face-function . caml-font-syntactic-face)
         ))
  (caml-font-extend-after-change (point-min) (point-max) 0)
  (font-lock-mode 1)
)
;; NOTE(review): the hook variable is "inferior-caml-mode-hooks" (plural),
;; which is the name historically declared by inferior-caml.el -- confirm
;; against the inferior-caml-mode in use before "fixing" the spelling.
(add-hook 'inferior-caml-mode-hooks 'inferior-caml-set-font-lock)
(provide 'caml-font)
| {
"pile_set_name": "Github"
} |
@ECHO OFF

REM Command file for Sphinx documentation

REM Allow the caller to override the sphinx-build executable via %SPHINXBUILD%.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
REM -d places the doctree cache under the build dir; "source" is the input dir.
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
REM Optional: PAPER=a4 or PAPER=letter selects the LaTeX paper size.
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
REM With no argument, fall through to the help text.
if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  texinfo    to make Texinfo files
	echo.  gettext    to make PO message catalogs
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	goto end
)

REM Remove everything under the build directory.
if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)
REM Each target below runs sphinx-build with the matching builder and then
REM reports where the output landed.  "if errorlevel 1 exit /b 1" propagates
REM a failed build to the caller instead of printing a success message.
if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Format-TransformingEncryptionfteproxy.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Format-TransformingEncryptionfteproxy.ghc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

REM gettext uses I18NSPHINXOPTS (no doctree cache) to extract message catalogs.
if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

:end
| {
"pile_set_name": "Github"
} |
/*
 * DMA buffer calls
 *
 * Prototypes for the DMA audio buffering layer (dmabuf.c): device
 * open/close, buffer acquisition for read/write, DMA start/stop and the
 * interrupt callbacks invoked on input/output completion.
 */
int DMAbuf_open(int dev, int mode);
int DMAbuf_release(int dev, int mode);
int DMAbuf_getwrbuffer(int dev, char **buf, int *size, int dontblock);
int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock);
int DMAbuf_rmchars(int dev, int buff_no, int c);
int DMAbuf_start_output(int dev, int buff_no, int l);
int DMAbuf_move_wrpointer(int dev, int l);
/* int DMAbuf_ioctl(int dev, unsigned int cmd, void __user *arg, int local); */
void DMAbuf_init(int dev, int dma1, int dma2);
void DMAbuf_deinit(int dev);
int DMAbuf_start_dma (int dev, unsigned long physaddr, int count, int dma_mode);
void DMAbuf_inputintr(int dev);
void DMAbuf_outputintr(int dev, int underflow_flag);
struct dma_buffparms;
int DMAbuf_space_in_queue (int dev);
int DMAbuf_activate_recording (int dev, struct dma_buffparms *dmap);
int DMAbuf_get_buffer_pointer (int dev, struct dma_buffparms *dmap, int direction);
void DMAbuf_launch_output(int dev, struct dma_buffparms *dmap);
unsigned int DMAbuf_poll(struct file *file, int dev, poll_table *wait);
void DMAbuf_start_devices(unsigned int devmask);
void DMAbuf_reset (int dev);
int DMAbuf_sync (int dev);

/*
 * System calls for /dev/dsp and /dev/audio (audio.c)
 */
int audio_read (int dev, struct file *file, char __user *buf, int count);
int audio_write (int dev, struct file *file, const char __user *buf, int count);
int audio_open (int dev, struct file *file);
void audio_release (int dev, struct file *file);
int audio_ioctl (int dev, struct file *file,
		 unsigned int cmd, void __user *arg);
void audio_init_devices (void);

void reorganize_buffers (int dev, struct dma_buffparms *dmap, int recording);

/*
 * System calls for the /dev/sequencer
 */
int sequencer_read (int dev, struct file *file, char __user *buf, int count);
int sequencer_write (int dev, struct file *file, const char __user *buf, int count);
int sequencer_open (int dev, struct file *file);
void sequencer_release (int dev, struct file *file);
int sequencer_ioctl (int dev, struct file *file, unsigned int cmd, void __user *arg);
unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait);

void sequencer_init (void);
void sequencer_unload (void);
void sequencer_timer(unsigned long dummy);
int note_to_freq(int note_num);
unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
			       int vibrato_bend);
void seq_input_event(unsigned char *event, int len);
void seq_copy_to_input (unsigned char *event, int len);

/*
 * System calls for the /dev/midi
 */
int MIDIbuf_read (int dev, struct file *file, char __user *buf, int count);
int MIDIbuf_write (int dev, struct file *file, const char __user *buf, int count);
int MIDIbuf_open (int dev, struct file *file);
void MIDIbuf_release (int dev, struct file *file);
int MIDIbuf_ioctl (int dev, struct file *file, unsigned int cmd, void __user *arg);
unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait);
int MIDIbuf_avail(int dev);

void MIDIbuf_bytes_received(int dev, unsigned char *buf, int count);

/* From soundcard.c */
void request_sound_timer (int count);
void sound_stop_timer(void);
void conf_printf(char *name, struct address_info *hw_config);
void conf_printf2(char *name, int base, int irq, int dma, int dma2);

/* From sound_timer.c */
void sound_timer_interrupt(void);
void sound_timer_syncinterval(unsigned int new_usecs);

/* From midi_synth.c */
void do_midi_msg (int synthno, unsigned char *msg, int mlen);
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" ?>
<!-- Test process used by the form-service API tests: a start event with a
     start form, then a user task (assigned to "kermit") with a task form,
     then a receive task that parks the process instance until a message
     arrives. -->
<definitions id="definitions"
             targetNamespace="http://camunda.org/schema/1.0/bpmn20"
             xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL"
             xmlns:camunda="http://camunda.org/schema/1.0/bpmn">

  <process id="FormsProcess" isExecutable="true">
    <startEvent id="start" camunda:formKey="org/camunda/bpm/engine/test/api/form/start.form" />
    <sequenceFlow id="flow1" sourceRef="start" targetRef="task" />
    <userTask id="task"
              camunda:formKey="org/camunda/bpm/engine/test/api/form/task.form"
              camunda:assignee="kermit" />
    <sequenceFlow id="flow2" sourceRef="task" targetRef="wait" />
    <receiveTask id="wait" />
  </process>

</definitions>
| {
"pile_set_name": "Github"
} |
## Batch file to undeploy helloworld-mdb that was used for the messaging-clustering quickstart.
## Run with: jboss-cli.sh --connect --file=<this file>
undeploy helloworld-mdb.war
| {
"pile_set_name": "Github"
} |
{-# LANGUAGE MagicHash #-}
-----------------------------------------------------------------------------
--
-- GHCi Interactive debugging commands
--
-- Pepe Iborra (supported by Google SoC) 2006
--
-- ToDo: lots of violation of layering here. This module should
-- decide whether it is above the GHC API (import GHC and nothing
-- else) or below it.
--
-----------------------------------------------------------------------------
module GHC.Runtime.Debugger (pprintClosureCommand, showTerm, pprTypeAndContents) where
import GHC.Prelude
import GHC.Runtime.Linker
import GHC.Runtime.Heap.Inspect
import GHC.Runtime.Interpreter
import GHCi.RemoteTypes
import GHC.Driver.Monad
import GHC.Driver.Types
import GHC.Types.Id
import GHC.Iface.Syntax ( showToHeader )
import GHC.Iface.Env ( newInteractiveBinder )
import GHC.Types.Name
import GHC.Types.Var hiding ( varName )
import GHC.Types.Var.Set
import GHC.Types.Unique.Set
import GHC.Core.Type
import GHC
import GHC.Utils.Outputable
import GHC.Core.Ppr.TyThing
import GHC.Utils.Error
import GHC.Utils.Monad
import GHC.Driver.Session
import GHC.Driver.Ppr
import GHC.Utils.Exception
import Control.Monad
import Control.Monad.Catch as MC
import Data.List ( (\\) )
import Data.Maybe
import Data.IORef
-------------------------------------
-- | The :print & friends commands
-------------------------------------
-- | Backend of the @:print@, @:sprint@ and @:force@ GHCi commands.
--
-- @bindThings@ binds the suspensions found in the inspected values as
-- new interactive bindings (@_t1@, @_t2@, ...); @force@ forces thunks
-- instead of showing them as suspensions.  @str@ is the space-separated
-- list of identifiers to inspect.
pprintClosureCommand :: GhcMonad m => Bool -> Bool -> String -> m ()
pprintClosureCommand bindThings force str = do
  tythings <- (catMaybes . concat) `liftM`
                 mapM (\w -> GHC.parseName w >>=
                                mapM GHC.lookupName)
                      (words str)
  let ids = [id | AnId id <- tythings]

  -- Obtain the terms and the recovered type information
  (subst, terms) <- mapAccumLM go emptyTCvSubst ids

  -- Apply the substitutions obtained after recovering the types
  modifySession $ \hsc_env ->
    hsc_env{hsc_IC = substInteractiveContext (hsc_IC hsc_env) subst}

  -- Finally, print the Terms
  unqual <- GHC.getPrintUnqual
  docterms <- mapM showTerm terms
  dflags <- getDynFlags
  liftIO $ (printOutputForUser dflags unqual . vcat)
           (zipWith (\id docterm -> ppr id <+> char '=' <+> docterm)
                    ids
                    docterms)
 where
   -- Do the obtainTerm--bindSuspensions-computeSubstitution dance
   go :: GhcMonad m => TCvSubst -> Id -> m (TCvSubst, Term)
   go subst id = do
       -- Apply the substitution accumulated so far before inspecting.
       let id' = updateIdTypeAndMult (substTy subst) id
           id_ty' = idType id'
       term_ <- GHC.obtainTermFromId maxBound force id'
       term  <- tidyTermTyVars term_
       term' <- if bindThings
                  then bindSuspensions term
                  else return term
       -- Before leaving, we compare the type obtained to see if it's more specific
       -- Then, we extract a substitution,
       -- mapping the old tyvars to the reconstructed types.
       let reconstructed_type = termType term
       hsc_env <- getSession
       case (improveRTTIType hsc_env id_ty' reconstructed_type) of
         Nothing     -> return (subst, term')
         Just subst' -> do { dflags <- GHC.getSessionDynFlags
                           ; liftIO $
                               dumpIfSet_dyn dflags Opt_D_dump_rtti "RTTI"
                                 FormatText
                                 (fsep $ [text "RTTI Improvement for", ppr id,
                                  text "old substitution:" , ppr subst,
                                  text "new substitution:" , ppr subst'])
                           ; return (subst `unionTCvSubst` subst', term')}
-- | Tidy the type variables appearing in a 'Term' so that the ones we
-- print do not clash with type variables already bound in the
-- interactive context.
tidyTermTyVars :: GhcMonad m => Term -> m Term
tidyTermTyVars t =
  withSession $ \hsc_env -> do
  let env_tvs = tyThingsTyCoVars $ ic_tythings $ hsc_IC hsc_env
      my_tvs  = termTyCoVars t
      -- Environment tyvars not mentioned in the term: their occ-names are
      -- "taken" and must be avoided when tidying.
      tvs     = env_tvs `minusVarSet` my_tvs
      tyvarOccName = nameOccName . tyVarName
      tidyEnv = (initTidyOccEnv (map tyvarOccName (nonDetEltsUniqSet tvs))
                -- It's OK to use nonDetEltsUniqSet here because initTidyOccEnv
                -- forgets the ordering immediately by creating an env
                , getUniqSet $ env_tvs `intersectVarSet` my_tvs)
  return $ mapTermType (snd . tidyOpenType tidyEnv) t
-- | Give names, and bind in the interactive environment, to all the suspensions
--   included (inductively) in a term.
--
-- Fresh names are of the form @_t1@, @_t2@, ..., skipping any already in
-- scope.  Each suspension's heap value is also registered with the linker
-- so the new bindings can be evaluated later.
bindSuspensions :: GhcMonad m => Term -> m Term
bindSuspensions t = do
      hsc_env <- getSession
      inScope <- GHC.getBindings
      let ictxt = hsc_IC hsc_env
          prefix = "_t"
          alreadyUsedNames = map (occNameString . nameOccName . getName) inScope
          availNames = map ((prefix++) . show) [(1::Int)..] \\ alreadyUsedNames
      availNames_var <- liftIO $ newIORef availNames
      (t', stuff) <- liftIO $ foldTerm (nameSuspensionsAndGetInfos hsc_env availNames_var) t
      let (names, tys, fhvs) = unzip3 stuff
      let ids = [ mkVanillaGlobal name ty
                | (name,ty) <- zip names tys]
          new_ic = extendInteractiveContextWithIds ictxt ids
          dl = hsc_dynLinker hsc_env
      liftIO $ extendLinkEnv dl (zip names fhvs)
      setSession hsc_env {hsc_IC = new_ic }
      return t'
     where
--    Processing suspensions. Give names and recopilate info
        nameSuspensionsAndGetInfos :: HscEnv -> IORef [String]
                                   -> TermFold (IO (Term, [(Name,Type,ForeignHValue)]))
        nameSuspensionsAndGetInfos hsc_env freeNames = TermFold
                      {
                        fSuspension = doSuspension hsc_env freeNames
                      , fTerm = \ty dc v tt -> do
                                    tt' <- sequence tt
                                    let (terms,names) = unzip tt'
                                    return (Term ty dc v terms, concat names)
                      , fPrim = \ty n ->return (Prim ty n,[])
                      , fNewtypeWrap =
                                \ty dc t -> do
                                    (term, names) <- t
                                    return (NewtypeWrap ty dc term, names)
                      , fRefWrap = \ty t -> do
                                    (term, names) <- t
                                    return (RefWrap ty term, names)
                      }
        -- Pop the next fresh name atomically and attach it to the suspension.
        doSuspension hsc_env freeNames ct ty hval _name = do
          name <- atomicModifyIORef' freeNames (\x->(tail x, head x))
          n <- newGrimName hsc_env name
          return (Suspension ct ty hval (Just n), [(n,ty,hval)])
-- A custom Term printer to enable the use of Show instances
-- | Pretty-print a 'Term'.  When @-fprint-evld-with-show@ is set, fully
-- evaluated subterms are rendered by compiling and running a @Prelude.show@
-- call against them; otherwise the generic term printer is used.
showTerm :: GhcMonad m => Term -> m SDoc
showTerm term = do
    dflags <- GHC.getSessionDynFlags
    if gopt Opt_PrintEvldWithShow dflags
       then cPprTerm (liftM2 (++) (\_y->[cPprShowable]) cPprTermBase) term
       else cPprTerm cPprTermBase term
 where
  -- Try to render a fully evaluated term via its Show instance by
  -- temporarily binding it to a fresh name and compiling a "show" call.
  cPprShowable prec t@Term{ty=ty, val=fhv} =
    if not (isFullyEvaluatedTerm t)
     then return Nothing
     else do
        hsc_env <- getSession
        dflags  <- GHC.getSessionDynFlags
        do
           (new_env, bname) <- bindToFreshName hsc_env ty "showme"
           setSession new_env
                      -- XXX: this tries to disable logging of errors
                      -- does this still do what it is intended to do
                      -- with the changed error handling and logging?
           let noop_log _ _ _ _ _ = return ()
               expr = "Prelude.return (Prelude.show " ++
                         showPpr dflags bname ++
                      ") :: Prelude.IO Prelude.String"
               dl = hsc_dynLinker hsc_env
           GHC.setSessionDynFlags dflags{log_action=noop_log}
           txt_ <- withExtendedLinkEnv dl
                                       [(bname, fhv)]
                                       (GHC.compileExprRemote expr)
           let myprec = 10 -- application precedence. TODO Infix constructors
           txt <- liftIO $ evalString hsc_env txt_
           if not (null txt) then
             return $ Just $ cparen (prec >= myprec && needsParens txt)
                                    (text txt)
            else return Nothing
          -- Always restore the original session and flags, even on failure.
          `MC.finally` do
             setSession hsc_env
             GHC.setSessionDynFlags dflags
  cPprShowable prec NewtypeWrap{ty=new_ty,wrapped_term=t} =
      cPprShowable prec t{ty=new_ty}
  cPprShowable _ _ = return Nothing

  needsParens ('"':_) = False -- some simple heuristics to see whether parens
                              -- are redundant in an arbitrary Show output
  needsParens ('(':_) = False
  needsParens txt = ' ' `elem` txt

  -- Bind TY to a fresh interactive name so the compiled expression can
  -- refer to the value by name.
  bindToFreshName hsc_env ty userName = do
    name <- newGrimName hsc_env userName
    let id       = mkVanillaGlobal name ty
        new_ic   = extendInteractiveContextWithIds (hsc_IC hsc_env) [id]
    return (hsc_env {hsc_IC = new_ic }, name)
-- | Make a fresh interactive binder named @userName@ (with a new unique
-- and no useful source location).
newGrimName :: MonadIO m => HscEnv -> String -> m Name
newGrimName hsc_env userName =
  let occ = mkOccName varName userName
  in liftIO (newInteractiveBinder hsc_env occ noSrcSpan)
-- | Render an 'Id' as @name :: type@, appending @ = value@ when
-- @-fprint-bind-contents@ is enabled.  Evaluation errors are caught and
-- shown inline rather than propagated.
pprTypeAndContents :: GhcMonad m => Id -> m SDoc
pprTypeAndContents id = do
  dflags  <- GHC.getSessionDynFlags
  let pcontents = gopt Opt_PrintBindContents dflags
      pprdId    = (pprTyThing showToHeader . AnId) id
  if pcontents
    then do
      let depthBound = 100
      -- If the value is an exception, make sure we catch it and
      -- show the exception, rather than propagating the exception out.
      e_term <- MC.try $ GHC.obtainTermFromId depthBound False id
      docs_term <- case e_term of
                      Right term -> showTerm term
                      Left  exn  -> return (text "*** Exception:" <+>
                                            text (show (exn :: SomeException)))
      return $ pprdId <+> equals <+> docs_term
    else return pprdId
| {
"pile_set_name": "Github"
} |
/**
* Copyright 2007-2016, Kaazing Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaazing.gateway.transport.http.connector.specification.rfc7230;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.CountDownLatch;
import org.apache.mina.core.future.ConnectFuture;
import org.apache.mina.core.service.IoHandler;
import org.apache.mina.core.session.IoSession;
import org.apache.mina.core.session.IoSessionInitializer;
import org.jmock.Expectations;
import org.jmock.api.Invocation;
import org.jmock.integration.junit4.JUnitRuleMockery;
import org.jmock.lib.action.CustomAction;
import org.jmock.lib.concurrent.Synchroniser;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.DisableOnDebug;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
import org.kaazing.gateway.transport.http.HttpConnectSession;
import org.kaazing.gateway.transport.http.HttpConnectorRule;
import org.kaazing.gateway.transport.http.HttpHeaders;
import org.kaazing.gateway.transport.http.HttpMethod;
import org.kaazing.k3po.junit.annotation.Specification;
import org.kaazing.k3po.junit.rules.K3poRule;
import org.kaazing.mina.core.session.IoSessionEx;
import org.kaazing.test.util.ITUtil;
import org.kaazing.test.util.MethodExecutionTrace;
public class ConnectionManagementIT {
    // Boots an HTTP connector per test; connect options are set per test method.
    private final HttpConnectorRule connector = new HttpConnectorRule();

    // jMock context; the Synchroniser threading policy makes expectations safe
    // to evaluate from the connector's I/O threads rather than the test thread.
    private JUnitRuleMockery context = new JUnitRuleMockery() {
        {
            setThreadingPolicy(new Synchroniser());
        }
    };

    private final TestRule trace = new MethodExecutionTrace();
    private TestRule contextRule = ITUtil.toTestRule(context);
    // k3po scripts come from the RFC 7230 connection-management specification.
    private final K3poRule k3po = new K3poRule().setScriptRoot("org/kaazing/specification/http/rfc7230/connection.management");
    private final TestRule timeoutRule = new DisableOnDebug(new Timeout(5, SECONDS));

    // Rule order matters: tracing outermost, then timeout, mockery, connector, k3po.
    @Rule
    public TestRule chain = RuleChain.outerRule(trace).around(timeoutRule).around(contextRule).around(connector).around(k3po);
    /**
     * RFC 7230: after sending a request with {@code Connection: close}, the
     * client side must close the connection once the exchange completes.
     */
    @Test
    @Specification({"client.must.close.connection.after.request.with.connection.close/response"})
    public void clientMustCloseConnectionAfterRequestWithConnectionClose() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        // Suppress the default User-Agent header so the request matches the k3po script.
        connector.getConnectOptions().put("http.userAgentHeaderEnabled", Boolean.FALSE);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGetConnectionClose());
        // The session must actually close; fail the test if it stays open.
        assertTrue(closed.await(2, SECONDS));
        k3po.finish();
    }
    /**
     * RFC 7230: when the server responds with {@code Connection: close}, the
     * connection must be torn down after the response (observed here as
     * {@code sessionClosed} on the client handler).
     */
    @Test
    @Specification({"server.must.close.connection.after.response.with.connection.close/response"})
    public void serverMustCloseConnectionAfterResponseWithConnectionClose() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        // Suppress the default User-Agent header so the request matches the k3po script.
        connector.getConnectOptions().put("http.userAgentHeaderEnabled", Boolean.FALSE);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGetHost());
        assertTrue(closed.await(2, SECONDS));
        k3po.finish();
    }
    /**
     * RFC 7230: HTTP/1.1 connections persist by default -- three consecutive
     * requests should be served over the same underlying connection.
     */
    @Test
    @Specification({"connections.should.persist.by.default/backend"})
    public void connectionsShouldPersistByDefault() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGet());
        // NOTE(review): the await results are intentionally(?) not asserted in
        // this test -- with a persistent connection the HTTP session close may
        // be deferred; confirm before tightening these waits into assertions.
        closed.await(2, SECONDS);

        final CountDownLatch closed2 = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed2.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGet());
        closed2.await(2, SECONDS);

        final CountDownLatch closed3 = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed3.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGet());
        closed3.await(2, SECONDS);
        k3po.finish();
    }
    /**
     * RFC 7230: if the server receives a {@code Connection: close} request it
     * must close its half of the connection after sending the response.
     */
    @Test
    @Specification({"server.must.close.its.half.of.connection.after.sending.response.if.it.receives.a.close/response"})
    public void serverMustCloseItsHalfOfConnectionAfterSendingResponseIfItReceivesAClose() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/path", handler, new ConnectSessionInitializerGetConnection());
        assertTrue(closed.await(2, SECONDS));
        k3po.finish();
    }
    /**
     * RFC 7230: after receiving {@code Connection: close} the client must not
     * reuse the TCP connection for the second request.
     * Ignored pending gateway issue #678 (see annotation).
     */
    @Ignore("Issue: https://github.com/kaazing/gateway/issues/678")
    @Test
    @Specification({"client.must.not.reuse.tcp.connection.when.receives.connection.close/response"})
    public void clientMustNotReuseTcpConnectionWhenReceivesConnectionClose() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                allowing(handler).sessionCreated(with(any(IoSessionEx.class)));
                allowing(handler).sessionOpened(with(any(IoSessionEx.class)));
                allowing(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/path", handler, new ConnectSessionInitializerGetConnectionLength1());
        // NOTE(review): await results are not asserted here; the test's real
        // verification is the k3po script completing in k3po.finish().
        closed.await(2, SECONDS);

        final CountDownLatch closed2 = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed2.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/path2", handler, new ConnectSessionInitializerGetConnectionLength2());
        closed2.await(2, SECONDS);
        k3po.finish();
    }
    /**
     * RFC 7230: a server accepting an {@code Upgrade} request must include an
     * {@code Upgrade} header in its response.
     */
    @Test
    @Specification({"server.getting.upgrade.request.must.respond.with.upgrade.header/response"})
    public void serverGettingUpgradeRequestMustRespondWithUpgradeHeader() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGetUpgrade());
        assertTrue(closed.await(2, SECONDS));
        k3po.finish();
    }
    /**
     * RFC 7230: a server sending 426 (Upgrade Required) must include an
     * {@code Upgrade} header listing acceptable protocols.
     */
    @Test
    @Specification({"server.that.sends.upgrade.required.must.include.upgrade.header/response"})
    public void serverThatSendsUpgradeRequiredMustIncludeUpgradeHeader() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                // NOTE(review): unlike the other tests, the latch fires on
                // sessionOpened (no sessionClosed expectation) and the await
                // result is not asserted -- presumably deliberate since the
                // 426 exchange ends with a client-initiated close; confirm.
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGetClose());
        closed.await(2, SECONDS);
        k3po.finish();
    }
    /**
     * RFC 7230: a server that is switching protocols must send a 101
     * (Switching Protocols) response.
     * NOTE(review): the method name says "100" but the specification script
     * is the 101 response -- a naming typo; renaming is left to a follow-up
     * to avoid churning test reports.
     */
    @Test
    @Specification({"server.that.is.upgrading.must.send.a.101.response/response"})
    public void serverThatIsUpgradingMustSendA100Response() throws Exception {
        final IoHandler handler = context.mock(IoHandler.class);
        final CountDownLatch closed = new CountDownLatch(1);
        context.checking(new Expectations() {
            {
                oneOf(handler).sessionCreated(with(any(IoSessionEx.class)));
                oneOf(handler).sessionOpened(with(any(IoSessionEx.class)));
                oneOf(handler).sessionClosed(with(any(IoSessionEx.class)));
                will(new CustomAction("Latch countdown") {
                    @Override
                    public Object invoke(Invocation invocation) throws Throwable {
                        closed.countDown();
                        return null;
                    }
                });
            }
        });
        connector.connect("http://localhost:8080/", handler, new ConnectSessionInitializerGetUpgradeCap());
        assertTrue(closed.await(2, SECONDS));
        k3po.finish();
    }
/** Session initializer that configures the connect session as a bare HTTP GET. */
private static class ConnectSessionInitializerGet implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        ((HttpConnectSession) session).setMethod(HttpMethod.GET);
    }
}
/** Session initializer that issues a GET and immediately requests a graceful close. */
private static class ConnectSessionInitializerGetClose implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        // close(false) = flush pending writes before closing (not immediate).
        connectSession.close(false);
    }
}
/** Session initializer that issues a GET with explicit Host and "Connection: close" headers. */
private static class ConnectSessionInitializerGetConnectionClose implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_HOST, "localhost:8080");
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "close");
    }
}
/** Session initializer that issues a GET with an explicit Host header only. */
private static class ConnectSessionInitializerGetHost implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_HOST, "localhost:8080");
    }
}
/** Session initializer that issues a GET with a "Connection: close" header. */
private static class ConnectSessionInitializerGetConnection implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "close");
    }
}
/** Session initializer: GET with "Connection: close" and "Content-Length: 0", no explicit close. */
private static class ConnectSessionInitializerGetConnectionLength1 implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "close");
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONTENT_LENGTH, String.valueOf(0));
    }
}
/**
 * Session initializer: GET with "Connection: close" and "Content-Length: 0",
 * followed by a graceful close — the variant of ...Length1 that also closes.
 */
private static class ConnectSessionInitializerGetConnectionLength2 implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "close");
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONTENT_LENGTH, String.valueOf(0));
        // close(false) = flush pending writes before closing (not immediate).
        connectSession.close(false);
    }
}
/** Session initializer: GET with "Upgrade: WebSocket" and lower-case "Connection: upgrade". */
private static class ConnectSessionInitializerGetUpgrade implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        HttpConnectSession connectSession = (HttpConnectSession) session;
        connectSession.setMethod(HttpMethod.GET);
        connectSession.addWriteHeader(HttpHeaders.HEADER_UPGRADE, "WebSocket");
        // Lower-case "upgrade" value — distinguishes this initializer from
        // ConnectSessionInitializerGetUpgradeCap, which sends "Upgrade".
        connectSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "upgrade");
    }
}
/**
 * Session initializer: GET with "Upgrade: WebSocket" and capitalized
 * "Connection: Upgrade" (the casing is the only difference from
 * ConnectSessionInitializerGetUpgrade).
 */
private static class ConnectSessionInitializerGetUpgradeCap implements IoSessionInitializer<ConnectFuture> {
    @Override
    public void initializeSession(IoSession session, ConnectFuture future) {
        final HttpConnectSession httpSession = (HttpConnectSession) session;
        httpSession.setMethod(HttpMethod.GET);
        httpSession.addWriteHeader(HttpHeaders.HEADER_UPGRADE, "WebSocket");
        httpSession.addWriteHeader(HttpHeaders.HEADER_CONNECTION, "Upgrade");
    }
}
}
| {
"pile_set_name": "Github"
} |
[Rank]
Die secunda in Octava Nativitatis S. Joannis Baptistæ;;Semiduplex;;2;;ex Sancti/06-24
[Rule]
ex Sancti/06-24;
Gloria
[Oratio]
Ô Dieu, qui nous avez rendu ce jour vénérable par la nativité du bienheureux Jean : accordez à votre peuple la grâce des joies spirituelles ; et dirigez les âmes de tous les fidèles dans la voie du salut éternel.
$Per Dominum
[Secreta]
Seigneur, nous accumulons les dons sur vos autels : célébrant avec l’honneur qui lui est dû, la nativité de celui qui a rendu hommage au Sauveur du monde, avant sa venue, et qui l’a désigné ensuite comme présent, en la personne de notre Seigneur Jésus-Christ, votre Fils.
$Qui tecum
[Postcommunio]
Que votre Église, Seigneur, trouve un sujet de joie en la naissance du bienheureux Jean-Baptiste, par qui elle a reconnu l’auteur de sa régénération, Notre-Seigneur Jésus-Christ, votre Fils.
$Qui tecum
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML>
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc -->
<title>Uses of Class javax.swing.plaf.basic.BasicSliderUI.TrackListener (Java SE 12 & JDK 12 )</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<link rel="stylesheet" type="text/css" href="../../../../../../jquery/jquery-ui.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
<script type="text/javascript" src="../../../../../../jquery/jszip/dist/jszip.min.js"></script>
<script type="text/javascript" src="../../../../../../jquery/jszip-utils/dist/jszip-utils.min.js"></script>
<!--[if IE]>
<script type="text/javascript" src="../../../../../../jquery/jszip-utils/dist/jszip-utils-ie.min.js"></script>
<![endif]-->
<script type="text/javascript" src="../../../../../../jquery/jquery-3.3.1.js"></script>
<script type="text/javascript" src="../../../../../../jquery/jquery-migrate-3.0.1.js"></script>
<script type="text/javascript" src="../../../../../../jquery/jquery-ui.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class javax.swing.plaf.basic.BasicSliderUI.TrackListener (Java SE 12 & JDK 12 )";
}
}
catch(err) {
}
//-->
var pathtoroot = "../../../../../../";
var useModuleDirectories = true;
loadScripts(document, 'script');</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<header role="banner">
<nav role="navigation">
<div class="fixedNav">
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a id="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a id="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../index.html">Overview</a></li>
<li><a href="../../../../../module-summary.html">Module</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><div style="margin-top: 14px;"><strong>Java SE 12 & JDK 12</strong> </div></div>
</div>
<div class="subNav">
<ul class="navListSearch">
<li><label for="search">SEARCH:</label>
<input type="text" id="search" value="search" disabled="disabled">
<input type="reset" id="reset" value="reset" disabled="disabled">
</li>
</ul>
</div>
<a id="skip.navbar.top">
<!-- -->
</a>
<!-- ========= END OF TOP NAVBAR ========= -->
</div>
<div class="navPadding"> </div>
<script type="text/javascript"><!--
$('.navPadding').css('padding-top', $('.fixedNav').css("height"));
//-->
</script>
</nav>
</header>
<main role="main">
<div class="header">
<h2 title="Uses of Class javax.swing.plaf.basic.BasicSliderUI.TrackListener" class="title">Uses of Class<br>javax.swing.plaf.basic.BasicSliderUI.TrackListener</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<div class="useSummary">
<table>
<caption><span>Packages that use <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<th class="colFirst" scope="row"><a href="#javax.swing.plaf.basic">javax.swing.plaf.basic</a></th>
<td class="colLast">
<div class="block">Provides user interface objects built according to the Basic look and feel.</div>
</td>
</tr>
</tbody>
</table>
</div>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList">
<section role="region"><a id="javax.swing.plaf.basic">
<!-- -->
</a>
<h3>Uses of <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a> in <a href="../package-summary.html">javax.swing.plaf.basic</a></h3>
<div class="useSummary">
<table>
<caption><span>Fields in <a href="../package-summary.html">javax.swing.plaf.basic</a> declared as <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colSecond" scope="col">Field</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>protected <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a></code></td>
<th class="colSecond" scope="row"><span class="typeNameLabel">BasicSliderUI.</span><code><span class="memberNameLink"><a href="../BasicSliderUI.html#trackListener">trackListener</a></span></code></th>
<td class="colLast">
<div class="block">Track listener</div>
</td>
</tr>
</tbody>
</table>
</div>
<div class="useSummary">
<table>
<caption><span>Methods in <a href="../package-summary.html">javax.swing.plaf.basic</a> that return <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colSecond" scope="col">Method</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>protected <a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">BasicSliderUI.TrackListener</a></code></td>
<th class="colSecond" scope="row"><span class="typeNameLabel">BasicSliderUI.</span><code><span class="memberNameLink"><a href="../BasicSliderUI.html#createTrackListener(javax.swing.JSlider)">createTrackListener</a></span>​(<a href="../../../JSlider.html" title="class in javax.swing">JSlider</a> slider)</code></th>
<td class="colLast">
<div class="block">Creates a track listener.</div>
</td>
</tr>
</tbody>
</table>
</div>
</section>
</li>
</ul>
</li>
</ul>
</div>
</main>
<footer role="contentinfo">
<nav role="navigation">
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a id="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a id="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../index.html">Overview</a></li>
<li><a href="../../../../../module-summary.html">Module</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../BasicSliderUI.TrackListener.html" title="class in javax.swing.plaf.basic">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><div style="margin-top: 14px;"><strong>Java SE 12 & JDK 12</strong> </div></div>
</div>
<a id="skip.navbar.bottom">
<!-- -->
</a>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</nav>
<p class="legalCopy"><small><a href="https://bugreport.java.com/bugreport/">Report a bug or suggest an enhancement</a><br> For further API reference and developer documentation see the <a href="https://docs.oracle.com/pls/topic/lookup?ctx=javase12.0.2&id=homepage" target="_blank">Java SE Documentation</a>, which contains more detailed, developer-targeted descriptions with conceptual overviews, definitions of terms, workarounds, and working code examples.<br> Java is a trademark or registered trademark of Oracle and/or its affiliates in the US and other countries.<br> <a href="../../../../../../../legal/copyright.html">Copyright</a> © 1993, 2019, Oracle and/or its affiliates, 500 Oracle Parkway, Redwood Shores, CA 94065 USA.<br>All rights reserved. Use is subject to <a href="https://www.oracle.com/technetwork/java/javase/terms/license/java12.0.2speclicense.html">license terms</a> and the <a href="https://www.oracle.com/technetwork/java/redist-137594.html">documentation redistribution policy</a>. <!-- Version 12.0.2+10 --></small></p>
</footer>
</body>
</html>
| {
"pile_set_name": "Github"
} |
import { TimePickerBase, getValidTime, timeProperty, hourProperty, minuteProperty } from './time-picker-common';
export * from './time-picker-common';
// Constructor type of the native OnTimeChangedListener implementation.
// The variable below is assigned lazily by initializeTimeChangedListener(),
// because extending an Android class can only happen at runtime.
interface TimeChangedListener {
    new (owner: TimePicker): android.widget.TimePicker.OnTimeChangedListener;
}
let TimeChangedListener: TimeChangedListener;
// Lazily defines the native OnTimeChangedListener subclass exactly once and
// caches it in the module-level TimeChangedListener variable.
function initializeTimeChangedListener(): void {
    // Already initialized — nothing to do.
    if (TimeChangedListener) {
        return;
    }
    // Cache the device API level; used by TimePicker below to choose between
    // the pre- and post-API-23 hour/minute setters.
    apiLevel = android.os.Build.VERSION.SDK_INT;
    @NativeClass
    @Interfaces([android.widget.TimePicker.OnTimeChangedListener])
    class TimeChangedListenerImpl extends java.lang.Object implements android.widget.TimePicker.OnTimeChangedListener {
        constructor(public owner: TimePicker) {
            super();
            // Return the native-backed proxy instead of the plain JS object.
            return global.__native(this);
        }
        onTimeChanged(picker: android.widget.TimePicker, hour: number, minute: number): void {
            const timePicker = this.owner;
            // Ignore callbacks triggered by our own programmatic updates
            // (see the setNative handlers in TimePicker).
            if (timePicker.updatingNativeValue) {
                return;
            }
            // Validate/clamp the new value before propagating it to the
            // cross-platform time property.
            const validTime = getValidTime(timePicker, hour, minute);
            timeProperty.nativeValueChange(timePicker, new Date(0, 0, 0, validTime.hour, validTime.minute));
        }
    }
    TimeChangedListener = TimeChangedListenerImpl;
}
// Android SDK version of the device; set in initializeTimeChangedListener().
let apiLevel: number;
// Android implementation of the cross-platform TimePicker control.
export class TimePicker extends TimePickerBase {
    nativeViewProtected: android.widget.TimePicker;
    // True while a setNative handler below is writing to the native widget;
    // the change listener uses it to ignore self-inflicted callbacks.
    updatingNativeValue: boolean;
    public createNativeView() {
        return new android.widget.TimePicker(this._context);
    }
    public initNativeView(): void {
        super.initNativeView();
        const nativeView = this.nativeViewProtected;
        initializeTimeChangedListener();
        const listener = new TimeChangedListener(this);
        nativeView.setOnTimeChangedListener(listener);
        // Keep strong references on the native view so the listener and
        // calendar are not garbage-collected while the view is alive.
        (<any>nativeView).listener = listener;
        const calendar = ((<any>nativeView).calendar = java.util.Calendar.getInstance());
        // Fall back to the current wall-clock time for any component the
        // user has not set explicitly.
        const hour = hourProperty.isSet(this) ? this.hour : calendar.get(java.util.Calendar.HOUR_OF_DAY);
        const minute = minuteProperty.isSet(this) ? this.minute : calendar.get(java.util.Calendar.MINUTE);
        const validTime = getValidTime(this, hour, minute);
        if (!timeProperty.isSet(this)) {
            this.time = new Date(0, 0, 0, validTime.hour, validTime.minute);
        }
    }
    [minuteProperty.setNative](value: number) {
        this.updatingNativeValue = true;
        try {
            // setMinute(int) exists from API 23; older devices use the
            // deprecated setCurrentMinute(Integer).
            if (apiLevel >= 23) {
                (<any>this.nativeViewProtected).setMinute(value);
            } else {
                this.nativeViewProtected.setCurrentMinute(new java.lang.Integer(value));
            }
        } finally {
            this.updatingNativeValue = false;
        }
    }
    [hourProperty.setNative](value: number) {
        this.updatingNativeValue = true;
        try {
            // setHour(int) exists from API 23; older devices use the
            // deprecated setCurrentHour(Integer).
            if (apiLevel >= 23) {
                (<any>this.nativeViewProtected).setHour(value);
            } else {
                this.nativeViewProtected.setCurrentHour(new java.lang.Integer(value));
            }
        } finally {
            this.updatingNativeValue = false;
        }
    }
}
| {
"pile_set_name": "Github"
} |
<!-- Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. -->
<template>
<div class="rk-progress">
<div class="rk-progress-inner" :style="`width:${precent}%;background-color: ${color};`"></div>
</div>
</template>
<script lang="ts">
import Vue from 'vue';
import { Component, Prop } from 'vue-property-decorator';
@Component({})
export default class RkProgress extends Vue {
  // Completion percentage (0-100); rendered as the inner bar's width.
  // NOTE(review): "precent" is a typo for "percent", but it is the public
  // prop name used by the template and by consumers — renaming would be a
  // breaking change, so it is documented rather than fixed here.
  @Prop({
    default: 0,
  })
  public precent!: number;
  // CSS color of the filled portion of the bar.
  @Prop({
    default: '#3fb1e3',
  })
  public color!: string;
}
</script>
<style lang="scss" scoped>
.rk-progress {
height: 4px;
border-radius: 2px;
background-color: rgba(196, 200, 225, 0.3);
}
.rk-progress-inner {
height: 100%;
border-radius: 2px;
}
</style>
| {
"pile_set_name": "Github"
} |
package com.mcxiaoke.koi
import android.util.Base64
import java.io.UnsupportedEncodingException
import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import java.security.SecureRandom
/**
* @author mcxiaoke
* *
* @version 1.0 2013.03.16
*/
/**
 * Internal helpers for randomness, UTF-8 conversion, and Base64 coding.
 */
private object Helper {

    /** Returns a random decimal string derived from a [SecureRandom] long. */
    fun getRandomString(): String = SecureRandom().nextLong().toString()

    /** Returns [size] cryptographically strong random bytes. */
    fun getRandomBytes(size: Int): ByteArray {
        val random = SecureRandom()
        val bytes = ByteArray(size)
        random.nextBytes(bytes)
        return bytes
    }

    /**
     * Encodes [text] as UTF-8 bytes.
     *
     * Fix: the previous try/catch around UnsupportedEncodingException was dead
     * code — the Charset-based Kotlin API (String.toByteArray(Charset)) never
     * throws it, unlike the JDK's charset-name-string overloads. The fallback
     * branch was unreachable.
     */
    fun getRawBytes(text: String): ByteArray = text.toByteArray(Charsets.UTF_8)

    /**
     * Decodes [data] as a UTF-8 string.
     * Fix: same dead-catch removal as [getRawBytes] — String(ByteArray, Charset)
     * never throws UnsupportedEncodingException.
     */
    fun getString(data: ByteArray): String = String(data, Charsets.UTF_8)

    /** Decodes a Base64 string (no line wrapping). */
    fun base64Decode(text: String): ByteArray {
        return Base64.decode(text, Base64.NO_WRAP)
    }

    /** Encodes bytes as Base64 without line wrapping. */
    fun base64Encode(data: ByteArray): String {
        return Base64.encodeToString(data, Base64.NO_WRAP)
    }
}
/**
 * Hex-encoded MD5 / SHA-1 / SHA-256 digest helpers built on [MessageDigest].
 */
object HASH {
    // Algorithm names understood by MessageDigest.getInstance().
    // Idiom fix: compile-time string constants declared as const val.
    private const val MD5 = "MD5"
    private const val SHA_1 = "SHA-1"
    private const val SHA_256 = "SHA-256"

    // Hex alphabets used by encodeHex().
    private val DIGITS_LOWER = charArrayOf('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f')
    private val DIGITS_UPPER = charArrayOf('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F')

    /** Lower-case hex MD5 digest of [data]. */
    fun md5(data: ByteArray): String = String(encodeHex(md5Bytes(data)))

    /** Lower-case hex MD5 digest of the UTF-8 bytes of [text]. */
    fun md5(text: String): String = String(encodeHex(md5Bytes(Helper.getRawBytes(text))))

    /** Raw MD5 digest bytes of [data]. */
    fun md5Bytes(data: ByteArray): ByteArray = getDigest(MD5).digest(data)

    /** Lower-case hex SHA-1 digest of [data]. */
    fun sha1(data: ByteArray): String = String(encodeHex(sha1Bytes(data)))

    /** Lower-case hex SHA-1 digest of the UTF-8 bytes of [text]. */
    fun sha1(text: String): String = String(encodeHex(sha1Bytes(Helper.getRawBytes(text))))

    /** Raw SHA-1 digest bytes of [data]. */
    fun sha1Bytes(data: ByteArray): ByteArray = getDigest(SHA_1).digest(data)

    /** Lower-case hex SHA-256 digest of [data]. */
    fun sha256(data: ByteArray): String = String(encodeHex(sha256Bytes(data)))

    /** Lower-case hex SHA-256 digest of the UTF-8 bytes of [text]. */
    fun sha256(text: String): String = String(encodeHex(sha256Bytes(Helper.getRawBytes(text))))

    /** Raw SHA-256 digest bytes of [data]. */
    fun sha256Bytes(data: ByteArray): ByteArray = getDigest(SHA_256).digest(data)

    /**
     * Returns a fresh [MessageDigest] for [algorithm].
     * @throws IllegalArgumentException if the algorithm is not supported.
     */
    fun getDigest(algorithm: String): MessageDigest =
        try {
            MessageDigest.getInstance(algorithm)
        } catch (e: NoSuchAlgorithmException) {
            throw IllegalArgumentException(e)
        }

    /** Hex-encodes [data]; lower-case unless [toLowerCase] is false. */
    fun encodeHex(data: ByteArray, toLowerCase: Boolean = true): CharArray =
        encodeHex(data, if (toLowerCase) DIGITS_LOWER else DIGITS_UPPER)

    /** Hex-encodes [data] using the supplied 16-character alphabet [toDigits]. */
    fun encodeHex(data: ByteArray, toDigits: CharArray): CharArray {
        val l = data.size
        val out = CharArray(l shl 1) // two hex chars per input byte
        var i = 0
        var j = 0
        while (i < l) {
            out[j++] = toDigits[(240 and data[i].toInt()).ushr(4)] // high nibble
            out[j++] = toDigits[15 and data[i].toInt()]            // low nibble
            i++
        }
        return out
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.rocketmq.replicator.schema;
/**
 * Names of the fields the replicator stores in its Connect schema.
 */
public enum FieldName {
    /** Field carrying the serialized RocketMQ MessageExt record. */
    COMMON_MESSAGE("MessageExt"),
    /** Field carrying the replication offset. */
    OFFSET("Offset");

    // Fix: field made final — enum constants are immutable and the key is
    // only assigned in the constructor.
    private final String key;

    FieldName(String key) {
        this.key = key;
    }

    /** Returns the schema key associated with this field. */
    public String getKey() {
        return key;
    }
}
| {
"pile_set_name": "Github"
} |
# NOTE - this repo is no longer maintained
We'll keep this up as a reference for anyone who may need it - please send any comments to [email protected] going forward
Thanks!
# NStack: Composable, typed streams and microservices for data analytics
## Introduction

NStack is a compute platform that is ideal for data analytics because it makes integrating data, publishing code, and connecting it all together really simple.
<!-- Think of it like Bash-like, type-safe, piping for containerised microservices that live on your cloud. -->
1. You can turn disparate data-sources -- such as databases, 3rd-party APIs, or HTTP endpoints -- into streams of typed records.
2. You can publish local code as functions on your cloud provider. Streams can be composed and connected with these functions using NStack's scripting language, and NStack automates all underlying infrastructure so you can focus on data-science instead of operations.
3. NStack provides end-to-end software life-cycle management for the data science process, including sharing and reuse, reproducibility, versioning, and runtime isolation.
## Getting Started
See the [website](https://nstack.com) for more information, or check out the [full documentation](https://docs.nstack.com).
### Intro Screencast
<a href="http://docs.nstack.com/en/latest/quick_start/index.html" target="_blank"><img src="https://asciinema.org/a/112733.png" width="600"/></a>
NStack is comprised of a CLI which runs on your machine, and a virtual machine which runs on the cloud.
### CLI Installation
The NStack CLI is available as self-contained executable for Linux, Windows, and macOS - binaries can be downloaded on our [releases page](https://github.com/nstack/nstack/releases).
Simply download `nstack-cli-{linux64,win64,macOS}` for your platform, uncompress, and run `nstack` from the Terminal/Command Prompt.
#### macOS
In addition to standalone download on the [releases page](https://github.com/nstack/nstack/releases), we have a [homebrew](https://brew.sh/) package that can easily be installed as follows,
```bash
$ brew tap nstack/nstack
$ brew cask install nstack-cli
```
#### Linux
We also provide RPM and DEB packages on the [releases page](https://github.com/nstack/nstack/releases) that will work with most common distros and can be installed via your system package manager.
<!--
We also have `yum` and `apt` repositories for Redhat and Debian- derived OSs that are updated on each release.
##### RedHat / Fedora / OpenSuse RPMs
A YUM/DNF repo for RedHat-based distros is located at http://distrepos.nstack.com/redhat - it includes both the `nstack-cli` and `nstack-server` packages,
```bash
sudo wget -O /etc/yum.repos.d/nstack.repo http://distrepos.nstack.com/redhat/nstack.repo
sudo dnf install nstack-cli
```
##### Ubuntu / Debian / Mint Debs
An Apt repo for Debian-based distros is located at http://distrepos.nstack.com/debian - it currently includes the `nstack-cli` package,
```bash
sudo wget -O /etc/sources.list.d/nstack.list http://distrepos.nstack.com/debian/nstack.list
sudo apt-get update
sudo apt-get install nstack-cli
```
-->
##### RPM Install
```bash
# change {version} as needed
dnf install https://github.com/nstack/nstack/releases/v{version}/nstack-cli-{version}.x86_64.rpm
```
##### DEB Install
```bash
# change {version} as needed
wget https://github.com/nstack/nstack/releases/v{version}/nstack-cli_{version}.amd64.deb
dpkg -i nstack-cli_{version}.amd64.deb
apt-get install -f
```
#### Compiling from source
NStack is built using [Haskell](https://wwww.haskell.org), to compile the CLI manually you will need the [Stack](https://www.haskellstack.org/) build tool.
Once this is installed, run the following commands from the git project root,
```bash
# stack setup only needed on first compile
stack setup
stack build nstack-cli
# install to user's local executable directory
stack install nstack-cli
```
### Server Installation
#### PaaS
NStack offers a free-of-charge PaaS (Platform as a Service) for demo use, which means you can try NStack without installing the server. Note that the PaaS is intended as a sandbox and the environment is wiped daily at 0600 UTC.
You can register an account and immediately start using NStack using the following command:
``nstack register <username> <email>``
This will send you an email with your credentials and instructions on getting started.
#### Host your own NStack server
To install your own NStack server, we provide a self-contained appliance VM:
- an AMI for AWS EC2 (`ami-53a47245`)
- a `.raw` disk image for hosting on your virtual machine of choice
We also provide an RPM for installing directly on a Red Hat-like server. These are all available on the [releases page](https://github.com/nstack/nstack/releases).
<!--
We also provide an RPM and an associated Yum repository for installing directly on a Red Hat-like server
```bash
sudo wget -O /etc/yum.repos.d/nstack.repo http://distrepos.nstack.com/redhat/nstack.repo
sudo dnf install nstack-server
```
-->
## Examples
### Basic Example

We can express this within the NStack scripting language locally as follows (just think of it as Bash for microservices).
```fsharp
module Demo:0.1.0 {
import NStack.Transformers:0.1.4 as T
import Acme.Classifiers:0.3.0 as C
// our analytics workflow
def workflow = Sources.Postgresql<(Text, Int)>
| T.transform { strength = 5 }
| C.classify { model = "RandomForest" }
| Sinks.S3blob<Text>
}
```
We can then build, deploy, and start this workflow on an NStack Server from the NStack CLI on Linux, macOS, or Windows.
```bash
> nstack build
Building Demo:0.1.0
> nstack start Demo:0.1.0.workflow
Workflow started as process 3.
```
### More Examples
See https://github.com/nstack/nstack-examples for a range of examples, which you can clone and use immediately, including
* [demos](https://github.com/nstack/nstack-examples/tree/master/demos) - Basic examples that demonstrate creating methods and composing them together into workflows
* [nstack](https://github.com/nstack/nstack-examples/tree/master/nstack) - A selection of NStack utility modules, including methods for uploading to S3 and processing images
* [iris](https://github.com/nstack/nstack-examples/tree/master/iris) - A Python-based classifier using `scikit-learn` that showcases building more complex modules with system dependencies and in-built data-sets
* [movies](https://github.com/nstack/nstack-examples/tree/master/movies) - A complex workflow composed from multiple individual services that processes movies data from the IMDB database, demonstrating composition, filtering, service configuration, and partial workflow reuse
## License and Contributing
### License
The NStack CLI is open-source and licensed under the [BSD3 license](https://opensource.org/licenses/BSD-3-Clause).
The NStack Server is provided free-of-charge for personal, hobbyist, non-commercial, and evaluation use. It is currently closed-source, however we'd like to open more of it up over time.
### Contributing
This repo is currently a mirror of our internal work, as development happens fairly rapidly.
However we welcome and encourage both Issues and PRs (PRs will require a CLA - as described in [CONTRIBUTING.md](https://github.com/nstack/nstack/blob/master/CONTRIBUTING.md)).
We're also looking into removing our mirror and working directly on this public repo if possible.
## What do people use NStack for?
### Productionising models
Productionise your models in the cloud without complex engineering, where they can be used in workflows and attached to data-sources. For instance, you can build a Random Forest classifier locally in Python, publish it to your cloud provider, and connect it to a streaming system, database, data-lake, or HTTP endpoint in under 10 minutes.
### Data Integration
Transform disparate and disconnected data-sources -- such as 3rd-party APIs, legacy infrastructure, or databases -- into streams of typed, structured records, which can be composed together. For instance, you could set up a workflow in the cloud which pipes the Twitter Ads API into your data lake (and even do some modelling in Python in-transit) in under 5 minutes.
## Features
- **Typed** Strongly-type your infrastructure and microservices to make them composable and secure
- **Streaming** Move your batch workloads and existing code to a streaming paradigm, without complex infrastructure
- **Fast** Really fast throughput by using the latest developments in the Linux kernel
- **Serverless** Modules are deployed as serverless, containerised, versioned, fully reproducible microservices
- **Composable** Compose infrastructure in a statically typed workflow language to automate operations
## Concepts
#### Modules
A module is a piece of code that has been published to NStack -- for instance, a Python class. Modules are comprised of one or more **functions** -- in the same way a class of Python has one or more methods on it. Modules can have dependencies, like files or operating system packages -- for instance, your training data, or the ``scikit-learn`` package.
#### Functions
Functions are "serverless" functions which live on modules -- for instance, the `predict` method on your Python class. Functions on NStack are _typed_, which means you define what kind of data they can take as input, and the kind of data they output. For instance, you can say that your `predict` method only takes `Text` and returns `Integer`. This is important because it means they can be safely composed together and reused, with the NStack platform guaranteeing type safety.
#### Sources & Sinks
A source is something which emits a stream of data. A sink is something which can receive a stream of data. Examples sources and sinks are databases, files, message-queues, and HTTP endpoints. Like modules, you can define the input and output types for your sources and sinks.
#### Workflows
Modules, sources, and sinks can be combined -- or _composed_ -- together to build workflows. This is accomplished using the NStack Workflow Language, a simple bash-like scripting language for connecting streams and functions together.
#### Processes
When a workflow is started and is running in the cloud, it becomes a process.
| {
"pile_set_name": "Github"
} |
import copy
import io
import os
import onnx
import torch
from inspect import signature
import warnings
import onnxruntime as ort
from . import _utils, amp, checkpoint, optim, postprocess, ORTTrainerOptions
from .model_desc_validation import _ORTTrainerModelDesc
from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference
class TrainStepInfo(object):
    r"""Private class used to store runtime information from current train step.

    After every train step, :py:meth:`ORTTrainer.train_step` updates the internal instance of
    :py:class:`.TrainStepInfo` residing on :py:class:`.ORTTrainer` with relevant information
    from the forward pass.

    This class shouldn't be accessed directly by the user, unless they really know what they are doing.
    Instead, :py:class:`.ORTTrainer` passes it to relevant class methods automatically,
    such as :py:method:`._LRScheduler.get_lr` or :py:class:`.LossScaler.update`.

    Args:
        optimizer_config (optim._OptimizerConfig): reference to optimizer config
        all_finite (bool, default is True): flag that indicates whether all gradients are still finite after last step
        fetches (list of str, default is None): list of output names to fetch from train_step/eval_step.
            Defaults to an empty list, which restores the normal behavior.
        optimization_step (int): indicates the number of optimizations performed. Used for learning rate scheduling
        step (int): indicates current training step. Used for gradient accumulation

    Example:

        .. code-block:: python

            info = TrainStepInfo(optimizer_config=optim.SGDConfig(lr=0.01))
            if info.all_finite:
                print(f'Yay, all gradients are finite at {step} step!')

    """

    def __init__(self, optimizer_config, all_finite=True, fetches=None, optimization_step=0, step=0):
        # BUGFIX: 'fetches' previously defaulted to a mutable list ([]), which is
        # created once at function-definition time and shared by every instance
        # that relies on the default -- mutations would leak across trainers.
        if fetches is None:
            fetches = []
        assert isinstance(optimizer_config, optim._OptimizerConfig),\
            "optimizer_config must be a optim._OptimizerConfig"
        assert isinstance(all_finite, bool),\
            "all_finite must be a bool"
        assert isinstance(fetches, list) and all([isinstance(item, str) for item in fetches]),\
            "fetches must be a list of str"
        assert isinstance(optimization_step, int) and optimization_step >= 0,\
            "optimization_step must be a positive int"
        assert (isinstance(step, int) and step >= 0),\
            "step must be a positive int"

        self.optimizer_config = optimizer_config
        self.all_finite = all_finite
        self.fetches = fetches
        self.optimization_step = optimization_step
        self.step = step
class ORTTrainer(object):
    r"""Pytorch frontend for ONNX Runtime training

    Entry point that exposes the C++ backend of ORT as a Pytorch frontend.

    Args:
        model (torch.nn.Module or onnx.ModelProto): either a PyTorch or ONNX model.
            When a PyTorch model and :py:attr:`loss_fn` are specified, :py:attr:`model` and :py:obj:`loss_fn` are combined.
            When a ONNX model is provided, the loss is identified by the flag :py:obj:`is_loss=True` in one of the :py:attr:`.model_desc.outputs` entries.
        model_desc (dict): model input and output description.
            This is used to identify inputs and outputs and their shapes, so that ORT can generate back propagation graph, plan memory allocation for
            training, and perform optimizations.
            :py:attr:`model_desc` must be consistent with the training :py:attr:`model` and have the following (:py:obj:`dict`) schema
            :py:obj:`{ 'inputs': [tuple(name, shape)], 'outputs': [tuple(name, shape, is_loss)]}`.
            :py:attr:`name` is a string representing the name of input or output of the model.
            For :py:obj:`model_desc['inputs']` entries, :py:attr:`name` must match input names of the original PyTorch model's :py:meth:`torch.nn.Module.forward` method.
            For ONNX models, both name and order of input names must match.
            For :py:obj:`model_desc['outputs']` entries, the order must match the original PyTorch's output as returned by :py:meth:`torch.nn.Module.forward` method.
            For ONNX models, both name and order of output names must match.
            :py:attr:`shape` is a list of string or integers that describes the shape of the input/output.
            Each dimension size can be either a string or an int. String means the dimension size is dynamic, while integers mean static dimensions.
            An empty list implies a scalar.
            Lastly, :py:attr:`is_loss` is a boolean (default is False) that flags if this output is considered a loss.
            ORT backend needs to know which output is loss in order to generate back propagation graph.
            Loss output must be specified when either :py:attr:`loss_fn` is specified or when loss is embedded in the model.
            Note that only one loss output is supported per model.
        optimizer_config (optim._OptimizerConfig): optimizer config.
            One of :py:class:`.optim.AdamConfig`, :py:class:`.optim.LambConfig` or :py:class:`.optim.SGDConfig`.
        loss_fn (callable, default is None): a PyTorch loss function.
            It takes two inputs [prediction, label] and outputs a scalar loss tensor.
            If provided, :py:attr:`loss_fn` is combined with the PyTorch :py:attr:`model` to form a combined PyTorch model.
            Inputs to the combined PyTorch model are concatenation of the :py:attr:`model`'s input and :py:attr:`loss_fn`'s label input.
            Outputs of the combined PyTorch model are concatenation of :py:attr:`loss_fn`'s loss output and :py:attr:`model`'s outputs.
        options (ORTTrainerOptions, default is None): options for additional features.

    Example:

        .. code-block:: python

            model = ...
            loss_fn = ...
            model_desc = {
                "inputs": [
                    ("input_ids", ["batch", "max_seq_len_in_batch"]),
                    ("attention_mask", ["batch", "max_seq_len_in_batch"]),
                    ("token_type_ids", ["batch", "max_seq_len_in_batch"]),
                    ("masked_lm_labels", ["batch", "max_seq_len_in_batch"]),
                    ("next_sentence_label", ["batch", 1])
                ],
                "outputs": [
                    ("loss", [], True),
                ],
            }
            optim_config = optim.LambConfig(param_groups = [ { 'params' : ['model_param0'], 'alpha' : 0.8, 'beta' : 0.7},
                                                            { 'params' : ['model_param1' , 'model_param_2'], 'alpha' : 0.0}
                                                          ],
                                            alpha=0.9, beta=0.999)
            ort_trainer = ORTTrainer(model, model_desc, optim_config, loss_fn)
    """

    def __init__(self, model, model_desc, optim_config, loss_fn=None, options=None):
        # Basic validation
        assert model is not None, "'model' is required and must be either a 'torch.nn.Module' or ONNX model"
        assert isinstance(model_desc, dict), "'model_desc' must be a 'dict'"
        assert isinstance(optim_config, optim._OptimizerConfig),\
            "'optim_config' is required and must be any of 'AdamConfig', 'LambConfig' or 'SGDConfig'"
        assert loss_fn is None or (callable(loss_fn) and len(signature(loss_fn).parameters) == 2),\
            "'loss_fn' must be either 'None' or a callable with two parameters"
        assert options is None or isinstance(options, ORTTrainerOptions),\
            "'options' must be either 'None' or 'ORTTrainerOptions'"

        # Model + Loss validation
        # Supported combinations are
        # ----------------------------------------
        # |   | Model            | Loss            |
        # ----------------------------------------
        # | 1 | torch.nn.Module  | None            |
        # | 2 | torch.nn.Module  | torch.nn.Module |
        # | 3 | ONNX             | None            |
        # ----------------------------------------
        self._torch_model = None
        self._onnx_model = None
        if isinstance(model, torch.nn.Module):
            # NOTE(review): this assert tests 'model' again rather than 'loss_fn',
            # so it is always true inside this branch and never enforces the
            # message's 'loss_fn' constraint; the earlier callable-with-2-params
            # assert above is the effective validation. TODO: confirm intent.
            assert loss_fn is None or isinstance(model, torch.nn.Module),\
                "'loss_fn' must be either 'None' or 'torch.nn.Module'"
            self._torch_model = model
            self.loss_fn = loss_fn
            # TODO: Subject to change after checkpoint redesign
            self._torch_state_dict_keys = list(model.state_dict().keys())
        elif isinstance(model, onnx.ModelProto):
            assert loss_fn is None, "'loss_fn' must not be specified when 'model' is an ONNX model"
            self._onnx_model = model
            self.loss_fn = None
        else:
            raise ValueError("'model' must be either 'torch.nn.Module' or 'onnx.ModelProto'")

        self.model_desc = _ORTTrainerModelDesc(model_desc)
        self.optim_config = optim_config

        # ORTTrainerOptions: default options when none provided
        if not options:
            options = ORTTrainerOptions()
        self.options = options
        if self.options.mixed_precision.enabled and not self.options.mixed_precision.loss_scaler:
            # TODO: Move this to model_desc_validation.py
            self.options.mixed_precision.loss_scaler = amp.loss_scaler.DynamicLossScaler()

        # Post processing ONNX model given as input
        if self._onnx_model:
            if self.options._internal_use.enable_internal_postprocess:
                self._onnx_model = postprocess.run_postprocess(self._onnx_model)
            if self.options._internal_use.extra_postprocess:
                self._onnx_model = self.options._internal_use.extra_postprocess(self._onnx_model)
                assert isinstance(self._onnx_model, onnx.ModelProto), "'extra_postprocess' must return a ONNX model"

            # When input model is already ONNX (and not exported from Pytorch within ORTTrainer),
            # append 'dtype' from ONNX into model description's
            for idx_i, i_desc in enumerate(self.model_desc.inputs):
                dtype = None
                for onnx_input in self._onnx_model.graph.input:
                    if onnx_input.name == i_desc.name:
                        dtype = _utils.dtype_onnx_to_torch(onnx_input.type.tensor_type.elem_type)
                        self.model_desc.add_type_to_input_description(idx_i, dtype)
                        break
                assert dtype is not None, f"ONNX model with unknown input type ({i_desc.name})"
            for idx_o, o_desc in enumerate(self.model_desc.outputs):
                dtype = None
                for onnx_output in self._onnx_model.graph.output:
                    if onnx_output.name == o_desc.name:
                        dtype = _utils.dtype_onnx_to_torch(onnx_output.type.tensor_type.elem_type)
                        self.model_desc.add_type_to_output_description(idx_o, dtype)
                        break
                assert dtype is not None, f"ONNX model with unknown output type ({o_desc.name})"

        # Set GPU device and memory limit
        if 'cuda' in self.options.device.id.lower():
            mem_limit = self.options.device.mem_limit
            if mem_limit > 0:
                ort.set_cuda_mem_limit(self.options.device.mem_limit)
            ort.set_cuda_device_id(_utils.get_device_index(self.options.device.id))

        # TODO: Subject to change after checkpoint redesign
        self._state_dict = {}

        self._train_step_info = TrainStepInfo(self.optim_config)
        self._training_session = None
        # Creates the backend session immediately when an ONNX model was given;
        # for PyTorch models this is a no-op until the first train/eval step.
        self._init_session()
    def eval_step(self, *args, **kwargs):
        r"""Evaluation step method

        Args:
            *args: Arbitrary arguments that are used as model input (data only)
            **kwargs: Arbitrary keyword arguments that are used as model input (data only)

        Returns:
            ordered :py:obj:`list` with model outputs as described by :py:attr:`.ORTTrainer.model_desc`
        """
        # Get data. CombineTorchModelLossFn takes label as last input and outputs loss first
        sample_input = self._prepare_model_input(self.model_desc.inputs,
                                                 None, None, *args, **kwargs)

        # Export model to ONNX on first use (PyTorch models only)
        if self._onnx_model is None:
            if self._torch_model is not None:
                self._init_onnx_model(sample_input)
            else:
                raise RuntimeError("Model is uninitialized. Only ONNX and PyTorch models are supported")

        # Prepare input/output description; restrict outputs to the
        # user-requested fetches subset, if one was set on the step info
        inputs_desc = self.model_desc.inputs
        outputs_desc = self.model_desc.outputs
        if self._train_step_info.fetches:
            outputs_desc = [o_desc for o_desc in outputs_desc if o_desc.name in self._train_step_info.fetches]
            if len(outputs_desc) != len(self._train_step_info.fetches):
                raise RuntimeError("The specified fetches list contains invalid output names")

        # Normalize input to a tuple
        if not isinstance(sample_input, (list, tuple)):
            sample_input = (sample_input,)

        # RunOptions: inference-only execution, skip training-specific paths
        run_options = ort.RunOptions()
        run_options.only_execute_path_to_fetches = True
        run_options.training_mode = False

        # Run a eval step and return
        session_run_results = self._training_session_run_helper(False,
                                                                sample_input,
                                                                inputs_desc,
                                                                outputs_desc,
                                                                run_options)

        # Output must be returned in the same order as defined in the model description
        results = [session_run_results[o_desc.name] for o_desc in outputs_desc]
        # A single output is returned unwrapped, matching PyTorch conventions
        return results[0] if len (results) == 1 else results
def save_as_onnx(self, path):
r"""Persists ONNX model into :py:attr:`path`
The model will be saved as a Google Protocol Buffers (aka protobuf) file as per ONNX standard.
The graph includes full information, including inference and training metadata.
Args:
path (str): Full path, including filename, to save the ONNX model in the filesystem
Raises:
RuntimeWarning: raised when neither `train_step` or `eval_step` was called at least once
ValueError: raised when `path` is not valid path
"""
if not self._training_session:
warnings.warn("Training session is not initialized yet. "
"'train_step' or 'eval_step' methods must be executed at least once before calling 'save_as_onnx()'.")
return
state_tensors = self._training_session.get_state()
self._update_onnx_model_initializers(state_tensors)
assert isinstance(path, str), "'path' must be a valid path string"
dir_name = os.path.dirname(path)
file_name = os.path.basename(path)
if (dir_name and not os.path.exists(dir_name)) or not file_name:
warnings.warn("'path' is not valid or does not exist")
return
with open(path, "wb") as f:
f.write(self._onnx_model.SerializeToString())
    def _check_model_export(self, input):
        """Compare PyTorch forward outputs against the exported ONNX model's outputs.

        Runs the exported graph through an ONNX Runtime InferenceSession (with
        Dropout effectively disabled) on the same sample input used during export
        and asserts the results are numerically close to the PyTorch outputs
        captured in ``self.torch_sample_outputs``.
        """
        from onnx import helper, TensorProto, numpy_helper
        import numpy as np
        from numpy.testing import assert_allclose
        import _test_helpers

        onnx_model_copy = copy.deepcopy(self._onnx_model)

        # Mute the dropout nodes: rewrite the producers of each Dropout's
        # 'ratio' and 'training_mode' inputs into constants (0.0 / False) so
        # inference is deterministic and comparable to eval-mode PyTorch.
        dropout_nodes = [n for n in onnx_model_copy.graph.node if n.op_type == 'Dropout']
        for node in dropout_nodes:
            # Locate the nodes producing this Dropout's ratio / training_mode inputs
            ratio_node = [n for n in onnx_model_copy.graph.node if node.input[1] in n.output][0]
            training_mode_node = [n for n in onnx_model_copy.graph.node if node.input[2] in n.output][0]
            training_mode_node.attribute.pop()
            ratio_node.attribute.pop()

            new_training_mode_arr = np.array(False, dtype=bool)
            new_ratio_arr = np.array(0.0, dtype=np.float32)
            new_training_mode = numpy_helper.from_array(new_training_mode_arr)
            new_ratio = numpy_helper.from_array(new_ratio_arr)
            training_mode_node.attribute.add().t.CopyFrom(new_training_mode)
            ratio_node.attribute.add().t.CopyFrom(new_ratio)
            # 4 corresponds to AttributeProto.TENSOR; the rewritten attribute
            # becomes the Constant node's 'value'
            training_mode_node.attribute[0].type = 4
            ratio_node.attribute[0].type = 4
            training_mode_node.attribute[0].name = "value"
            ratio_node.attribute[0].name = "value"

        _inference_sess = ort.InferenceSession(onnx_model_copy.SerializeToString())
        inf_inputs = {}
        for i, input_elem in enumerate(input):
            inf_inputs[_inference_sess.get_inputs()[i].name] = input_elem.cpu().numpy()
        _inference_outs = _inference_sess.run(None, inf_inputs)
        for torch_item, ort_item in zip(self.torch_sample_outputs, _inference_outs):
            assert_allclose(torch_item, ort_item, rtol=1e-2, atol=1e-6,
                            err_msg="Mismatch between outputs of PyTorch model and exported ONNX model. "
                                    "Note that different backends may exhibit small computational differences."
                                    "If this is within acceptable margin, or if there is random generator "
                                    "in the model causing inevitable mismatch, you can proceed training by "
                                    "setting the flag debug.check_model_export to False.")
    def train_step(self, *args, **kwargs):
        r"""Train step method

        After forward pass, an ordered list with all outputs described at :py:attr:`ORTTrainer.model_desc` is returned.
        Additional information relevant to the train step is maintained by :py:attr:`ORTTrainer._train_step_info`.
        See :py:class:`.TrainStepInfo` for details.

        Args:
            *args: Arbitrary arguments that are used as model input (data only)
            **kwargs: Arbitrary keyword arguments that are used as model input (data only)

        Returns:
            ordered :py:obj:`list` with model outputs as described by :py:attr:`ORTTrainer.model_desc`
        """
        # Export model to ONNX on first use (PyTorch models only)
        if self._onnx_model is None:
            sample_input = self._prepare_model_input(self.model_desc.inputs, None, None, *args, **kwargs)
            self._init_onnx_model(sample_input)

            # Debug Model Export if indicated
            if self.options.debug.check_model_export:
                self._check_model_export(sample_input)

        # Prepare inputs+lr and output descriptions
        inputs_desc = self._model_desc_inputs_with_lr
        outputs_desc = self.model_desc.outputs

        # Train step must be incremented *before* gradient accumulation code
        # Gradients are accumulated when
        # self._train_step_info.step % self.options.batch.gradient_accumulation_steps != 0,
        # and they are updated otherwise
        self._train_step_info.step += 1

        # RunOptions: choose the output set depending on fetches, gradient
        # accumulation, and mixed precision (mutually exclusive branches)
        run_options = None
        mixed_precision_without_fetches = False
        if self._train_step_info.fetches:
            outputs_desc = [o_desc for o_desc in outputs_desc if o_desc.name in self._train_step_info.fetches]
            if len(outputs_desc) != len(self._train_step_info.fetches):
                raise RuntimeError("The specified fetches list contains invalid output names")
        elif self._train_step_info.step % self.options.batch.gradient_accumulation_steps != 0:
            # Accumulation-only step: execute just the path to the accumulation output
            run_options = ort.RunOptions()
            run_options.only_execute_path_to_fetches = True
            outputs_desc = self._model_desc_outputs_with_gradient_accumulation
        elif self.options.mixed_precision.enabled:
            mixed_precision_without_fetches = True
            outputs_desc = self._model_desc_outputs_with_all_finite

        # Update Learning Rate if Necessary
        lr = self.optim_config.lr
        if self.options.lr_scheduler:
            lr = self.options.lr_scheduler._step(self._train_step_info)[0]

        # Loss Scale for mixed precision
        loss_scale = None
        if self.options.mixed_precision.enabled:
            loss_scaler = self.options.mixed_precision.loss_scaler
            assert loss_scaler, "Loss scaler is required when mixed precision is enabled"
            loss_scale = loss_scaler.loss_scale
            inputs_desc = self._model_desc_inputs_with_lr_and_loss_scale

        # Get data. CombineTorchModelLossFn takes label as last input and outputs loss first
        input = self._prepare_model_input(inputs_desc, lr, loss_scale, *args, **kwargs)

        # Normalize input
        # NOTE(review): 'args' is always a tuple inside a *args function, so this
        # branch never triggers, and 'args' is not used below -- appears to be
        # leftover code; confirm before removing.
        if not isinstance(args, (list, tuple)):
            args = (args,)

        # Run a train step and return
        session_run_results = self._training_session_run_helper(True, input, inputs_desc,
                                                                outputs_desc, run_options)
        if mixed_precision_without_fetches:
            # After session run with all_fp32_gradients_finite, we need to clear the training I/O binding's output
            # Otherwise next run with only_execute_path_to_fetches will lead to gradient all reduce
            # because all_fp32_gradients_finite is still in the feed.
            self._train_io_binding.clear_binding_outputs()

            is_all_finite = session_run_results[self.model_desc.all_finite.name]
            self._train_step_info.all_finite = is_all_finite
            if loss_scaler:
                loss_scaler.update(self._train_step_info)
            if is_all_finite:
                # Optimization step must be incremented *after* optimization is successful
                self._train_step_info.optimization_step += 1
        elif self._train_step_info.step % self.options.batch.gradient_accumulation_steps == 0:
            # Optimization step must be incremented *after* optimization is successful
            self._train_step_info.optimization_step += 1

        # Output must be returned in the same order as defined in the model description
        # or in the order specified by TrainStepInfo.fetches, if applicable
        if self._train_step_info.fetches:
            results = [session_run_results[o_desc] for o_desc in self._train_step_info.fetches]
        else:
            results = [session_run_results[o_desc.name] for o_desc in self.model_desc.outputs]
        return results[0] if len (results) == 1 else results
    def _convert_torch_model_loss_fn_to_onnx(self, inputs, device):
        """Export the (optionally loss-wrapped) PyTorch model to an ONNX ModelProto.

        Builds dynamic-axes info from the model description, wraps
        ``self._torch_model`` (and ``self.loss_fn``, if any) so positional ONNX
        inputs map back to the original ``forward()`` keywords, runs one sample
        forward pass to capture output dtypes, then exports with the PyTorch
        ONNX exporter in TRAINING mode.
        """
        # Dynamic axes: string-valued dims in the description become symbolic axes
        dynamic_axes = {}
        for input in self.model_desc.inputs:
            symbolic_axis = {}
            for i, axis in enumerate(input.shape):
                if isinstance(axis, str):
                    symbolic_axis[i] = axis
            if len(symbolic_axis):
                dynamic_axes[input.name] = symbolic_axis
        for output in self.model_desc.outputs:
            symbolic_axis = {}
            for i, axis in enumerate(output.shape):
                if isinstance(axis, str):
                    symbolic_axis[i] = axis
            if len(symbolic_axis):
                dynamic_axes[output.name] = symbolic_axis

        # Normalize sample inputs to a list ordered like model_desc.inputs,
        # moved to the requested export device
        if isinstance(inputs, torch.Tensor):
            inputs = [inputs]
        if isinstance(inputs, dict):
            sample_inputs = [inputs[k.name_].to(device=device) for k in self.model_desc.inputs]
        elif isinstance(inputs, (list, tuple)):
            sample_inputs = [input.to(device=device) for i, input in enumerate(inputs) if i < len(self.model_desc.inputs)]
        else:
            raise RuntimeError("Unexpected input type. Only torch.Tensor, or dict/list/tuple of torch.Tensor is supported.")

        # PyTorch ONNX exporter does not match argument names
        # This is an issue because the ONNX graph depends on all inputs to be specified

        # Validate loss_fn
        if self.loss_fn:
            sig_loss = signature(self.loss_fn)
            if len(sig_loss.parameters) != 2:
                raise RuntimeError("loss function should take two arguments - predict and label.")

        # Basic input names from model
        input_names = [input.name for input in self.model_desc.inputs]
        sig = signature(self._torch_model.forward)
        ordered_input_list = list(sig.parameters.keys())

        # Label from loss_fn goes after model input
        if self.loss_fn:
            ordered_input_list = [*ordered_input_list,
                                  list(sig_loss.parameters.keys())[1]]

        class CombineTorchModelLossFnWrapInput(torch.nn.Module):
            # Wrapper that dispatches positional ONNX inputs to the wrapped
            # model by keyword (matching forward()'s signature) and, when a
            # loss_fn is present, returns (loss, predictions).
            def __init__(self, model, loss_fn, input_names):
                super().__init__()
                self.model = model
                self.loss_fn = loss_fn
                self.input_names = input_names

            def forward(self, *inputs):
                sig = signature(self.model.forward)

                input_dict = {}
                for key in sig.parameters.keys():
                    if key in self.input_names:
                        input_dict[key] = inputs[self.input_names.index(key)]

                model_out = self.model(**input_dict)
                if self.loss_fn is None:
                    return model_out

                # Label is appended after all model inputs
                label = inputs[-1]
                preds = model_out
                return self.loss_fn(preds, label), preds

        model = CombineTorchModelLossFnWrapInput(self._torch_model, self.loss_fn, input_names)

        # Do an inference to grab output types
        model.eval()
        with torch.no_grad():
            # Deepcopy inputs, since input values may change after model run.
            sample_inputs_copy = copy.deepcopy(sample_inputs)
            try:
                # Deepcopy model, in case model is stateful and changes after model run.
                model_copy = copy.deepcopy(model)
            except Exception:
                model_copy = model
                warnings.warn("This model cannot be deep copied (or pickled), which is a required step for stateful models to be properly exported to ONNX."
                              " Compute will continue, but unexpected results may occur!")

            sample_outputs = model_copy(*sample_inputs_copy)
        # Kept for later comparison in _check_model_export
        self.torch_sample_outputs = sample_outputs

        model.train()
        if isinstance(sample_outputs, torch.Tensor):
            sample_outputs = [sample_outputs]

        # Append 'dtype' for model description's inputs/outputs
        for idx_i, sample_input in enumerate(sample_inputs):
            if idx_i < len(self.model_desc.inputs):
                self.model_desc.add_type_to_input_description(
                    idx_i, sample_input.dtype)
        for idx_o, sample_output in enumerate(sample_outputs):
            if idx_o < len(self.model_desc.outputs):
                self.model_desc.add_type_to_output_description(
                    idx_o, sample_output.dtype)

        # Export the model to ONNX (in-memory buffer)
        f = io.BytesIO()

        # Deepcopy inputs, since input values may change after model run.
        sample_inputs_copy = copy.deepcopy(sample_inputs)

        # Handle contrib OPs support
        from onnxruntime.training import register_custom_ops_pytorch_exporter
        if self.options._internal_use.enable_onnx_contrib_ops:
            # Enable contrib ops export from PyTorch
            register_custom_ops_pytorch_exporter.register_custom_op()
        else:
            # Unregister contrib ops, if they were registered in previous calls
            register_custom_ops_pytorch_exporter.unregister_custom_op()

        # Export torch.nn.Module to ONNX
        torch.onnx._export(model, tuple(sample_inputs_copy), f,
                           input_names=[input.name for input in self.model_desc.inputs],
                           output_names=[output.name for output in self.model_desc.outputs],
                           opset_version=self.options._internal_use.onnx_opset_version,
                           dynamic_axes=dynamic_axes,
                           _retain_param_name=True,
                           example_outputs=tuple(sample_outputs),
                           do_constant_folding=False,
                           training=torch.onnx.TrainingMode.TRAINING)
        onnx_model = onnx.load_model_from_string(f.getvalue())

        # Remove 'model.' prefix introduced by CombineTorchModelLossFn class
        if isinstance(model, CombineTorchModelLossFnWrapInput):
            replace_name_dict = {}
            for n in onnx_model.graph.initializer:
                if n.name.startswith('model.'):
                    replace_name_dict[n.name] = n.name[len('model.'):]
                    n.name = replace_name_dict[n.name]
            for n in onnx_model.graph.node:
                for i, name in enumerate(n.input):
                    if name in replace_name_dict:
                        n.input[i] = replace_name_dict[name]

        return onnx_model
    def _create_ort_training_session(self):
        """Build the ORT TrainingSession from the current ONNX model and options.

        Translates per-parameter optimizer settings into the attribute maps the
        C++ backend expects, fills in TrainingParameters, creates the session,
        and allocates the train/eval I/O bindings.
        """
        # Validating frozen_weights names: every frozen name must exist in the graph
        unused_frozen_weights = [n for n in self.options.utils.frozen_weights\
                                 if n not in [i.name for i in self._onnx_model.graph.initializer]]
        if unused_frozen_weights:
            raise RuntimeError("{} params from 'frozen_weights' not found in the ONNX model.".format(
                unused_frozen_weights))

        # Get loss name from model description
        loss_name = [item.name for item in self.model_desc.outputs if item.is_loss]
        assert len(loss_name) == 1, f"Only one loss output is supported ({len(loss_name)} were specified)"
        loss_name = loss_name[0]

        # Parse optimizer parameters: split float vs int hyperparameters per weight
        optimizer_attributes_map = {}
        optimizer_int_attributes_map = {}
        trainable_params = set()
        for initializer in self._onnx_model.graph.initializer:
            if initializer.name in self.options.utils.frozen_weights:
                continue  # only trainable parameters are passed to the backend
            trainable_params.add(initializer.name)
            optimizer_attributes_map[initializer.name] = {}
            optimizer_int_attributes_map[initializer.name] = {}
            not_in_param_groups = True
            for param_group in self.optim_config.params:
                if initializer.name not in param_group['params']:
                    continue  # keep looking for a matching param_group
                not_in_param_groups = False
                for k, v in param_group.items():
                    # 'params' is not a hyper parameter, skip it. 'lr' per weight is not supported
                    if k == 'params' or k == 'lr':
                        continue
                    if isinstance(v, float):
                        optimizer_attributes_map[initializer.name][k] = v
                    elif isinstance(v, int):
                        optimizer_int_attributes_map[initializer.name][k] = v
                    else:
                        raise ValueError("Optimizer attributes must be either float or int.")

            # set default values for params not found in groups
            if not_in_param_groups:
                for k, v in self.optim_config.defaults.items():
                    if k == 'lr':
                        continue
                    if isinstance(v, float):
                        optimizer_attributes_map[initializer.name][k] = v
                    elif isinstance(v, int):
                        optimizer_int_attributes_map[initializer.name][k] = v
                    else:
                        raise ValueError("Optimizer attributes must be either float or int.")

        # TrainingParameters: mirror user-facing options into the backend config
        ort_parameters = ort.TrainingParameters()
        ort_parameters.loss_output_name = loss_name
        ort_parameters.use_mixed_precision = self.options.mixed_precision.enabled
        ort_parameters.world_rank = self.options.distributed.world_rank
        ort_parameters.world_size = self.options.distributed.world_size
        ort_parameters.gradient_accumulation_steps = self.options.batch.gradient_accumulation_steps
        ort_parameters.allreduce_post_accumulation = self.options.distributed.allreduce_post_accumulation
        ort_parameters.deepspeed_zero_stage = self.options.distributed.deepspeed_zero_optimization.stage
        ort_parameters.enable_grad_norm_clip = self.options.utils.grad_norm_clip
        ort_parameters.set_gradients_as_graph_outputs = False
        ort_parameters.use_invertible_layernorm_grad = self.options.utils.invertible_layer_norm_gradient
        ort_parameters.training_optimizer_name = self.optim_config.name
        ort_parameters.lr_params_feed_name = self.model_desc.learning_rate.name
        ort_parameters.weights_to_train = trainable_params
        ort_parameters.optimizer_attributes_map = optimizer_attributes_map
        ort_parameters.optimizer_int_attributes_map = optimizer_int_attributes_map

        # SessionOptions
        session_options = ort.SessionOptions()
        session_options.use_deterministic_compute = self.options.debug.deterministic_compute

        # TrainingSession
        self._training_session = ort.TrainingSession(self._onnx_model.SerializeToString(),
                                                     ort_parameters,
                                                     session_options)

        # I/O bindings: one for training, one for evaluation
        self._train_io_binding = self._training_session.io_binding()
        self._eval_io_binding = self._training_session.io_binding()
    def _init_onnx_model(self, inputs):
        """Lazily export the PyTorch model to ONNX (first call only) and create the session.

        Args:
            inputs: sample model input used to trace/export the PyTorch model
        """
        if self._onnx_model is not None:
            return

        if self._torch_model is not None:
            # PyTorch model is moved to cpu to save GPU memory
            self._torch_model.cpu()

            # PyTorch buffers (created using 'register_buffer') shouldn't be trained
            torch_buffers = list(dict(self._torch_model.named_buffers()).keys())
            self.options.utils.frozen_weights.extend(torch_buffers)

            # Export to ONNX
            self._onnx_model = self._convert_torch_model_loss_fn_to_onnx(inputs, 'cpu')

            # Post processing for ONNX models exported from PyTorch
            if self.options._internal_use.enable_internal_postprocess:
                self._onnx_model = postprocess.run_postprocess(self._onnx_model)
            if self.options._internal_use.extra_postprocess:
                self._onnx_model = self.options._internal_use.extra_postprocess(self._onnx_model)

        self._init_session()
def _init_session(self):
if self._onnx_model is None:
return
if self.options.utils.run_symbolic_shape_infer:
self._onnx_model = SymbolicShapeInference.infer_shapes(self._onnx_model, auto_merge=True, guess_output_rank=True)
# Create training session used by train_step
self._create_ort_training_session()
# Update model description to update dtype when mixed precision is enabled
# C++ backend modifies model's output dtype from float32 to float16 for mixed precision
# Note that for training we must use float32 and for evaluation we must use float16
for idx, o_desc in enumerate(self.model_desc.outputs):
if (self.options.mixed_precision.enabled and o_desc.dtype == torch.float32 and
not self._training_session.is_output_fp32_node(o_desc.name)):
self.model_desc.add_type_to_output_description(idx, o_desc.dtype, torch.float16)
# Update model description
self._model_desc_inputs_with_lr = [*self.model_desc.inputs, self.model_desc.learning_rate]
# Update Mixed Precision, if applicable
if self.options.mixed_precision.enabled:
self.model_desc.loss_scale_input = self._training_session.loss_scale_input_name
self._model_desc_inputs_with_lr_and_loss_scale = [
*self._model_desc_inputs_with_lr, self.model_desc.loss_scale_input]
self.model_desc.all_finite = _utils.get_all_gradients_finite_name_from_session(self._training_session)
self._model_desc_outputs_with_all_finite = [*self.model_desc.outputs, self.model_desc.all_finite]
elif self.options.mixed_precision.loss_scaler:
raise ValueError("Loss Scaler cannot be specified when Mixed Precision is not enabled")
# Update Loss Scaler Input Name, if applicable
if self.options.mixed_precision.enabled and self.options.mixed_precision.loss_scaler:
self.options.mixed_precision.loss_scaler.input_name = self.model_desc.loss_scale_input.name
elif not self.options.mixed_precision.enabled and self.options.mixed_precision.loss_scaler:
raise ValueError("Loss Scaler cannot be specified when Mixed Precision is not enabled")
# Update Gradient Accumulation, if applicable
if self.options.batch.gradient_accumulation_steps > 1:
self.model_desc.gradient_accumulation = _utils.get_gradient_accumulation_name_from_session(self._training_session)
self._model_desc_outputs_with_gradient_accumulation = [
*self.model_desc.outputs, self.model_desc.gradient_accumulation]
# TODO: Subject to change after checkpoint redesign
if self._state_dict:
checkpoint.experimental_load_state_dict(self, self._state_dict, self._load_state_dict_strict)
self._state_dict_debug = self._state_dict
self._state_dict = {}
def _prepare_model_input(self, inputs_desc, lr, loss_scale, *inputs, **kwargs):
# Normalize input to tuple of samples
if type(inputs) == tuple and len(inputs) == 1 and type(inputs[0]) == list:
input = tuple(inputs[0])
else:
input = inputs
# Append input from 'kwargs'
for input_desc in inputs_desc:
if input_desc.name in kwargs:
input = input + (kwargs[input_desc.name],)
# Append learning rate
extra_inputs = 0
if lr is not None:
lr = torch.tensor([lr])
input += (lr,)
extra_inputs += 1
# Append loss scale
if loss_scale is not None:
assert self.options.mixed_precision.enabled, "Loss scale cannot be used without mixed precision"
loss_scale = torch.tensor([loss_scale])
input += (loss_scale,)
extra_inputs += 1
# Only assert length of input when fetches is not used
assert self._train_step_info.fetches or len(self.model_desc.inputs) + extra_inputs == len(input)
return input
def _resolve_symbolic_dimensions(self, inputs, inputs_desc, outputs_desc):
outputs = copy.deepcopy(outputs_desc)
resolved_dims = {}
for input, i_desc in zip(inputs, inputs_desc):
for i_idx, i_axis in enumerate(i_desc.shape):
if isinstance(i_axis, str):
if i_axis not in resolved_dims:
resolved_dims[i_axis] = input.size()[i_idx]
else:
assert resolved_dims[i_axis] == input.size()[i_idx],\
f"Mismatch in dynamic shape {i_axis}"
for o_desc in outputs:
for idx_o, o_axis in enumerate(o_desc.shape):
if isinstance(o_axis, str):
o_desc.shape[idx_o] = resolved_dims[o_axis]
unknown_dim = [o_desc.name for dim in o_desc.shape for o_desc in outputs if isinstance(dim, str)]
if unknown_dim:
raise RuntimeError(f"Cannot execute model with unknown output dimensions ({unknown_dim}")
return outputs
    def _training_session_run_helper(self, is_train, inputs, inputs_desc, outputs_desc, run_options=None):
        """Bind inputs and pre-allocated outputs to the proper I/O binding and run one step.

        Args:
            is_train (bool): selects the train vs eval I/O binding
            inputs: input tensors in description order
            inputs_desc: descriptions matching ``inputs``
            outputs_desc: output descriptions (symbolic dims resolved here)
            run_options: optional ort.RunOptions forwarded to the session

        Returns:
            dict mapping output name -> torch.Tensor filled by the session run
        """
        # Select IO binding
        if is_train:
            iobinding = self._train_io_binding
        else:
            iobinding = self._eval_io_binding

        # Bind input tensors (zero-copy, via data pointer)
        for input, input_desc in zip(inputs, inputs_desc):
            device_index = _utils.get_device_index_from_input(input)
            iobinding.bind_input(input_desc.name,
                                 input.device.type,
                                 device_index,
                                 _utils.dtype_torch_to_numpy(input.dtype),
                                 list(input.size()),
                                 input.data_ptr())

        # Bind output tensors: allocate on the configured device with the
        # resolved concrete shapes; dtype_amp (fp16) takes precedence when set
        outputs_desc_resolved = self._resolve_symbolic_dimensions(inputs, inputs_desc, outputs_desc)
        result = {}
        for output_desc in outputs_desc_resolved:
            torch_tensor = torch.zeros(output_desc.shape, device=self.options.device.id,
                                       dtype=output_desc.dtype_amp if output_desc.dtype_amp else output_desc.dtype)
            iobinding.bind_output(output_desc.name, torch_tensor.device.type, _utils.get_device_index(self.options.device.id),
                                  _utils.dtype_torch_to_numpy(torch_tensor.dtype),
                                  list(torch_tensor.size()), torch_tensor.data_ptr())
            result[output_desc.name] = torch_tensor

        # Run a train/eval step
        self._training_session.run_with_iobinding(iobinding, run_options)
        return result
def _update_onnx_model_initializers(self, state_tensors):
    r""" Updates ONNX graph initializers with state_tensors's values

    Usually called to save or load an ONNX model.

    The tensors names of state_tensors are compared to all ONNX initializer tensors
    and when the name matches, the ONNX graph is updated with the new value.
    """
    assert isinstance(state_tensors, dict), "state_tensors must be a dict"

    # Collect replacement tensors and the indices of the initializers
    # they supersede.
    replacements = []
    stale_indices = []
    for idx, initializer in enumerate(self._onnx_model.graph.initializer):
        if initializer.name in state_tensors:
            replacements.append(
                onnx.numpy_helper.from_array(state_tensors[initializer.name],
                                             initializer.name))
            stale_indices.append(idx)

    # Delete from highest index to lowest so earlier deletions do not
    # shift the positions of the remaining ones.
    for idx in sorted(stale_indices, reverse=True):
        del self._onnx_model.graph.initializer[idx]

    self._onnx_model.graph.initializer.extend(replacements)
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright 2015, 2016 Francesco Benincasa ([email protected]).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package sqlite.kripton58.array;
import com.abubusoft.kripton.annotation.BindType;
import sqlite.kripton58.BeanInner;
/**
 * Data bean with an id and two {@link BeanInner} array properties;
 * annotated with {@code @BindType} for Kripton binding (package
 * {@code sqlite.kripton58.array} — presumably exercises array-of-bean
 * persistence; confirm against the generated binders).
 */
@BindType
public class BeanBean {

  /** Numeric identifier of this bean. */
  public long id;

  /** First array of nested beans. */
  public BeanInner[] value;

  /** Second array of nested beans. */
  public BeanInner[] value2;
}
| {
"pile_set_name": "Github"
} |
# Fetcher for a Docker image's configuration JSON via `hocker-config`.
# Partially applies the shared package arguments to the generic fetcher,
# then takes the per-image arguments and delegates.
pkgargs@{ stdenv, lib, haskellPackages, writeText, gawk }:

let
  generic-fetcher =
    import ./generic-fetcher.nix pkgargs;
in

args@{ repository ? "library", imageName, tag, ... }:

generic-fetcher ({
  fetcher = "hocker-config";
  name = "${repository}_${imageName}_${tag}-config.json";
  # NOTE(review): the tag forwarded to the generic fetcher is fixed to
  # "unused" here; the real tag only participates in `name` above.
  tag = "unused";
} // args)
| {
"pile_set_name": "Github"
} |
// lodash/fp entry point for `padStart`: the base implementation is run
// through the fp `convert` helper, and the fp placeholder is attached.
var convert = require('./convert');

var func = convert('padStart', require('../padStart'));
func.placeholder = require('./placeholder');

module.exports = func;
| {
"pile_set_name": "Github"
} |
// RUN: %clang_cc1 -fsyntax-only -verify -Wc++11-compat %s
// Member-declaration semantics checked under clang -verify; the inline
// // comments are diagnostic annotations consumed by the compiler.
class C {
public:
  auto int errx; // expected-error {{storage class specified for a member declaration}}
#if __cplusplus <= 199711L
  // expected-warning@-2 {{'auto' storage class specifier is redundant}}
#else
  // expected-warning@-4 {{'auto' storage class specifier is not permitted in C++11, and will not be supported in future releases}}
#endif
  register int erry; // expected-error {{storage class specified for a member declaration}}
  extern int errz; // expected-error {{storage class specified for a member declaration}}

  static void sm() {
    sx = 0;
    this->x = 0; // expected-error {{invalid use of 'this' outside of a non-static member function}}
    x = 0; // expected-error {{invalid use of member 'x' in static member function}}
  }

  class NestedC {
  public:
    NestedC(int);
    void f() {
      sx = 0;
      x = 0; // expected-error {{use of non-static data member 'x' of 'C' from nested type 'NestedC'}}
      sm();
      m(); // expected-error {{call to non-static member function 'm' of 'C' from nested type 'NestedC'}}
    }
  };

  int b : 1, w : 2;
  int : 1, : 2;
  typedef int E : 1; // expected-error {{typedef member 'E' cannot be a bit-field}}
  static int sb : 1; // expected-error {{static member 'sb' cannot be a bit-field}}
  static int vs;

  typedef int func();
  func tm;
  func *ptm;
  func btm : 1; // expected-error {{bit-field 'btm' has non-integral type}}
  NestedC bc : 1; // expected-error {{bit-field 'bc' has non-integral type}}

  enum E1 { en1, en2 };

  int i = 0;
#if __cplusplus <= 199711L
  // expected-warning@-2 {{in-class initialization of non-static data member is a C++11 extension}}
#endif
  static int si = 0; // expected-error {{non-const static data member must be initialized out of line}}
  static const NestedC ci = 0; // expected-error {{static data member of type 'const C::NestedC' must be initialized out of line}}
  static const int nci = vs; // expected-error {{in-class initializer for static data member is not a constant expression}}
  static const int vi = 0;
  static const volatile int cvi = 0; // ok, illegal in C++11
#if __cplusplus >= 201103L
  // expected-error@-2 {{static const volatile data member must be initialized out of line}}
#endif
  static const E evi = 0;

  void m() {
    sx = 0;
    this->x = 0;
    y = 0;
    this = 0; // expected-error {{expression is not assignable}}
  }

  int f1(int p) {
    A z = 6;
    return p + x + this->y + z;
  }

  typedef int A;

  virtual int viv; // expected-error {{'virtual' can only appear on non-static member functions}}
  virtual static int vsif(); // expected-error {{'virtual' can only appear on non-static member functions}}
  virtual int vif();

private:
  int x,y;
  static int sx;

  mutable int mi;
  mutable int &mir; // expected-error {{'mutable' cannot be applied to references}}
  mutable void mfn(); // expected-error {{'mutable' cannot be applied to functions}}
  mutable const int mci; // expected-error {{'mutable' and 'const' cannot be mixed}}

  static const int number = 50;
  static int arr[number];
};
// Local classes inside a member function may refer to the function's
// static locals; no diagnostics are produced here.
class C2 {
  void f() {
    static int lx;
    class LC1 {
      int m() { return lx; }
    };
    class LC2 {
      int m() { return lx; }
    };
  }
};
// Aggregate whose member 'j' stays writable even on a const object.
struct C3 {
  int i;
  mutable int j;
};
// static_cast may not cast away constness of 'i', but the mutable
// member 'j' is non-const even within a const C3.
void f()
{
  const C3 c3 = { 1, 2 };
  (void)static_cast<int*>(&c3.i); // expected-error {{static_cast from 'const int *' to 'int *' is not allowed}}
  // but no error here
  (void)static_cast<int*>(&c3.j);
}
// 'mutable' is only valid on non-static data members; every use below
// at namespace or local scope is diagnosed.
// Play with mutable a bit more, to make sure it doesn't crash anything.
mutable int gi; // expected-error {{'mutable' can only be applied to member variables}}
mutable void gfn(); // expected-error {{illegal storage class on function}}

void ogfn()
{
  mutable int ml; // expected-error {{'mutable' can only be applied to member variables}}

  // PR3020: This used to crash due to double ownership of C4.
  struct C4;
  C4; // expected-warning {{declaration does not declare anything}}
}
// A data member may not reuse the name of a member function.
struct C4 {
  void f(); // expected-note{{previous declaration is here}}
  int f; // expected-error{{duplicate member 'f'}}
};
// PR5415 - don't hang!
// Redeclaring a member with a spurious 'S::' qualifier inside the class.
struct S
{
  void f(); // expected-note 1 {{previous declaration}} expected-note {{previous declaration}}
  void S::f() {} // expected-error {{extra qualification on member}} expected-error {{class member cannot be redeclared}}
  void f() {} // expected-error {{class member cannot be redeclared}}
};
// Don't crash on this bogus code.
// Unknown base template plus undeclared specialization arguments.
namespace pr6629 {
template<class T1, class T2> struct foo :
    bogus<foo<T1,T2> > // expected-error {{unknown template name 'bogus'}}
{ };

template<> struct foo<unknown,unknown> { // expected-error {{undeclared identifier 'unknown'}}
  template <typename U1, typename U2> struct bar {
    typedef bar type;
    static const int value = 0;
  };
};
}
// PR7153: assignment through a mutable pointer member of a const
// object is allowed (the pointer itself is mutable).
namespace PR7153 {
  class EnclosingClass {
  public:
    struct A { } mutable *member;
  };

  void f(const EnclosingClass &ec) {
    ec.member = 0;
  }
}
// PR7196: sizeof on an unqualified non-static member is fine inside a
// member function (unevaluated context).
namespace PR7196 {
  struct A {
    int a;

    void f() {
      char i[sizeof(a)];
      enum { x = sizeof(i) };
      enum { y = sizeof(a) };
    }
  };
}
// A missing semicolon after a class definition is diagnosed, not fatal.
namespace rdar8066414 {
  class C {
    C() {}
  } // expected-error{{expected ';' after class}}
}
// In-class initializers for static const float members: a GNU
// extension in C++03, constexpr (constant-expression rules) in C++11.
namespace rdar8367341 {
  float foo();
#if __cplusplus >= 201103L
  // expected-note@-2 {{declared here}}
#endif

  struct A {
#if __cplusplus <= 199711L
    static const float x = 5.0f; // expected-warning {{in-class initializer for static data member of type 'const float' is a GNU extension}}
    static const float y = foo(); // expected-warning {{in-class initializer for static data member of type 'const float' is a GNU extension}} expected-error {{in-class initializer for static data member is not a constant expression}}
#else
    static constexpr float x = 5.0f;
    static constexpr float y = foo(); // expected-error {{constexpr variable 'y' must be initialized by a constant expression}} expected-note {{non-constexpr function 'foo' cannot be used in a constant expression}}
#endif
  };
}
// Naming an anonymous-union member through the enclosing class
// requires an object.
namespace with_anon {
struct S {
  union {
    char c;
  };
};

void f() {
  S::c; // expected-error {{invalid use of non-static data member}}
}
}
// Self-referential sizeof inside a static member's initializer.
struct PR9989 {
  static int const PR9989_Member = sizeof PR9989_Member;
};
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright (C) 2019 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- 24dp vector drawable: an outer path filled with the theme's
     activated control color and an inner 5-radius circle filled with
     the floating-background color (punches a disc into the shape). -->
<vector xmlns:android="http://schemas.android.com/apk/res/android"
    android:height="24dp"
    android:viewportHeight="24"
    android:viewportWidth="24"
    android:width="24dp" >
    <path
        android:fillColor="?android:attr/colorControlActivated"
        android:pathData="M4,15.3V19c0,0.55,0.45,1,1,1h3.69l2.6,2.6c0.39,0.39,1.02,0.39,1.41,0l2.6-2.6H19c0.55,0,1-0.45,1-1v-3.69l2.6-2.6 c0.39-0.39,0.39-1.02,0-1.41L20,8.69V5c0-0.55-0.45-1-1-1h-3.69l-2.6-2.6c-0.39-0.39-1.02-0.39-1.41,0L8.69,4H5C4.45,4,4,4.45,4,5 v3.69l-2.6,2.6c-0.39,0.39-0.39,1.02,0,1.41L4,15.3z M12,7c2.76,0,5,2.24,5,5s-2.24,5-5,5s-5-2.24-5-5S9.24,7,12,7z" />
    <path
        android:fillColor="?android:attr/colorBackgroundFloating"
        android:pathData="M 12 7 C 14.7614237492 7 17 9.23857625085 17 12 C 17 14.7614237492 14.7614237492 17 12 17 C 9.23857625085 17 7 14.7614237492 7 12 C 7 9.23857625085 9.23857625085 7 12 7 Z" />
</vector>
"pile_set_name": "Github"
} |
// Foundation for Apps
// by ZURB
// foundation.zurb.com
// Licensed under MIT Open Source
$foundation-version: "1.2.0";
// Make sure the charset is set appropriately
@charset "UTF-8";
// Libraries (let's make Normalize an external dependency eventually)
@import "vendor/normalize";
// Helpers
@import "helpers/functions", "helpers/mixins", "helpers/breakpoints",
"helpers/images";
// Global styles
@import "global";
// Components
@import "components/iconic", "components/block-list", "components/button",
"components/button-group", "components/card", "components/extras",
"components/forms", "components/grid", "components/title-bar",
"components/label", "components/list", "components/menu-bar",
"components/modal", "components/motion", "components/notification",
"components/off-canvas", "components/popup", "components/switch",
"components/tabs", "components/accordion", "components/typography",
"components/utilities";
| {
"pile_set_name": "Github"
} |
/*
* OpenPBS (Portable Batch System) v2.3 Software License
*
* Copyright (c) 1999-2000 Veridian Information Solutions, Inc.
* All rights reserved.
*
* ---------------------------------------------------------------------------
* For a license to use or redistribute the OpenPBS software under conditions
* other than those described below, or to purchase support for this software,
* please contact Veridian Systems, PBS Products Department ("Licensor") at:
*
* www.OpenPBS.org +1 650 967-4675 [email protected]
* 877 902-4PBS (US toll-free)
* ---------------------------------------------------------------------------
*
* This license covers use of the OpenPBS v2.3 software (the "Software") at
* your site or location, and, for certain users, redistribution of the
* Software to other sites and locations. Use and redistribution of
* OpenPBS v2.3 in source and binary forms, with or without modification,
* are permitted provided that all of the following conditions are met.
* After December 31, 2001, only conditions 3-6 must be met:
*
* 1. Commercial and/or non-commercial use of the Software is permitted
* provided a current software registration is on file at www.OpenPBS.org.
* If use of this software contributes to a publication, product, or
* service, proper attribution must be given; see www.OpenPBS.org/credit.html
*
* 2. Redistribution in any form is only permitted for non-commercial,
* non-profit purposes. There can be no charge for the Software or any
* software incorporating the Software. Further, there can be no
* expectation of revenue generated as a consequence of redistributing
* the Software.
*
* 3. Any Redistribution of source code must retain the above copyright notice
* and the acknowledgment contained in paragraph 6, this list of conditions
* and the disclaimer contained in paragraph 7.
*
* 4. Any Redistribution in binary form must reproduce the above copyright
* notice and the acknowledgment contained in paragraph 6, this list of
* conditions and the disclaimer contained in paragraph 7 in the
* documentation and/or other materials provided with the distribution.
*
* 5. Redistributions in any form must be accompanied by information on how to
* obtain complete source code for the OpenPBS software and any
* modifications and/or additions to the OpenPBS software. The source code
* must either be included in the distribution or be available for no more
* than the cost of distribution plus a nominal fee, and all modifications
* and additions to the Software must be freely redistributable by any party
* (including Licensor) without restriction.
*
* 6. All advertising materials mentioning features or use of the Software must
* display the following acknowledgment:
*
* "This product includes software developed by NASA Ames Research Center,
* Lawrence Livermore National Laboratory, and Veridian Information
* Solutions, Inc.
* Visit www.OpenPBS.org for OpenPBS software support,
* products, and information."
*
* 7. DISCLAIMER OF WARRANTY
*
* THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT
* ARE EXPRESSLY DISCLAIMED.
*
* IN NO EVENT SHALL VERIDIAN CORPORATION, ITS AFFILIATED COMPANIES, OR THE
* U.S. GOVERNMENT OR ANY OF ITS AGENCIES BE LIABLE FOR ANY DIRECT OR INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This license will be governed by the laws of the Commonwealth of Virginia,
* without reference to its choice of law rules.
*/
#include <pbs_config.h> /* the master config generated by configure */
/* define the following so we get prototype for getsid() */
#define _XOPEN_SOURCE
#define _XOPEN_SOURCE_EXTENDED 1
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
#include <fcntl.h>
#include <netdb.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <netdb.h>
#ifdef _AIX
#include <arpa/inet.h>
#endif /* _AIX */
#include "dis.h"
#include "dis_init.h"
#include "tm.h"
#include "net_connect.h"
#include "pbs_ifl.h"
#include "../Libnet/lib_net.h"
#include "../Liblog/pbs_log.h" /* print_trace */
/*
** Set up a debug print macro.
*/
#ifdef DEBUG
#define TM_DBPRT(x) \
{ \
int err = errno; \
printf x; \
errno = err; \
}
#else
#define TM_DBPRT(x)
#endif
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
/*
** Allocate some string space to hold the values passed in the
** environment from MOM (see tm_init(), which fills these from the
** PBS_* environment variables).
*/
static char *tm_jobid = NULL;                 /* PBS_JOBID */
static int tm_jobid_len = 0;
static char *tm_jobcookie = NULL;             /* PBS_JOBCOOKIE */
static int tm_jobcookie_len = 0;
static tm_task_id tm_jobtid = TM_NULL_TASK;   /* PBS_TASKNUM */
static tm_node_id tm_jobndid = TM_ERROR_NODE; /* PBS_NODENUM */
static int tm_momport = 0;                    /* PBS_MOMPORT */

static int local_conn = -1;                   /* cached socket to the local MOM */
static struct tcp_chan *static_chan = NULL;

int init_done = 0;                            /* set once tm_init() has run */

int *tm_conn = &local_conn;

int event_count = 0;                          /* outstanding events (see add_event/del_event) */
/*
** Events are the central focus of this library. They are tracked
** in a hash table. Many of the library calls return events. They
** are recorded and as information is received from MOM's, the
** event is updated and marked so tm_poll() can return it to the user.
*/

#define EVENT_HASH 128

/* One outstanding request, chained (doubly linked) per hash bucket. */
typedef struct event_info
  {
  tm_event_t e_event; /* event number */
  tm_node_id e_node;  /* destination node */
  int e_mtype;        /* message type sent */
  void *e_info;       /* possible returned info */

  struct event_info *e_next; /* link to next event */
  struct event_info *e_prev; /* link to prev event */
  } event_info;

static event_info *event_hash[EVENT_HASH];
/*
** Return TRUE when the calling user owns process <pid>, judged by the
** uid that owns its /proc entry; FALSE when the stat() fails or the
** uids differ.
*/

bool ispidowner(pid_t pid)

  {
  char         procpath[MAXPATHLEN];
  struct stat  statbuf;

  /* compose the /proc entry for this pid */
  snprintf(procpath, sizeof(procpath), "/proc/%d", pid);

  /* if the stat fails, assume the caller is not the owner */
  if (stat(procpath, &statbuf) != 0)
    return(FALSE);

  return((getuid() == statbuf.st_uid) ? TRUE : FALSE);
  }
/*
** Find an event number or return a NULL.
*/

event_info *find_event(

  tm_event_t x)

  {
  event_info *cur = event_hash[x % EVENT_HASH];

  while ((cur != NULL) && (cur->e_event != x))
    cur = cur->e_next;

  return cur;
  }
/*
** Delete an event: unlink it from its hash chain, release any
** per-request payload, and drop the MOM connection when this was the
** last outstanding event.
*/

void del_event(

  event_info *ep)

  {
  /* unlink event from hash list */

  if (ep->e_prev)
    ep->e_prev->e_next = ep->e_next;
  else
    event_hash[ep->e_event % EVENT_HASH] = ep->e_next;

  if (ep->e_next)
    ep->e_next->e_prev = ep->e_prev;

  /*
  ** Free any memory saved with the event.  This depends
  ** on what type of event it is; only the reply-carrying types
  ** own their e_info buffer.
  */

  switch (ep->e_mtype)
    {

    case TM_INIT:
    case TM_SPAWN:
    case TM_SIGNAL:
    case TM_OBIT:
    case TM_POSTINFO:

      break;

    case TM_TASKS:
    case TM_GETINFO:
    case TM_RESOURCES:

      free(ep->e_info);

      break;

    default:

      TM_DBPRT(("del_event: unknown event command %d\n", ep->e_mtype))

      break;
    }

  free(ep);

  /* last outstanding event gone: close the connection to MOM */

  if (--event_count == 0)
    {
    close(local_conn);

    local_conn = -1;
    }

  return;
  }
/*
** Create a new event number: hand out sequential numbers, skipping
** any that are still present in the event hash table, and wrapping
** back to TM_NULL_EVENT + 1 before reaching INT_MAX.
*/

tm_event_t new_event(void)

  {
  static tm_event_t next_event = TM_NULL_EVENT + 1;

  tm_event_t  candidate;
  event_info *ep;

  if (next_event == INT_MAX)
    next_event = TM_NULL_EVENT + 1;

  /* keep taking the next number until one is not in use */

  do
    {
    candidate = next_event++;

    ep = event_hash[candidate % EVENT_HASH];

    while ((ep != NULL) && (ep->e_event != candidate))
      ep = ep->e_next;
    }
  while (ep != NULL);

  return candidate;
  }
/*
** Link a new event into the hash table.
**
** event - event number handed back to the caller
** node  - destination node for the request (TM_ERROR_NODE if none)
** type  - TM_* request type that generated the event
** info  - optional caller buffer updated when the reply arrives
**
** NOTE(review): allocation failure is only caught by assert(), so a
** failed calloc() aborts (or dereferences NULL under NDEBUG).
*/

void add_event(

  tm_event_t event,
  tm_node_id node,
  int  type,
  void  *info)

  {
  event_info *ep, **head;

  ep = (event_info *)calloc(1, sizeof(event_info));

  assert(ep != NULL);

  /* push onto the front of this event number's hash chain */
  head = &event_hash[event % EVENT_HASH];

  ep->e_event = event;
  ep->e_node = node;
  ep->e_mtype = type;
  ep->e_info = info;

  ep->e_next = *head;
  ep->e_prev = NULL;

  if (*head)
    (*head)->e_prev = ep;

  *head = ep;

  event_count++;

  return;
  }
/*
** Sessions must be tracked by the library so tm_taskid objects
** can be resolved into real tasks on real nodes.
** We will use a hash table.
*/

#define TASK_HASH 256

/* One known task, singly chained per hash bucket by task id. */
typedef struct task_info
  {
  char *t_jobid;      /* jobid */
  tm_task_id t_task;  /* task id */
  tm_node_id t_node;  /* node id */

  struct task_info *t_next; /* link to next task */
  } task_info;

static task_info *task_hash[TASK_HASH];
/*
** Find a task table entry for a given task number or return a NULL.
*/

task_info *find_task(

  tm_task_id x)

  {
  task_info *cur = task_hash[x % TASK_HASH];

  while ((cur != NULL) && (cur->t_task != x))
    cur = cur->t_next;

  return cur;
  }
/*
** Create a new task entry and link it into the task hash table.
**
** jobid - must name this process's job (pointer-equal or strcmp-equal
**         to tm_jobid); any other job is rejected
** node  - node the task lives on; TM_ERROR_NODE is rejected
** task  - task id assigned by MOM
**
** Returns the task id, or TM_NULL_TASK on bad arguments / allocation
** failure.  A task id that is already known is returned as-is and
** keeps its original node binding.
*/

tm_task_id new_task(

  char       *jobid,
  tm_node_id  node,
  tm_task_id  task)

  {
  task_info *tp, **head;

  TM_DBPRT(("%s: jobid=%s node=%d task=%lu\n",
            __func__, jobid, node, (unsigned long)task))

  if (jobid != tm_jobid && strcmp(jobid, tm_jobid) != 0)
    {
    TM_DBPRT(("%s: task job %s not my job %s\n",
              __func__, jobid, tm_jobid))

    return TM_NULL_TASK;
    }

  if (node == TM_ERROR_NODE)
    {
    TM_DBPRT(("%s: called with TM_ERROR_NODE\n", __func__))

    return TM_NULL_TASK;
    }

  if ((tp = find_task(task)) != NULL)
    {
    /* already tracked - report a node mismatch but keep the entry */
    TM_DBPRT(("%s: task %lu found with node %d should be %d\n",
              __func__, (unsigned long)task, tp->t_node, node))

    return task;
    }

  if ((tp = (task_info *)calloc(1, sizeof(task_info))) == NULL)
    return TM_NULL_TASK;

  /* push onto the front of this task id's hash chain */
  head = &task_hash[task % TASK_HASH];

  tp->t_jobid = tm_jobid;
  tp->t_task = task;
  tp->t_node = node;
  tp->t_next = *head;

  *head = tp;

  return task;
  }
/*
** Delete a task.
===
=== right now, this is not used.
===
static void
del_task(x)
tm_task_id x;
{
task_info *tp, *prev;
prev = NULL;
for (tp=task_hash[x % TASK_HASH]; tp; prev=tp, tp=tp->t_next) {
if (tp->t_task == x)
break;
}
if (tp) {
if (prev)
prev->t_next = tp->t_next;
else
task_hash[x % TASK_HASH] = tp->t_next;
tp->t_next = NULL;
if (tp->t_jobid != tm_jobid)
free(tp->t_jobid);
free(tp);
}
return;
}
*/
/*
** The nodes are tracked in an array, terminated by TM_ERROR_NODE
** (see tm_nodeinfo()).  NULL until the TM_INIT reply arrives.
*/
static tm_node_id *node_table = NULL;
/*
** localmom() - make a connection to the local pbs_mom
**
** The connection will remain open as long as there is an
** outstanding event.  Transient connect failures are retried up to
** five times with a one second pause between attempts.
**
** Returns the connected socket (also cached in local_conn) or -1.
*/

#define PBS_NET_RC_FATAL -1
#define PBS_NET_RC_RETRY -2

static int localmom(void)

  {
  static int have_addr = 0;

  static struct in_addr hostaddr;
  struct addrinfo *addr_info;

  int i;
  int sock;

  struct sockaddr_in remote;

  struct linger ltime;

  if (local_conn >= 0)
    {
    return(local_conn); /* already have open connection */
    }

  memset(&remote, 0, sizeof(remote));

  if (have_addr == 0)
    {
    /* lookup "localhost" and save address */

    if (pbs_getaddrinfo("localhost", NULL, &addr_info) != 0)
      {
      TM_DBPRT(("tm_init: localhost not found\n"))

      return(-1);
      }

    hostaddr = ((struct sockaddr_in *)addr_info->ai_addr)->sin_addr;

    have_addr = 1;
    }

  for (i = 0;i < 5;i++)
    {
    /* get socket */

    sock = socket(AF_INET, SOCK_STREAM, 0);

    if (sock < 0)
      {
      return(-1);
      }

#ifndef HAVE_POLL
    /* select()-based I/O cannot handle descriptors >= FD_SETSIZE */
    if (sock >= FD_SETSIZE)
      {
      close(sock);

      return(-1);
      }
#endif

    /* make sure data goes out: linger up to 5 seconds on close */

    ltime.l_onoff = 1;
    ltime.l_linger = 5;

    /* BUG FIX: the argument had been corrupted to "<ime" (an HTML
    ** "&lt;" mangling of "&ltime"), which does not compile. */
    setsockopt(sock, SOL_SOCKET, SO_LINGER, &ltime, sizeof(ltime));

    /* connect to specified local pbs_mom and port */

    remote.sin_addr = hostaddr;
    remote.sin_port = htons((unsigned short)tm_momport);
    remote.sin_family = AF_INET;

    if (connect(sock, (struct sockaddr *)&remote, sizeof(remote)) < 0)
      {
      switch (errno)
        {

        case EINTR:
        case EADDRINUSE:
        case ETIMEDOUT:
        case ECONNREFUSED:

          /* transient failure - retry after a short pause */
          close(sock);

          sleep(1);

          continue;

          /*NOTREACHED*/

          break;

        default:

          close(sock);

          return(-1);

          /*NOTREACHED*/

          break;
        }
      }
    else
      {
      local_conn = sock;

      break;
      }
    }  /* END for (i) */

  return(local_conn);
  }  /* END localmom() */
/*
** startcom() - send request header to local pbs_mom.
** If required, make connection to her.
**
** com   - TM_* request type
** event - event number identifying this request
** pchan - out: DIS channel for the caller to append payload to;
**         only set on DIS_SUCCESS
**
** Returns DIS_SUCCESS, a DIS error code, or -1 when no connection
** could be made.  On failure the channel/connection is torn down and
** local_conn is reset.
*/

static int startcom(

  int               com,
  tm_event_t        event,
  struct tcp_chan **pchan)

  {
  int ret = DIS_SUCCESS;
  struct tcp_chan *chan = NULL;

  if (localmom() == -1)
    {
    return(-1);
    }

  if ((chan = DIS_tcp_setup(local_conn)) == NULL)
    goto done;

  /* protocol preamble: protocol id, version, job identity */

  ret = diswsi(chan, TM_PROTOCOL);

  if (ret != DIS_SUCCESS)
    goto done;

  ret = diswsi(chan, TM_PROTOCOL_VER);

  if (ret != DIS_SUCCESS)
    goto done;

  ret = diswcs(chan, tm_jobid, tm_jobid_len);

  if (ret != DIS_SUCCESS)
    goto done;

  ret = diswcs(chan, tm_jobcookie, tm_jobcookie_len);

  if (ret != DIS_SUCCESS)
    goto done;

  /* request proper: command, event number, sending task */

  ret = diswsi(chan, com);

  if (ret != DIS_SUCCESS)
    goto done;

  ret = diswsi(chan, event);

  if (ret != DIS_SUCCESS)
    goto done;

  ret = diswui(chan, tm_jobtid);

  if (ret != DIS_SUCCESS)
    goto done;

  *pchan = chan;

  return(DIS_SUCCESS);

done:

  TM_DBPRT(("startcom: send error %s\n",
            dis_emsg[ret]))

  if (chan != NULL)
    DIS_tcp_close(chan);
  else
    close(local_conn);

  local_conn = -1;

  return(ret);
  }  /* END startcom() */
/*
** Initialize the Task Manager interface.
*/
#ifdef __cplusplus
extern "C"
{
#endif
  /*
  ** tm_init() - read the job identity from the PBS_* environment
  ** variables placed by MOM, send a TM_INIT request, then poll until
  ** the reply (which fills in *roots) has been consumed.
  **
  ** Returns TM_SUCCESS or a TM_* error; TM_BADINIT on a second call,
  ** TM_EBADENVIRONMENT when any required variable is missing/invalid.
  */
  int tm_init(

    void  *info,  /* in, currently unused */

    struct  tm_roots *roots) /* out */

    {
    tm_event_t  nevent, revent;
    char   *env, *hold;
    int    err;
    int    nerr = 0;

    struct tcp_chan *chan = NULL;

    if (init_done)
      {
      return(TM_BADINIT);
      }

    if ((tm_jobid = getenv("PBS_JOBID")) == NULL)
      {
      return(TM_EBADENVIRONMENT);
      }

    tm_jobid_len = strlen(tm_jobid);

    if ((tm_jobcookie = getenv("PBS_JOBCOOKIE")) == NULL)
      return TM_EBADENVIRONMENT;

    tm_jobcookie_len = strlen(tm_jobcookie);

    /* the node id may legally be 0, so parse with strtol and check
    ** that the string actually advanced */

    if ((env = getenv("PBS_NODENUM")) == NULL)
      return TM_EBADENVIRONMENT;

    tm_jobndid = (tm_node_id)strtol(env, &hold, 10);

    if (env == hold)
      return TM_EBADENVIRONMENT;

    if ((env = getenv("PBS_TASKNUM")) == NULL)
      return TM_EBADENVIRONMENT;

    if ((tm_jobtid = atoi(env)) == 0)
      return TM_EBADENVIRONMENT;

    if ((env = getenv("PBS_MOMPORT")) == NULL)
      return TM_EBADENVIRONMENT;

    if ((tm_momport = atoi(env)) == 0)
      return TM_EBADENVIRONMENT;

    init_done = 1;

    nevent = new_event();

    /*
     * send the following request:
     * header  (tm_init)
     * int  node number
     * int  task number
     */

    if (startcom(TM_INIT, nevent, &chan) != DIS_SUCCESS)
      return TM_ESYSTEM;

    DIS_tcp_wflush(chan);
    DIS_tcp_cleanup(chan);

    add_event(nevent, TM_ERROR_NODE, TM_INIT, (void *)roots);

    /* block here until the TM_INIT reply has been processed */

    while (TRUE)
      {
      if ((err = tm_poll(TM_NULL_EVENT, &revent, TM_POLL_WAIT, &nerr)) != TM_SUCCESS)
        return err;

      if (event_count == 0)
        break;
      }

    return nerr;
    }
/*
** Copy out node info.  No communication with pbs_mom is needed.
**
** list   - set to a freshly calloc()'d array the caller must free()
** nnodes - set to the number of entries in *list
**
** Returns TM_SUCCESS; TM_BADINIT before tm_init(), TM_ESYSTEM when
** the node table was never received, TM_ERROR on allocation failure.
*/

int tm_nodeinfo(

  tm_node_id **list,
  int         *nnodes)

  {
  tm_node_id *copy;
  int         count;

  if (!init_done)
    {
    return(TM_BADINIT);
    }

  if (node_table == NULL)
    {
    return(TM_ESYSTEM);
    }

  /* the table is terminated by TM_ERROR_NODE */

  count = 0;

  while (node_table[count] != TM_ERROR_NODE)
    count++;

  copy = (tm_node_id *)calloc(count, sizeof(tm_node_id));

  if (copy == NULL)
    {
    /* FAILURE - cannot alloc memory */

    return(TM_ERROR);
    }

  memcpy(copy, node_table, count * sizeof(tm_node_id));

  *list = copy;
  *nnodes = count;

  return(TM_SUCCESS);
  }  /* END tm_nodeinfo() */
/*
** tm_spawn() - ask MOM to start argv[0] with environment <envp> on
** node <where>.
**
** argc/argv - program and arguments; exactly argc entries are sent
** envp      - optional NULL-terminated environment strings
** where     - destination node id
** tid       - out: filled with the new task id when the reply arrives
** event     - out: event number to wait for with tm_poll()
**
** Returns TM_SUCCESS, TM_BADINIT, TM_ENOTFOUND (bad argv), or
** TM_ENOTCONNECTED on any transmission failure.
*/

int tm_spawn(

  int          argc,  /* in */
  char       **argv,  /* in */
  char       **envp,  /* in */
  tm_node_id   where,  /* in */
  tm_task_id  *tid,  /* out */
  tm_event_t  *event)  /* out */

  {
  int rc = TM_SUCCESS;
  char *cp;
  int i;

  struct tcp_chan *chan = NULL;

  /* NOTE:  init_done is global */

  if (!init_done)
    {
    return(TM_BADINIT);
    }

  if ((argc <= 0) || (argv == NULL) || (argv[0] == NULL) || (*argv[0] == '\0'))
    {
    return(TM_ENOTFOUND);
    }

  *event = new_event();

  if (startcom(TM_SPAWN, *event, &chan) != DIS_SUCCESS)
    {
    return(TM_ENOTCONNECTED);
    }

  if (diswsi(chan, where) != DIS_SUCCESS) /* send where */
    {
    rc = TM_ENOTCONNECTED;
    goto tm_spawn_cleanup;
    }

  if (diswsi(chan, argc) != DIS_SUCCESS) /* send argc */
    {
    rc = TM_ENOTCONNECTED;
    goto tm_spawn_cleanup;
    }

  /* send argv strings across */

  for (i = 0;i < argc;i++)
    {
    cp = argv[i];

    if (diswcs(chan, cp, strlen(cp)) != DIS_SUCCESS)
      {
      rc = TM_ENOTCONNECTED;
      goto tm_spawn_cleanup;
      }
    }

  /* send envp strings across; PBSDEBUG is propagated when set here */

  if (getenv("PBSDEBUG") != NULL)
    {
    if (diswcs(chan, "PBSDEBUG=1", strlen("PBSDEBUG=1")) != DIS_SUCCESS)
      {
      rc = TM_ENOTCONNECTED;
      goto tm_spawn_cleanup;
      }
    }

  if (envp != NULL)
    {
    for (i = 0;(cp = envp[i]) != NULL;i++)
      {
      if (diswcs(chan, cp, strlen(cp)) != DIS_SUCCESS)
        {
        rc = TM_ENOTCONNECTED;
        goto tm_spawn_cleanup;
        }
      }
    }

  /* an empty string terminates the environment list */

  if (diswcs(chan, "", 0) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_spawn_cleanup;
    }

  DIS_tcp_wflush(chan);

  add_event(*event, where, TM_SPAWN, (void *)tid);

tm_spawn_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return(rc);
  }  /* END tm_spawn() */
/*
** tm_kill() - send a <sig> signal to all the process groups in the
** task signified by the handle, <tid>.
**
** event - out: event number to wait for with tm_poll()
**
** Returns TM_SUCCESS, TM_BADINIT, TM_ENOTFOUND (unknown task), or
** TM_ENOTCONNECTED on any transmission failure.
*/

int tm_kill(

  tm_task_id  tid,  /* in */
  int         sig,  /* in */
  tm_event_t *event)  /* out */

  {
  int rc = TM_SUCCESS;
  task_info *tp;
  struct tcp_chan *chan = NULL;

  if (!init_done)
    {
    rc = TM_BADINIT;
    goto tm_kill_cleanup;
    }

  if ((tp = find_task(tid)) == NULL)
    {
    rc = TM_ENOTFOUND;
    goto tm_kill_cleanup;
    }

  *event = new_event();

  if (startcom(TM_SIGNAL, *event, &chan) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_kill_cleanup;
    }

  /* payload: node, task id, signal number */

  if (diswsi(chan, tp->t_node) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_kill_cleanup;
    }

  if (diswsi(chan, tid) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_kill_cleanup;
    }

  if (diswsi(chan, sig) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_kill_cleanup;
    }

  DIS_tcp_wflush(chan);

  add_event(*event, tp->t_node, TM_SIGNAL, NULL);

tm_kill_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return rc;
  }
/*
** tm_obit() - returns an event that can be used to learn when a task
** dies.
**
** obitval - out: receives the task's exit value when the event fires
** event   - out: event number to wait for with tm_poll()
**
** Returns TM_SUCCESS, TM_BADINIT, TM_ENOTFOUND (unknown task), or
** TM_ESYSTEM on any transmission failure.
*/

int tm_obit(

  tm_task_id  tid,  /* in */
  int        *obitval, /* out */
  tm_event_t *event)  /* out */

  {
  int rc = TM_SUCCESS;
  task_info *tp;
  struct tcp_chan *chan = NULL;

  if (!init_done)
    {
    rc = TM_BADINIT;
    goto tm_obit_cleanup;
    }

  if ((tp = find_task(tid)) == NULL)
    {
    rc = TM_ENOTFOUND;
    goto tm_obit_cleanup;
    }

  *event = new_event();

  if (startcom(TM_OBIT, *event, &chan) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_obit_cleanup;
    }

  /* payload: node, task id */

  if (diswsi(chan, tp->t_node) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_obit_cleanup;
    }

  if (diswsi(chan, tid) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_obit_cleanup;
    }

  DIS_tcp_wflush(chan);

  add_event(*event, tp->t_node, TM_OBIT, (void *)obitval);

tm_obit_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return rc;
  }
/* Caller-owned buffers for a pending tm_taskinfo() request; stored as
** the event's e_info until the reply arrives. */
struct taskhold
  {
  tm_task_id *list;   /* caller's task id array */
  int size;           /* capacity of list */
  int *ntasks;        /* out: number of tasks reported */
  };
/*
** Makes a request for the list of tasks on <node>.  If <node>
** is a valid node number, it returns the event that the list of
** tasks on <node> is available.
**
** tid_list/list_size - caller-owned array filled when the event
**                      completes; *ntasks receives the task count.
*/

int tm_taskinfo(

  tm_node_id  node,  /* in */
  tm_task_id *tid_list,  /* out */
  int         list_size, /* in */
  int        *ntasks, /* out */
  tm_event_t *event)  /* out */

  {
  struct taskhold *thold;
  struct tcp_chan *chan = NULL;

  if (!init_done)
    return TM_BADINIT;

  if (tid_list == NULL || list_size == 0 || ntasks == NULL)
    return TM_EBADENVIRONMENT;

  *event = new_event();

  if (startcom(TM_TASKS, *event, &chan) != DIS_SUCCESS)
    return TM_ESYSTEM;

  if (diswsi(chan, node) != DIS_SUCCESS)
    {
    DIS_tcp_cleanup(chan);
    return TM_ESYSTEM;
    }

  DIS_tcp_wflush(chan);
  DIS_tcp_cleanup(chan);

  /* remember where the reply should be stored */

  thold = (struct taskhold *)calloc(1, sizeof(struct taskhold));

  assert(thold != NULL);

  thold->list = tid_list;
  thold->size = list_size;
  thold->ntasks = ntasks;

  add_event(*event, node, TM_TASKS, (void *)thold);

  return TM_SUCCESS;
  }
/*
** Returns the job-relative node number that holds or held <tid> in
** *node.  Purely local: resolved from the task hash table.
**
** Returns TM_SUCCESS, TM_BADINIT before tm_init(), or TM_ENOTFOUND
** for an unknown task id.
*/

int tm_atnode(

  tm_task_id  tid,  /* in */
  tm_node_id *node)  /* out */

  {
  task_info *entry;

  if (init_done == 0)
    return TM_BADINIT;

  entry = find_task(tid);

  if (entry == NULL)
    return TM_ENOTFOUND;

  *node = entry->t_node;

  return TM_SUCCESS;
  }
/* Caller-owned buffer for a pending tm_rescinfo() request; stored as
** the event's e_info until the reply arrives. */
struct reschold
  {
  char *resc;  /* caller's resource-string buffer */
  int len;     /* capacity of resc */
  };
/*
** Makes a request for a string specifying the resources
** available on <node>.  If <node> is a valid node number, it
** returns the event that the string specifying the resources on
** <node> is available.  It returns ERROR_EVENT otherwise.
**
** resource/len - caller-owned buffer filled when the event completes.
*/

int tm_rescinfo(

  tm_node_id  node,  /* in */
  char       *resource, /* out */
  int         len,  /* in */
  tm_event_t *event)  /* out */

  {
  struct reschold *rhold;
  struct tcp_chan *chan = NULL;

  if (!init_done)
    return TM_BADINIT;

  if (resource == NULL || len == 0)
    return TM_EBADENVIRONMENT;

  *event = new_event();

  if (startcom(TM_RESOURCES, *event, &chan) != DIS_SUCCESS)
    return TM_ESYSTEM;

  if (diswsi(chan, node) != DIS_SUCCESS)
    {
    DIS_tcp_cleanup(chan);
    return TM_ESYSTEM;
    }

  DIS_tcp_wflush(chan);
  DIS_tcp_cleanup(chan);

  /* remember where the reply should be stored */

  rhold = (struct reschold *)calloc(1, sizeof(struct reschold));

  assert(rhold != NULL);

  rhold->resc = resource;
  rhold->len = len;

  add_event(*event, node, TM_RESOURCES, (void *)rhold);

  return TM_SUCCESS;
  }  /* END tm_rescinfo() */
/*
** Posts the first <nbytes> of a copy of *<info> within MOM on
** this node, and associated with this task.  If <info> is
** non-NULL, it returns the event that the effort to post *<info>
** is complete.  It returns ERROR_EVENT otherwise.
**
** name - label the data is published under
** info/len - raw bytes to copy to MOM
*/

int tm_publish(

  char       *name,  /* in */
  void       *info,  /* in */
  int         len,  /* in */
  tm_event_t *event)  /* out */

  {
  int rc = TM_SUCCESS;
  struct tcp_chan *chan = NULL;

  if (!init_done)
    return TM_BADINIT;

  *event = new_event();

  if (startcom(TM_POSTINFO, *event, &chan) != DIS_SUCCESS)
    return TM_ESYSTEM;

  /* payload: name string, then the raw data bytes */

  if (diswst(chan, name) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_publish_cleanup;
    }

  if (diswcs(chan, (char *)info, len) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_publish_cleanup;
    }

  DIS_tcp_wflush(chan);

  add_event(*event, TM_ERROR_NODE, TM_POSTINFO, NULL);

tm_publish_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return rc;
  }  /* tm_publish() */
/*
** Holder attached to a pending TM_GETINFO event: tm_poll() copies the
** received info blob into the caller-owned buffer and reports the
** received length through info_len.
*/
struct infohold
  {
  void *info;     /* caller-supplied destination buffer */
  int   len;      /* capacity of info in bytes */
  int  *info_len; /* out: actual length of the posted info */
  };
/*
** Makes a request for a copy of the info posted by <tid>. If
** <tid> is a valid task, it returns the event that the
** string specifying the info posted by <tid> is available.
*/
/*
** Sends the TM_GETINFO request for the info posted under <name> by
** <tid>.  The reply is delivered later through tm_poll(), which fills
** <info>/<info_len>.  Returns TM_SUCCESS and sets *<event>, or a TM_*
** error code on failure.
*/
int tm_subscribe(

  tm_task_id  tid,      /* in  */
  char       *name,     /* in  */
  void       *info,     /* out */
  int         len,      /* in  */
  int        *info_len, /* out */
  tm_event_t *event)    /* out */

  {
  int rc = TM_SUCCESS;
  task_info *tp;
  struct tcp_chan *chan = NULL;
  struct infohold *ihold;

  if (!init_done)
    {
    rc = TM_BADINIT;
    goto tm_subscribe_cleanup;
    }

  if ((tp = find_task(tid)) == NULL)
    {
    rc = TM_ENOTFOUND;
    goto tm_subscribe_cleanup;
    }

  *event = new_event();

  if (startcom(TM_GETINFO, *event, &chan) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_subscribe_cleanup;
    }

  if (diswsi(chan, tp->t_node) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_subscribe_cleanup;
    }

  if (diswsi(chan, tid) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_subscribe_cleanup;
    }

  if (diswst(chan, name) != DIS_SUCCESS)
    {
    rc = TM_ESYSTEM;
    goto tm_subscribe_cleanup;
    }

  DIS_tcp_wflush(chan);

  /* Previously this was only assert()ed, which is compiled out under
   * NDEBUG and would let a NULL pointer reach add_event().  Fail with
   * TM_ESYSTEM instead. */
  ihold = (struct infohold *)calloc(1, sizeof(struct infohold));

  if (ihold == NULL)
    {
    rc = TM_ESYSTEM;
    goto tm_subscribe_cleanup;
    }

  ihold->info     = info;
  ihold->len      = len;
  ihold->info_len = info_len;

  add_event(*event, tp->t_node, TM_GETINFO, (void *)ihold);

tm_subscribe_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return rc;
  }
/*
** tm_finalize() - close out task manager interface
**
** This function should be the last one called. It is illegal to call
** any other task manager function following this one. All events are
** freed and any connection to the task manager (pbs_mom) is closed.
** This call is synchronous.
*/
int tm_finalize(void)

  {
  int i;

  if (!init_done)
    return(TM_BADINIT);

  /* walk the event hash table, draining every chained event; stop
   * early once the outstanding-event count reaches zero */
  for (i = 0; event_count && (i < EVENT_HASH); i++)
    {
    event_info *e;

    while ((e = event_hash[i]) != NULL)
      del_event(e);
    }

  /* all further tm_* calls are now illegal until tm_init() again */
  init_done = 0;

  return(TM_SUCCESS);
  }
/*
** tm_notify() - set the signal to be sent on event arrival.
*/
int tm_notify(int tm_signal)
  {
  /* not implemented: no signal is ever delivered on event arrival */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
/*
** tm_alloc() - make a request for additional resources.
*/
int tm_alloc(char *resources, tm_event_t *event)
  {
  /* not implemented: additional resources cannot be requested */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
/*
** tm_dealloc() - drop a node from the job.
*/
int tm_dealloc(tm_node_id node, tm_event_t *event)
  {
  /* not implemented: nodes cannot be dropped from the job */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
/*
** tm_create_event() - create a persistent event.
*/
int tm_create_event(tm_event_t *event)
  {
  /* not implemented: persistent events are unsupported */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
/*
** tm_destroy_event() - destroy a persistent event.
*/
int tm_destroy_event(tm_event_t *event)
  {
  /* not implemented: persistent events are unsupported */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
/*
** tm_register() - link a persistent event with action requests
** from the task manager.
*/
int tm_register(tm_whattodo_t *what, tm_event_t *event)
  {
  /* not implemented: persistent-event registration is unsupported */
  return (init_done == 0) ? TM_BADINIT : TM_ENOTIMPLEMENTED;
  }
#define FOREVER 2592000
/*
** tm_poll - poll to see if an event has been completed.
**
** If "poll_event" is a valid event handle, see if it is completed;
** else if "poll_event" is the null event, check for the first event that
** is completed.
**
** result_event is set to the completed event or the null event.
**
** If wait is TM_POLL_WAIT (or a non-zero value for compatibility),
** wait for "poll_event" to be completed.
** If wait is TM_POLL_NOWAIT, do not wait for "poll_event" to be completed.
**
** If an error ocurs, set tm_errno non-zero.
*/
int tm_poll(

  tm_event_t  poll_event,
  tm_event_t *result_event,
  int         wait,
  int        *tm_errno)

  {
  int num, i;
  int ret, mtype, nnodes;
  int prot, protver;
  int *obitvalp;
  event_info *ep = NULL;
  tm_task_id tid, *tidp;
  tm_event_t nevent;
  tm_node_id node;
  char *jobid = NULL;
  char *info = NULL;

  struct tm_roots *roots;

  struct taskhold *thold;

  struct infohold *ihold;

  struct reschold *rhold;
  extern time_t pbs_tcp_timeout;

  if (!init_done)
    {
    return(TM_BADINIT);
    }

  if (result_event == NULL)
    return(TM_EBADENVIRONMENT);

  *result_event = TM_ERROR_EVENT;

  /* polling for a specific event is not supported; only "first completed" */
  if (poll_event != TM_NULL_EVENT)
    return(TM_ENOTIMPLEMENTED);

  if (tm_errno == NULL)
    return(TM_EBADENVIRONMENT);

  if (event_count == 0)
    {
    TM_DBPRT(("%s: no events waiting\n",
              __func__))
    return(TM_ENOTFOUND);
    }

  /* events outstanding but no mom connection: internal inconsistency */
  if (local_conn < 0)
    {
    TM_DBPRT(("%s: INTERNAL ERROR %d events but no connection (%d)\n",
              __func__, event_count, local_conn))

    if (static_chan != NULL)
      {
      DIS_tcp_cleanup(static_chan);
      static_chan = NULL;
      }

    return(TM_ENOTCONNECTED);
    }

  /* lazily (re)wrap the mom socket in a DIS channel kept across calls */
  if ((static_chan == NULL) && ((static_chan = DIS_tcp_setup(local_conn)) == NULL))
    {
    TM_DBPRT(("%s: Error allocating memory for sock buffer", __func__))
    return TM_BADINIT;
    }

  /*
  ** Setup tcp dis routines with a wait value appropriate for
  ** the value of wait the user set.
  */
  pbs_tcp_timeout = (wait == TM_POLL_NOWAIT) ? 1 : FOREVER;

  prot = disrsi(static_chan, &ret);

  if (ret == DIS_EOD)
    {
    /* nothing on the wire yet: report "no completed event" */
    *result_event = TM_NULL_EVENT;
    DIS_tcp_cleanup(static_chan);
    static_chan = NULL;
    return TM_SUCCESS;
    }
  else if (ret != DIS_SUCCESS)
    {
    TM_DBPRT(("%s: protocol number dis error %d\n", __func__, ret))
    goto tm_poll_error;
    }

  if (prot != TM_PROTOCOL)
    {
    TM_DBPRT(("%s: bad protocol number %d\n", __func__, prot))
    goto tm_poll_error;
    }

  /*
  ** We have seen the start of a message. Set the timeout value
  ** so we wait for the remaining data of a message.
  */
  pbs_tcp_timeout = FOREVER;

  protver = disrsi(static_chan, &ret);

  if (ret != DIS_SUCCESS)
    {
    TM_DBPRT(("%s: protocol version dis error %d\n", __func__, ret))
    goto tm_poll_error;
    }

  if (protver != TM_PROTOCOL_VER)
    {
    TM_DBPRT(("%s: bad protocol version %d\n", __func__, protver))
    goto tm_poll_error;
    }

  /* message header: reply type followed by the event number it answers */
  mtype = disrsi(static_chan, &ret);

  if (ret != DIS_SUCCESS)
    {
    TM_DBPRT(("%s: mtype dis error %d\n", __func__, ret))
    goto tm_poll_error;
    }

  nevent = disrsi(static_chan, &ret);

  if (ret != DIS_SUCCESS)
    {
    TM_DBPRT(("%s: event dis error %d\n", __func__, ret))
    goto tm_poll_error;
    }

  *result_event = nevent;

  TM_DBPRT(("%s: got event %d return %d\n", __func__, nevent, mtype))

  if ((ep = find_event(nevent)) == NULL)
    {
    /* reply for an unknown event: drop the connection entirely */
    TM_DBPRT(("%s: No event found for number %d\n", __func__, nevent));
    DIS_tcp_close(static_chan);
    static_chan = NULL;
    local_conn = -1;
    return TM_ENOEVENT;
    }

  if (mtype == TM_ERROR) /* problem, read error num */
    {
    *tm_errno = disrsi(static_chan, &ret);
    TM_DBPRT(("%s: event %d error %d\n", __func__, nevent, *tm_errno));
    goto tm_poll_done;
    }

  *tm_errno = TM_SUCCESS;

  /* dispatch on the request type the event was created for */
  switch (ep->e_mtype)
    {

      /*
      ** auxiliary info (
      ** number of nodes int;
      ** nodeid[0] int;
      ** ...
      ** nodeid[n-1] int;
      ** parent jobid string;
      ** parent nodeid int;
      ** parent taskid int;
      ** )
      */

    case TM_INIT:

      nnodes = disrsi(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: INIT failed nnodes\n", __func__))
        goto tm_poll_error;
        }

      /* +1 slot for the TM_ERROR_NODE terminator below */
      node_table = (tm_node_id *)calloc(nnodes + 1,
                                        sizeof(tm_node_id));

      if (node_table == NULL)
        {
        perror("Memory allocation failed");
        goto tm_poll_error;
        }

      TM_DBPRT(("%s: INIT nodes %d\n", __func__, nnodes))

      for (i = 0; i < nnodes; i++)
        {
        node_table[i] = disrsi(static_chan, &ret);

        if (ret != DIS_SUCCESS)
          {
          TM_DBPRT(("%s: INIT failed nodeid %d\n", __func__, i))
          goto tm_poll_error;
          }
        }

      node_table[nnodes] = TM_ERROR_NODE;

      jobid = disrst(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: INIT failed jobid\n", __func__))
        goto tm_poll_error;
        }

      TM_DBPRT(("%s: INIT daddy jobid %s\n", __func__, jobid))

      node = disrsi(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: INIT failed parent nodeid\n", __func__))
        goto tm_poll_error;
        }

      TM_DBPRT(("%s: INIT daddy node %d\n", __func__, node))

      tid = disrsi(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: INIT failed parent taskid\n", __func__))
        goto tm_poll_error;
        }

      TM_DBPRT(("%s: INIT daddy tid %lu\n", __func__, (unsigned long)tid))

      /* fill in the tm_roots struct the caller passed to tm_init() */
      roots = (struct tm_roots *)ep->e_info;
      roots->tm_parent = new_task(jobid, node, tid);
      roots->tm_me = new_task(tm_jobid,
                              tm_jobndid,
                              tm_jobtid);
      roots->tm_nnodes = nnodes;
      roots->tm_ntasks = 0;  /* TODO */
      roots->tm_taskpoolid = -1;  /* what? */
      roots->tm_tasklist = NULL;  /* TODO */

      break;

    case TM_TASKS:

      thold = (struct taskhold *)ep->e_info;
      tidp = thold->list;
      num = thold->size;

      /* read task ids until the TM_NULL_TASK sentinel; only the first
       * <num> fit in the caller's buffer, but all are counted in i */
      for (i = 0;; i++)
        {
        tid = disrsi(static_chan, &ret);

        if (tid == TM_NULL_TASK)
          break;

        if (ret != DIS_SUCCESS)
          goto tm_poll_error;

        if (i < num)
          {
          tidp[i] = new_task(tm_jobid,
                             ep->e_node, tid);
          }
        }

      if (i < num)
        tidp[i] = TM_NULL_TASK;

      *(thold->ntasks) = i;

      break;

    case TM_SPAWN:

      tid = disrsi(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: SPAWN failed tid\n", __func__))
        goto tm_poll_error;
        }

      tidp = (tm_task_id *)ep->e_info;
      *tidp = new_task(tm_jobid, ep->e_node, tid);

      break;

    case TM_SIGNAL:

      /* no reply payload */
      break;

    case TM_OBIT:

      obitvalp = (int *)ep->e_info;

      *obitvalp = disrsi(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        TM_DBPRT(("%s: OBIT failed obitval\n", __func__))
        goto tm_poll_error;
        }

      break;

    case TM_POSTINFO:

      /* no reply payload */
      break;

    case TM_GETINFO:

      ihold = (struct infohold *)ep->e_info;

      info = disrcs(static_chan, (size_t *)ihold->info_len, &ret);

      if (ret != DIS_SUCCESS)
        {
        if (info != NULL)
          free(info);
        TM_DBPRT(("%s: GETINFO failed info\n", __func__))
        break;
        }

      /* copy at most the caller's buffer size; *info_len keeps the
       * true received length so the caller can detect truncation */
      memcpy(ihold->info, info, MIN(*ihold->info_len, ihold->len));

      free(info);

      break;

    case TM_RESOURCES:

      rhold = (struct reschold *)ep->e_info;

      info = disrst(static_chan, &ret);

      if (ret != DIS_SUCCESS)
        {
        if (info != NULL)
          free(info);
        break;
        }

      snprintf(rhold->resc, rhold->len, "%s", info);

      free(info);

      break;

    default:

      TM_DBPRT(("%s: unknown event command %d\n", __func__, ep->e_mtype))
      goto tm_poll_error;
    }

  DIS_tcp_wflush(static_chan);

tm_poll_done:

  if (jobid != NULL)
    free(jobid);

  del_event(ep);

  /* keep the channel alive only while more data is already buffered */
  if (tcp_chan_has_data(static_chan) == FALSE)
    {
    DIS_tcp_cleanup(static_chan);
    static_chan = NULL;
    }

  return TM_SUCCESS;

tm_poll_error:

  if (jobid != NULL)
    free(jobid);

  if (ep)
    del_event(ep);

  /* a protocol error is unrecoverable: drop the mom connection */
  close(local_conn);

  DIS_tcp_cleanup(static_chan);
  static_chan = NULL;

  local_conn = -1;

  return TM_ENOTCONNECTED;
  }
/*
* tm_adopt() --
*
* When PBS is used in conjuction with an alternative (MPI) task
* spawning/management system (AMS) (like Quadrics RMS or SGI array
* services), only the script task on the mother superior node will
* be parented by (or even known to) a PBS MOM. Unless the AMS is
* PBS-(tm-)aware, all other tasks will be parented (and to varying
* extents managed) by the AMS. This means that PBS cannot track
* task resource usage (unless the AMS provides such info) nor
* manage (suspend, resume, signal, clean up, ...) the task (unless
* the AMS provides such functionality). For example pvmrun and
* some mpiruns simply use rsh to start remote processes - no AMS
* tracking or management facilities are available.
*
* This function allows any task (session) owned by the owner
* of the job to be adopted into a PBS job. It is used by:
* - "adopter" (which is in turn used by our pvmrun)
* - our rmsloader wrapper (a home-brew replacement for RMS'
* rmsloader that does some work and then exec()s the real
* rmsloader) to tell PBS to adopt its session id (which
* (hopefully) is also the session id for all its child
* processes).
* - anumpirun on SGI Altix systems
*
* Call this instead of tm_init() to ask the local pbs_mom to
* adopt a session (i.e. create a new task corresponding to the
* session id). Note that this may subvert all of the cookie stuff
* in PBS as the AMS task starter may not have any PBS cookie info
* (eg rmsloader)
*
* Arguments:
* char *id AMS altid (eg RMS resource id) or PBS_JOBID
* (depending on adoptCmd) of the job that will adopt
* sid. This is how pbs_mom works out which job will
* adopt the sid.
* int adoptCmd either TM_ADOPT_JOBID or TM_ADOPT_ALTID if task
* id is AMS altid
* pid_t pid process id of process to be adopted (always self?)
*
* Assumption:
* If TM_ADOPT_ALTID is used to identify tasks to be adopted, PBS
* must be configured to work with one and only one alternative task
* spawning/management system that uses it own task identifiers.
*
* Result:
* Returns TM_SUCCESS if the session was successfully adopted by
* the mom. Returns TM_ENOTFOUND if the mom couldn't find a job
* with the given RMS resource id. Returns TM_ESYSTEM or
* TM_ENOTCONNECTED if there was some sort of comms error talking
* to the mom. Returns TM_EPERM if an attempt was made to adopt
* a session not owned by the owner of the job.
*
* Side effects:
* Sets the tm_* globals to fake values if tm_init() has never
* been called. This mainly just prevents segfaults etc when
* these values are written to local_conn - the mom ignores most
* of them for this special adopt case
*
*/
int tm_adopt(

  char *id,
  int   adoptCmd,
  pid_t pid)

  {
  int rc = TM_SUCCESS;
  int status, ret;
  pid_t sid;
  char *env;
  struct tcp_chan *chan = NULL;

  sid = getsid(pid);

  /* do not adopt a sid not owned by caller */

  if (!ispidowner(sid))
    return(TM_EPERM);

  /* Must be the only call to call to tm and
     must only be called once */

  if (init_done) return TM_BADINIT;

  init_done = 1;

  /* Fabricate the tm state as best we can - not really needed */

  if ((tm_jobid = getenv("PBS_JOBID")) == NULL)
    tm_jobid = (char *)"ADOPT JOB";

  tm_jobid_len = strlen(tm_jobid);

  if ((tm_jobcookie = getenv("PBS_JOBCOOKIE")) == NULL)
    tm_jobcookie = (char *)"ADOPT COOKIE";

  tm_jobcookie_len = strlen(tm_jobcookie);

  /* We dont have the (right) node id or task id */
  tm_jobndid = 0;

  tm_jobtid = 0;

  /* Fallback is system default MOM port if not known */
  if ((env = getenv("PBS_MOMPORT")) == NULL || (tm_momport = atoi(env)) == 0)
    tm_momport = PBS_MANAGER_SERVICE_PORT;

  /* DJH 27 Feb 2002. two kinds of adoption now */
  if (adoptCmd != TM_ADOPT_ALTID && adoptCmd != TM_ADOPT_JOBID)
    return TM_EUNKNOWNCMD;

  /* adoption is synchronous, so no event bookkeeping is needed */
  if (startcom(adoptCmd, TM_NULL_EVENT, &chan) != DIS_SUCCESS)
    return TM_ESYSTEM;

  /* send session id */
  if (diswsi(chan, sid) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_adopt_cleanup;
    }

  /* write the pid so the adopted process can be part of the cpuset if needed */
  if (diswsi(chan, pid) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_adopt_cleanup;
    }

  /* send job or alternative id */
  if (diswcs(chan, id, strlen(id)) != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_adopt_cleanup;
    }

  DIS_tcp_wflush(chan);

  /* The mom should now attempt to adopt the task and will send back a
     status flag to indicate whether it was successful or not. */

  status = disrsi(chan, &ret);

  if (ret != DIS_SUCCESS)
    {
    rc = TM_ENOTCONNECTED;
    goto tm_adopt_cleanup;
    }

  /* Don't allow any more tm_* calls in this process. As well as
     closing an unused socket it also prevents any problems related to
     the fact that all adopted processes have a fake task id which
     might break the tm mechanism */

  tm_finalize();

  /* Since we're not using events, tm_finalize won't actually
     close the socket, so do it here. */

  if (local_conn > -1)
    {
    close(local_conn);
    local_conn = -1;
    }

  DIS_tcp_cleanup(chan);

  return (status == TM_OKAY ?
          TM_SUCCESS :
          TM_ENOTFOUND);

tm_adopt_cleanup:

  if (chan != NULL)
    DIS_tcp_cleanup(chan);

  return rc;
  }
#ifdef __cplusplus
}
#endif
| {
"pile_set_name": "Github"
} |
require_relative 'processor_component'
require_relative 'target_resource'
module StructCore
	module Processor
		# Processes a target's resources in either direction:
		# spec -> Xcode project, or Xcode project -> spec.
		class TargetResourcesComponent
			include ProcessorComponent

			def initialize(structure, working_directory, resource_component = nil)
				super(structure, working_directory)
				# Allow injecting a resource component (e.g. for tests); fall back
				# to a default TargetResourceComponent otherwise.
				@resource_component = resource_component
				@resource_component ||= TargetResourceComponent.new(@structure, @working_directory)
			end

			# Dispatches based on the output structure configured on this component.
			# Returns an array of processed resources (empty if nothing applies).
			def process(target, target_dsl = nil, dsl = nil)
				output = []
				output = process_xc_resources target if structure == :spec
				output = process_spec_resources target, target_dsl, dsl if structure == :xcodeproj && !target_dsl.nil? && !dsl.nil?
				output
			end

			# Extracts localizable resources (storyboards/strings) from an Xcode target.
			# @param target [Xcodeproj::Project::Object::PBXNativeTarget]
			def process_xc_resources(target)
				target.resources_build_phase.files.select { |f|
					!f.file_ref.name.nil? && f.file_ref.name.end_with?('.storyboard', '.strings', '.stringsdict')
				}.map { |ref|
					@resource_component.process ref.file_ref
				}.compact.uniq
			end

			# Adds localized (*.lproj) resources from a spec target into the project DSL.
			# @param target [StructCore::Specfile::Target]
			# @param target_dsl [Xcodeproj::Project::Object::PBXNativeTarget]
			# @param dsl [Xcodeproj::Project]
			def process_spec_resources(target, target_dsl, dsl)
				target.res_dir.select { |res_dir|
					lfiles = Dir.glob(File.join(res_dir, '*.lproj', '**', '*'))
					next if lfiles.empty?

					resource_group = create_resource_group target, dsl

					# Create a virtual path since lproj files go through a layer of indirection before hitting the filesystem
					lproj_variant_files = map_lproj_entries lfiles, res_dir

					lproj_variant_files.each { |lproj_file|
						variant_group = resource_group.new_variant_group(lproj_file, res_dir, '<group>')
						# Add all lproj files to the variant group
						Dir.glob(File.join(res_dir, '*.lproj', lproj_file)).each { |file|
							@resource_component.process file, target_dsl, variant_group
						}
					}
				}
			end

			# Finds or creates the per-target "$lang:<name>" group used to hold
			# localized variant groups.
			def create_resource_group(target, dsl)
				resource_group = dsl.groups.find { |group| group.display_name == "$lang:#{target.name}" }
				return resource_group unless resource_group.nil?

				resource_group = dsl.new_group("$lang:#{target.name}", nil, '<group>')
				resource_group.source_tree = 'SOURCE_ROOT'

				resource_group
			end

			# Maps absolute *.lproj file paths to their language-independent
			# relative paths (the part after the .lproj directory), de-duplicated.
			def map_lproj_entries(lfiles, res_dir)
				lproj_variant_files = []

				lfiles.map { |lfile|
					new_lfile = lfile.sub(res_dir, '')
					new_lfile = new_lfile.slice(1, new_lfile.length) if new_lfile.start_with? '/'
					next new_lfile
				}.each { |lfile|
					lfile_components = lfile.split('/')
					lfile_lproj_idx = lfile_components.index { |component|
						component.include? '.lproj'
					}

					lfile_variant_components = []
					lfile_variant_components.push(*lfile_components)
					lfile_variant_components.shift(lfile_lproj_idx + 1)
					lfile_variant_path = lfile_variant_components.join('/')

					unless lproj_variant_files.include? lfile_variant_path
						lproj_variant_files << lfile_variant_path
					end
				}

				lproj_variant_files
			end

			private :create_resource_group
			private :map_lproj_entries
		end
	end
end | {
"pile_set_name": "Github"
} |
// Copyright Aleksey Gurtovoy 2002-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Preprocessed version of "boost/mpl/unpack_args.hpp" header
// -- DO NOT modify by hand!
namespace boost { namespace mpl {

namespace aux {

// Dispatcher selected on the arity of the argument sequence; each
// specialization below forwards the sequence elements to apply<N>.
template< int size, typename F, typename Args >
struct unpack_args_impl;

template< typename F, typename Args >
struct unpack_args_impl< 0,F,Args >
    : apply0<
          F
        >
{
};

template< typename F, typename Args >
struct unpack_args_impl< 1,F,Args >
    : apply1<
          F
        , typename at_c< Args,0 >::type
        >
{
};

template< typename F, typename Args >
struct unpack_args_impl< 2,F,Args >
    : apply2<
          F
        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type
        >
{
};

template< typename F, typename Args >
struct unpack_args_impl< 3,F,Args >
    : apply3<
          F
        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type
        , typename at_c< Args,2 >::type
        >
{
};

template< typename F, typename Args >
struct unpack_args_impl< 4,F,Args >
    : apply4<
          F
        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type
        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type
        >
{
};

template< typename F, typename Args >
struct unpack_args_impl< 5,F,Args >
    : apply5<
          F
        , typename at_c< Args,0 >::type, typename at_c< Args,1 >::type
        , typename at_c< Args,2 >::type, typename at_c< Args,3 >::type
        , typename at_c< Args,4 >::type
        >
{
};

}

// Metafunction-class adapter: unpack_args<F>::apply<Args> invokes F with
// the elements of the sequence Args as separate arguments (up to 5).
template<
      typename F
    >
struct unpack_args
{
    template< typename Args > struct apply
        : aux::unpack_args_impl< size<Args>::value,F, Args >
    {
    };
};

BOOST_MPL_AUX_PASS_THROUGH_LAMBDA_SPEC(1, unpack_args)

}}
| {
"pile_set_name": "Github"
} |
version: 1
n_points: 4
{
767.584 516.866
767.584 1464.025
1742.601 1464.025
1742.601 516.866
}
| {
"pile_set_name": "Github"
} |
--TEST--
Test copy() function: usage variations - identical names
--FILE--
<?php
/* Prototype: bool copy ( string $source, string $dest );
   Description: Makes a copy of the file source to dest.
     Returns TRUE on success or FALSE on failure.
*/

/* Test copy(): Try copying source file to destination file, where the
   destination file name is identical to the source name.  copy() is
   expected to fail, leaving the original file and its size intact. */

$file_path = dirname(__FILE__);

echo "*** Test copy(): Trying to create a copy of file with the same source name ***\n";
$file = $file_path."/copy_variation10.tmp";
$file_handle = fopen($file, "w");
fwrite($file_handle, str_repeat(b"Hello2world...\n", 100));
fclose($file_handle);

var_dump( copy($file, $file) );   /* expected: bool(false) */
var_dump( file_exists($file) );   /* original must still exist */
var_dump( filesize($file) );      /* 100 * 15 bytes = 1500 */

echo "*** Done ***\n";
?>
--CLEAN--
<?php
unlink(dirname(__FILE__)."/copy_variation10.tmp");
?>
--EXPECTF--
*** Test copy(): Trying to create a copy of file with the same source name ***
bool(false)
bool(true)
int(1500)
*** Done ***
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="/blog/numerGrow/dist/css/bootstrap.css"/>
<title>数字效果</title>
<style type="text/css">
html,body,.body {
	height: 100%;
}
/* Tall spacer block so the stats list starts below the fold and the
   number animation only triggers once the user scrolls to it. */
.body .bc {
	text-align: center;
	margin: 200px auto;
	width: 300px;
	font-size: 40px;
}
.stats {
	padding: 0;
	list-style: none;
	text-align: center;
	margin: 0 auto;
	background-color: #000;
	color: #fff;
}
.stats li {
	display: inline-block;
	font-size: 40px;
	padding: 20px;
	width: 200px;
	text-align: center;
}
.stats li + li {
	border-left: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="body">
	<div class="bc">往下滚动</div>
</div>
<!-- Each li declares its animation target via data-value and its
     duration (seconds) via data-time; the script grows 0 -> value. -->
<ul class="stats">
	<li data-ride="numberGrow" data-value="3478" data-time="2">0</li>
	<li data-ride="numberGrow" data-value="22767" data-time="2">0</li>
	<li data-ride="numberGrow" data-value="782349" data-time="2">0</li>
	<li data-ride="numberGrow" data-value="6970274" data-time="2">0</li>
	<li data-ride="numberGrow" data-value="97712345" data-time="2">0</li>
</ul>
<!-- sea.js module loader bootstraps the demo module below -->
<script src="/blog/numerGrow/dist/js/lib/sea.js"></script>
<script src="/blog/numerGrow/dist/js/common.js"></script>
<script>
seajs.use('/blog/numerGrow/dist/js/app/demo2.js');
</script>
</body>
</html> | {
"pile_set_name": "Github"
} |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// An implementation of Invalidator that wraps an invalidation
// client. Handles the details of connecting to XMPP and hooking it
// up to the invalidation client.
//
// You probably don't want to use this directly; use
// NonBlockingInvalidator.
#ifndef COMPONENTS_INVALIDATION_IMPL_INVALIDATION_NOTIFIER_H_
#define COMPONENTS_INVALIDATION_IMPL_INVALIDATION_NOTIFIER_H_
#include <memory>
#include <string>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "components/invalidation/impl/invalidation_state_tracker.h"
#include "components/invalidation/impl/invalidator.h"
#include "components/invalidation/impl/invalidator_registrar.h"
#include "components/invalidation/impl/sync_invalidation_listener.h"
#include "components/invalidation/public/invalidation_export.h"
namespace syncer {
// This class must live on the IO thread.
class INVALIDATION_EXPORT InvalidationNotifier
    : public Invalidator,
      public SyncInvalidationListener::Delegate {
 public:
  // |invalidation_state_tracker| must be initialized.
  InvalidationNotifier(
      std::unique_ptr<SyncNetworkChannel> network_channel,
      const std::string& invalidator_client_id,
      const UnackedInvalidationsMap& saved_invalidations,
      const std::string& invalidation_bootstrap_data,
      const base::WeakPtr<InvalidationStateTracker>& invalidation_state_tracker,
      scoped_refptr<base::SingleThreadTaskRunner>
          invalidation_state_tracker_task_runner,
      const std::string& client_info);

  ~InvalidationNotifier() override;

  // Invalidator implementation.
  void RegisterHandler(InvalidationHandler* handler) override;
  bool UpdateRegisteredIds(InvalidationHandler* handler,
                           const ObjectIdSet& ids) override;
  void UnregisterHandler(InvalidationHandler* handler) override;
  InvalidatorState GetInvalidatorState() const override;
  void UpdateCredentials(const std::string& email,
                         const std::string& token) override;
  void RequestDetailedStatus(base::Callback<void(const base::DictionaryValue&)>
                                 callback) const override;

  // SyncInvalidationListener::Delegate implementation.
  void OnInvalidate(const ObjectIdInvalidationMap& invalidation_map) override;
  void OnInvalidatorStateChange(InvalidatorState state) override;

 private:
  // We start off in the STOPPED state.  When we get our initial
  // credentials, we connect and move to the CONNECTING state.  When
  // we're connected we start the invalidation client and move to the
  // STARTED state.  We never go back to a previous state.
  enum State {
    STOPPED,
    CONNECTING,
    STARTED
  };
  State state_;

  // Tracks registered handlers and fans invalidations out to them.
  InvalidatorRegistrar registrar_;

  // Passed to |invalidation_listener_|.
  const UnackedInvalidationsMap saved_invalidations_;

  // Passed to |invalidation_listener_|.
  const base::WeakPtr<InvalidationStateTracker> invalidation_state_tracker_;
  scoped_refptr<base::SequencedTaskRunner>
      invalidation_state_tracker_task_runner_;

  // Passed to |invalidation_listener_|.
  const std::string client_info_;

  // The client ID to pass to |invalidation_listener_|.
  const std::string invalidator_client_id_;

  // The initial bootstrap data to pass to |invalidation_listener_|.
  const std::string invalidation_bootstrap_data_;

  // The invalidation listener.
  SyncInvalidationListener invalidation_listener_;

  SEQUENCE_CHECKER(sequence_checker_);

  DISALLOW_COPY_AND_ASSIGN(InvalidationNotifier);
};
} // namespace syncer
#endif // COMPONENTS_INVALIDATION_IMPL_INVALIDATION_NOTIFIER_H_
| {
"pile_set_name": "Github"
} |
# Generate record files from the ai2018 sentiment training CSV.
python ./gen-records.py --input ./mount/data/ai2018/sentiment/train.csv
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2020 Confluent Inc.
*
* Licensed under the Confluent Community License (the "License"; you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.confluent.io/confluent-community-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package io.confluent.ksql.parser.tree;
import com.google.errorprone.annotations.Immutable;
import io.confluent.ksql.parser.NodeLocation;
import java.util.Objects;
import java.util.Optional;
@Immutable
public class JoinedSource extends Relation {
private final Relation relation;
private final Type type;
private final JoinCriteria criteria;
private final Optional<WithinExpression> withinExpression;
public JoinedSource(
final Optional<NodeLocation> location,
final Relation relation,
final Type type,
final JoinCriteria criteria,
final Optional<WithinExpression> withinExpression
) {
super(location);
this.relation = Objects.requireNonNull(relation, "relation");
this.type = Objects.requireNonNull(type, "type");
this.criteria = Objects.requireNonNull(criteria, "criteria");
this.withinExpression = Objects.requireNonNull(withinExpression, "withinExpression");
}
public Relation getRelation() {
return relation;
}
public Type getType() {
return type;
}
public JoinCriteria getCriteria() {
return criteria;
}
public Optional<WithinExpression> getWithinExpression() {
return withinExpression;
}
@Override
public <R, C> R accept(final AstVisitor<R, C> visitor, final C context) {
return visitor.visitJoinedSource(this, context);
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final JoinedSource that = (JoinedSource) o;
return Objects.equals(relation, that.relation)
&& Objects.equals(type, that.type)
&& Objects.equals(criteria, that.criteria)
&& Objects.equals(withinExpression, that.withinExpression);
}
@Override
public int hashCode() {
return Objects.hash(relation, type, criteria, withinExpression);
}
@Override
public String toString() {
return "JoinedSource{"
+ "relation=" + relation
+ ", type=" + type
+ ", criteria=" + criteria
+ ", withinExpression=" + withinExpression
+ '}';
}
public enum Type {
INNER("INNER"), LEFT("LEFT OUTER"), OUTER("FULL OUTER");
private final String formattedText;
Type(final String formattedText) {
this.formattedText = Objects.requireNonNull(formattedText, "formattedText");
}
public String getFormatted() {
return formattedText;
}
}
} | {
"pile_set_name": "Github"
} |
// WeChat mini-program page demonstrating the <picker> component in its
// three modes: selector (array), date, and time.
Page({
  data: {
    array:["中国","美国","巴西","日本"],  // options for the selector picker
    index:0,                              // currently selected array index
    date:"2016-09-01",                    // initial value of the date picker
    time:"12:01"                          // initial value of the time picker
  },
  // Fired when the selector picker value changes; stores the new index.
  bindPickerChange: function(e) {
    console.log('picker发送选择改变,携带值为', e.detail.value)
    this.setData({
      index: e.detail.value
    })
  },
  // Fired when the date picker value changes.
  bindDateChange:function(e){
    this.setData({
      date:e.detail.value
    })
  },
  // Fired when the time picker value changes.
  bindTimeChange:function(e){
    this.setData({
      time:e.detail.value
    })
  }
})
| {
"pile_set_name": "Github"
} |
'use strict';
module.exports = function (t, a) {
var o = new Date(), o2;
o2 = t.call(o);
a.not(o, o2, "Different objects");
a.ok(o2 instanceof Date, "Instance of Date");
a(o.getTime(), o2.getTime(), "Same time");
};
| {
"pile_set_name": "Github"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9,!windows
package ipv4
import (
"net"
"syscall"
)
// ReadFrom reads a payload of the received IPv4 datagram, from the
// endpoint c, copying the payload into b. It returns the number of
// bytes copied into b, the control message cm and the source address
// src of the received datagram.
func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
	if !c.ok() {
		return 0, nil, nil, syscall.EINVAL
	}
	// oob receives ancillary (control message) data alongside the payload.
	oob := newControlMessage(&c.rawOpt)
	var oobn int
	switch c := c.PacketConn.(type) {
	case *net.UDPConn:
		if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {
			return 0, nil, nil, err
		}
	case *net.IPConn:
		if sockOpts[ssoStripHeader].name > 0 {
			// Platform strips the IPv4 header for us; read directly into b.
			if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil {
				return 0, nil, nil, err
			}
		} else {
			// Platform delivers the raw IPv4 header; read into a scratch
			// buffer, then strip the variable-length header manually.
			nb := make([]byte, maxHeaderLen+len(b))
			if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil {
				return 0, nil, nil, err
			}
			// Header length in bytes = IHL field (low nibble) * 4.
			hdrlen := int(nb[0]&0x0f) << 2
			copy(b, nb[hdrlen:])
			n -= hdrlen
		}
	default:
		return 0, nil, nil, errInvalidConnType
	}
	if cm, err = parseControlMessage(oob[:oobn]); err != nil {
		return 0, nil, nil, err
	}
	if cm != nil {
		cm.Src = netAddrToIP4(src)
	}
	return
}
// WriteTo writes a payload of the IPv4 datagram, to the destination
// address dst through the endpoint c, copying the payload from b. It
// returns the number of bytes written. The control message cm allows
// the datagram path and the outgoing interface to be specified.
// Currently only Darwin and Linux support this. The cm may be nil if
// control of the outgoing datagram is not required.
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
	if !c.ok() {
		return 0, syscall.EINVAL
	}
	// Encode the control message (if any) as ancillary data.
	oob := marshalControlMessage(cm)
	if dst == nil {
		return 0, errMissingAddress
	}
	// Dispatch on the concrete connection type; dst must match it.
	switch c := c.PacketConn.(type) {
	case *net.UDPConn:
		n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))
	case *net.IPConn:
		n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))
	default:
		return 0, errInvalidConnType
	}
	if err != nil {
		return 0, err
	}
	return
}
| {
"pile_set_name": "Github"
} |
<?php
// Copyright (c) ppy Pty Ltd <[email protected]>. Licensed under the GNU Affero General Public License v3.0.
// See the LICENCE file in the repository root for full licence text.
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class AddDiscussionLockedToBeatmapsets extends Migration
{
    /**
     * Run the migrations: add a discussion_locked flag to osu_beatmapsets,
     * placed after discussion_enabled and defaulting to false.
     *
     * @return void
     */
    public function up()
    {
        Schema::table('osu_beatmapsets', function (Blueprint $beatmapsets) {
            $beatmapsets->boolean('discussion_locked')
                ->after('discussion_enabled')
                ->default(0);
        });
    }

    /**
     * Reverse the migrations: drop the discussion_locked column again.
     *
     * @return void
     */
    public function down()
    {
        Schema::table('osu_beatmapsets', function (Blueprint $beatmapsets) {
            $beatmapsets->dropColumn('discussion_locked');
        });
    }
}
| {
"pile_set_name": "Github"
} |
# /* **************************************************************************
# * *
# * (C) Copyright Paul Mensonides 2002.
# * Distributed under the Boost Software License, Version 1.0. (See
# * accompanying file LICENSE_1_0.txt or copy at
# * http://www.boost.org/LICENSE_1_0.txt)
# * *
# ************************************************************************** */
#
# /* See http://www.boost.org for most recent version. */
#
# ifndef BOOST_PREPROCESSOR_PUNCTUATION_PAREN_IF_HPP
# define BOOST_PREPROCESSOR_PUNCTUATION_PAREN_IF_HPP
#
# include <boost/preprocessor/config/config.hpp>
# include <boost/preprocessor/control/if.hpp>
# include <boost/preprocessor/facilities/empty.hpp>
# include <boost/preprocessor/punctuation/paren.hpp>
#
# /* BOOST_PP_LPAREN_IF */
#
# /* Expands to a left parenthesis when cond is non-zero and to nothing
#  * when cond is 0.  The EDG front end needs an extra level of macro
#  * indirection (the _I variant) to expand the argument correctly. */
#
# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# define BOOST_PP_LPAREN_IF(cond) BOOST_PP_IF(cond, BOOST_PP_LPAREN, BOOST_PP_EMPTY)()
# else
# define BOOST_PP_LPAREN_IF(cond) BOOST_PP_LPAREN_IF_I(cond)
# define BOOST_PP_LPAREN_IF_I(cond) BOOST_PP_IF(cond, BOOST_PP_LPAREN, BOOST_PP_EMPTY)()
# endif
#
# /* BOOST_PP_RPAREN_IF */
#
# /* Same as BOOST_PP_LPAREN_IF but produces a right parenthesis. */
#
# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# define BOOST_PP_RPAREN_IF(cond) BOOST_PP_IF(cond, BOOST_PP_RPAREN, BOOST_PP_EMPTY)()
# else
# define BOOST_PP_RPAREN_IF(cond) BOOST_PP_RPAREN_IF_I(cond)
# define BOOST_PP_RPAREN_IF_I(cond) BOOST_PP_IF(cond, BOOST_PP_RPAREN, BOOST_PP_EMPTY)()
# endif
#
# endif
| {
"pile_set_name": "Github"
} |
# $FreeBSD: src/usr.bin/rup/Makefile,v 1.4.2.1 2001/04/25 11:29:37 ru Exp $
# Build rup(1), which displays the status of remote hosts via the
# rstat RPC service; librpcsvc provides the generated protocol stubs.
PROG= rup
DPADD= ${LIBRPCSVC}
LDADD= -lrpcsvc
# NOTE(review): presumably suppresses -Wcast-function-type warnings from
# the aging RPC callback interfaces — confirm against the build system.
NO_WCAST_FUNCTION_TYPE=
.include <bsd.prog.mk>
| {
"pile_set_name": "Github"
} |
<annotation>
<folder>imagesRaw</folder>
<filename>2017-12-15 19:55:44.056336.jpg</filename>
<path>/Users/abell/Development/other.nyc/Camera/imagesRaw/2017-12-15 19:55:44.056336.jpg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>352</width>
<height>240</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>bus</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>18</xmin>
<ymin>147</ymin>
<xmax>104</xmax>
<ymax>219</ymax>
</bndbox>
</object>
<object>
<name>car</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>207</xmin>
<ymin>156</ymin>
<xmax>227</xmax>
<ymax>169</ymax>
</bndbox>
</object>
<object>
<name>car</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>204</xmin>
<ymin>168</ymin>
<xmax>235</xmax>
<ymax>186</ymax>
</bndbox>
</object>
<object>
<name>car</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>158</xmin>
<ymin>141</ymin>
<xmax>172</xmax>
<ymax>148</ymax>
</bndbox>
</object>
<object>
<name>car</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>158</xmin>
<ymin>134</ymin>
<xmax>171</xmax>
<ymax>140</ymax>
</bndbox>
</object>
</annotation>
| {
"pile_set_name": "Github"
} |
// *********************************************************************
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License
// *********************************************************************
using DataX.Contract;
using DataX.Flow.Common;
using DataX.Flow.Common.Models;
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
namespace DataX.Flow.InteractiveQuery.HDInsight
{
/// <summary>
/// Implementation of KernelService for HDInsight stack. KernelService creates and destroys kernels and maintains the life cycle of kernels.
/// </summary>
public class HDInsightKernelService : KernelService
{
private readonly string _username = string.Empty;
private readonly string _password = string.Empty;
private readonly string _token = string.Empty;
private string _baseUrl = "https://$name.azurehdinsight.net/jupyter";
/// <summary>
/// Constructor for HDInisghtKernelService
/// </summary>
/// <param name="flowConfig">Config of floe</param>
/// <param name="connectionInfo">Spark connection info</param>
/// <param name="logger">Logger for errors/results</param>
public HDInsightKernelService(FlowConfigObject flowConfig, SparkConnectionInfo connectionInfo, ILogger logger) : base(flowConfig, connectionInfo, logger)
{
_baseUrl = _baseUrl.Replace("$name", flowConfig.JobURLBase);
_username = connectionInfo.UserName;
_password = connectionInfo.Password;
_token = Base64Encode($"{_username}:{_password}");
}
/// <summary>
/// CreateKernelAsync - calls into the Rest api for creating the kernel
/// </summary>
/// <returns>ApiResult which contains kernelid</returns>
public override async Task<ApiResult> CreateKernelAsync()
{
try
{
// Set body
string body = "{\"name\":\"sparkkernel\"}";
var content = new StringContent(body);
// Call service
HttpClient client = GetHttpClient();
var response = await client.PostAsync($"{_baseUrl}/api/kernels", content);
var responseString = await response.Content.ReadAsStringAsync();
string id = JsonConvert.DeserializeObject<CreateHDIKernelResponse>(responseString).Id;
client.Dispose();
return ApiResult.CreateSuccess(id);
}
catch (Exception ex)
{
Logger.LogError(ex, ex.Message);
return ApiResult.CreateError(ex.ToString());
}
}
/// <summary>
/// This is called for deleting the kernel by directly calling into the Rest Api's provided by the jupyter kernel
/// </summary>
/// <param name="kernelId">KernelId</param>
/// <returns>Returns success or error as the case maybe as ApiResult</returns>
public override async Task<ApiResult> DeleteKernelAsync(string kernelId)
{
try
{
if (string.IsNullOrEmpty(kernelId))
{
return ApiResult.CreateSuccess("Success");
}
HttpClient client = GetHttpClient();
var response = await client.DeleteAsync($"{_baseUrl}/api/kernels/{kernelId}");
client.Dispose();
if (response.IsSuccessStatusCode)
{
return ApiResult.CreateSuccess("Success");
}
else
{
var result = await response.Content.ReadAsStringAsync();
return ApiResult.CreateError(result);
}
}
catch (Exception ex)
{
return ApiResult.CreateError(ex.ToString());
}
}
/// <summary>
/// Gets the kernel of the desired kernelId
/// </summary>
/// <param name="kernelId">Id of kernel</param>
/// <returns>Kernel for HDInsight</returns>
public override IKernel GetKernel(string kernelId)
{
Dictionary<string, string> hs = new Dictionary<string, string>
{
{ "Authorization", $"Basic {_token}" }
};
HDInsightKernel kernel = new HDInsightKernel(kernelId, _baseUrl, null, null, null, hs);
return kernel;
}
/// <summary>
/// Creates HttpClient with the right headers
/// </summary>
/// <returns>HttpClient to communicate with the Spark HDInsight cluster</returns>
private HttpClient GetHttpClient()
{
HttpClient client = new HttpClient();
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", _token);
client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));//ACCEPT header
return client;
}
}
    /// <summary>
    /// Shape of the JSON returned by the Jupyter kernel-creation endpoint.
    /// Only the fields this service consumes are mapped; Id is the kernel id
    /// used by subsequent calls.
    /// </summary>
    public class CreateHDIKernelResponse
    {
        // Kernel identifier assigned by the Jupyter server.
        public string Id { get; set; }

        // Kernel name (e.g. the requested kernel flavor).
        public string Name { get; set; }
    }
}
| {
"pile_set_name": "Github"
} |
---
title: Google Secret Manager with Firebase
lastmod: 2020-04-15T13:08:29-07:00
publishdate: 2020-04-15T13:08:29-07:00
author: Jeff Delaney
draft: false
description: Use Secret Manager to handle sensitive data in Firebase Cloud Functions
tags:
- firebase
- cloud-functions
- gcp
- pro
pro: true
# youtube:
# github:
vimeo: 408508546
pro: true
# disable_toc: true
# disable_qna: true
# courses
# step: 0
# versions:
# rxdart: 0.20
---
[Secret Manager](https://cloud.google.com/secret-manager/) is a new service on Google Cloud that allows us to store and version sensitive data like private API keys and passwords, then access it from any cloud service. It provides encryption, audit logging, versioning, and IAM roles for secure fine-grained control over sensitive information.
A common use case for Firebase apps is the management of secret API keys in a Cloud Function. The following lesson will teach you how to add secrets via the Google Cloud console, then read them from a Firebase Cloud Function with Node.js.
## Secret Manager OR Functions Environment Variable?
Should you use Secret Manager OR a [Cloud Functions Environment Variable](https://cloud.google.com/functions/docs/env-var)? It is possible to set environment variables in Firebase by running a command like this:
```text
firebase functions:config:set someservice.key="mysecret"
```
This works great most of the time, but has a few drawbacks...
- requires all cloud functions to re-deploy when changed
- can't be shared with other services
- can't be versioned
Secret Manager solves all of these problems, BUT it also has some drawbacks of its own:
- more work to setup initially
- secrets retrieved asynchronously, so my incur a performance penalty
Those are the tradeoffs. Cloud Functions env vars are great for most situations, but Secret Manager is nice when dealing the dynamic secrets shared across multiple Google services.
## Create a Secret
### Enable the Secret Manager API
First, enable the Secret Manager API for your project from the Google Cloud console.
{{< figure src="img/enable-secret-manager.png" caption="Enable the Secret Manager API" >}}
### Add a Secret from the Console
From the GCP console, navigate to *Security >> Secret Manager*. Add a new secret and make note of its name.
{{< figure src="img/create-secret.png" caption="Create a secret from the console" >}}
### Grant Cloud Functions the required IAM role
By default, only the primary Google Cloud admin account can read/write secrets. In order to give the Cloud Functions runtime access, find the **App Engine default service account** member in the IAM tab and edit its roles to include the **secretmanager.secretAccessor** role.
{{< figure src="img/iam-cloud-functions.png" caption="Add Secret Accessor role to the App Engine default service account" >}}
## Read Secrets in Cloud Functions
Perform the following steps from your Cloud Functions environment. This example uses Node.js and TypeScript.
### Installation
{{< file "terminal" "command line" >}}
```text
npm install @google-cloud/secret-manager
```
This package requires Node 10. Update the engine value to enable the Node 10 runtime.
{{< file "npm" "package.json" >}}
```json
"engines": {
"node": "10"
},
```
If using TypeScript, update your `tsconfig` with the following values:
{{< file "typescript" "tsconfig.json" >}}
```json
{
"compilerOptions": {
// ...omitted
"lib": ["ESNext"],
"strictNullChecks": false
},
}
```
Instantiate the Secret Manager client in your code to read and manage secrets. Currently, it can only READ our secret values because of the IAM role assigned in the previous section. Upgrade the IAM role if you also want your functions to modify secrets.
{{< file "typescript" "index.ts" >}}
```typescript
import * as functions from 'firebase-functions';
import { SecretManagerServiceClient } from '@google-cloud/secret-manager';
const secrets = new SecretManagerServiceClient();
```
### Read Secret Values
Create a helper function to read your secret as a string. The payload also contains additional metadata about the secret. It expects the full path to the secret value, along with the version - use the keyword `latest` to grab the most recent version.
{{< file "typescript" "index.ts" >}}
```typescript
async function getSecretValue(name: string) {
const [version] = await secrets.accessSecretVersion({
name: `projects/stripe-js-course/secrets/${name}/versions/latest`,
});
const payload = version.payload?.data?.toString();
return payload;
}
```
You can now use this value in your Cloud Functions.
```typescript
export const helloWorld = functions.https.onRequest(
async (request, response) => {
const mySecret = await getSecretValue('hello-world');
// Warning: not a good idea to console log secrets in production
console.log(mySecret)
}
);
``` | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* Verify that throws is not a reserved keyword
*
* @test
* @run
*/
// Using 'throws' as an ordinary identifier must both parse and run;
// it is not a reserved word in ECMAScript, and this test fails at
// parse time if the engine treats it as one.
var throws = function() { print('i throw'); }
throws();
| {
"pile_set_name": "Github"
} |
<system>
<name>HD 11506</name>
<rightascension>01 52 50.5345</rightascension>
<declination>-19 30 25.1107</declination>
<distance errorminus="1.6" errorplus="1.6">51.7</distance>
<star>
<name>HD 11506</name>
<name>HIP 8770</name>
<name>BD-20 358</name>
<name>PPM 210762</name>
<name>SAO 148079</name>
<name>TYC 5858-2028-1</name>
<name>Gaia DR2 5137855525488932864</name>
<mass>1.19</mass>
<radius errorminus="0.05" errorplus="0.05">1.33</radius>
<magV>7.51</magV>
<magJ errorminus="0.019" errorplus="0.019">6.508</magJ>
<magH errorminus="0.031" errorplus="0.031">6.265</magH>
<magK errorminus="0.017" errorplus="0.017">6.168</magK>
<magB errorminus="0.01" errorplus="0.01">8.11</magB>
<metallicity>0.31</metallicity>
<spectraltype>G0V</spectraltype>
<planet>
<name>HD 11506 b</name>
<name>HIP 8770 b</name>
<name>BD-20 358 b</name>
<name>PPM 210762 b</name>
<name>SAO 148079 b</name>
<name>TYC 5858-2028-1 b</name>
<name>Gaia DR2 5137855525488932864 b</name>
<list>Confirmed planets</list>
<mass errorminus="0.07" errorplus="0.07">4.21</mass>
<period errorminus="5.9" errorplus="5.9">1627.5</period>
<semimajoraxis errorminus="0.007" errorplus="0.007">2.708</semimajoraxis>
<eccentricity errorminus="0.01" errorplus="0.01">0.37</eccentricity>
<description>The star HD 11506 is a yellow dwarf star in the constellation Cetus. The star hosts two planets. The inner planet was discovered using Bayesian analysis (Tuomi et al 2009). The N2K campaign improved the data in 2014 with several further years of observations since the discovery.</description>
<discoverymethod>RV</discoverymethod>
<lastupdate>14/11/19</lastupdate>
<discoveryyear>2007</discoveryyear>
<temperature>200.7</temperature>
</planet>
<planet>
<name>HD 11506 c</name>
<name>HIP 8770 c</name>
<name>BD-20 358 c</name>
<name>PPM 210762 c</name>
<name>SAO 148079 c</name>
<name>TYC 5858-2028-1 c</name>
<name>Gaia DR2 5137855525488932864 c</name>
<list>Confirmed planets</list>
<mass errorminus="0.02" errorplus="0.02">0.36</mass>
<period errorminus="0.6" errorplus="0.6">223.6</period>
<semimajoraxis errorminus="0.001" errorplus="0.001">0.721</semimajoraxis>
<eccentricity errorminus="0.05" errorplus="0.05">0.24</eccentricity>
<description>The star HD 11506 is a yellow dwarf star in the constellation Cetus. The star hosts two planets. The inner planet was discovered using Bayesian analysis (Tuomi et al 2009). The N2K campaign improved the data in 2014 with several further years of observations since the discovery.</description>
<discoverymethod>RV</discoverymethod>
<lastupdate>14/11/19</lastupdate>
<discoveryyear>2009</discoveryyear>
<temperature>388.0</temperature>
</planet>
<temperature>6058.0</temperature>
</star>
</system>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<style xmlns="http://purl.org/net/xbiblio/csl" version="1.0" default-locale="de-DE">
<info>
<title>Durchstarten zur Diplomarbeit (BHS) (in-text, German)</title>
<id>http://www.zotero.org/styles/durchstarten-zur-diplomarbeit-in-text</id>
<link href="http://www.zotero.org/styles/durchstarten-zur-diplomarbeit-in-text" rel="self"/>
<link href="http://www.zotero.org/styles/die-bachelorarbeit-samac-et-al-in-text" rel="independent-parent"/>
<link href="http://www.veritas.at/durchstarten-zur-diplomarbeit-bhs.html" rel="documentation"/>
<author>
<name>Sebastian Karcher</name>
</author>
<category citation-format="author-date"/>
<category field="generic-base"/>
<updated>2013-07-05T14:17:48+00:00</updated>
<rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights>
</info>
</style>
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright (c) BoonEx Pty Limited - http://www.boonex.com/
* CC-BY License - http://creativecommons.org/licenses/by/3.0/
*/
bx_import('BxDolExport');
class BxPfwExport extends BxDolExport
{
    /**
     * Register the payment (pfw) module tables for profile data export.
     *
     * @param array $aSystem export system definition passed through to BxDolExport
     */
    protected function __construct($aSystem)
    {
        parent::__construct($aSystem);

        // Tables owned by the payment module, mapped to the SQL condition
        // that selects the exported profile's rows; the {profile_id}
        // placeholder is presumably substituted by the base export class —
        // confirm against BxDolExport.
        $this->_aTables = array(
            'bx_pfw_cart' => '`client_id` = {profile_id}',
            'bx_pfw_transactions' => '`client_id` = {profile_id} OR `seller_id` = {profile_id}',
            'bx_pfw_transactions_pending' => '`client_id` = {profile_id} OR `seller_id` = {profile_id}',
            'bx_pfw_user_values' => '`user_id` = {profile_id}'
        );
    }
}
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.discovery;
import org.apache.atlas.SortOrder;
import org.apache.atlas.model.discovery.SearchParameters.FilterCriteria;
import org.apache.atlas.model.instance.AtlasEntity;
import org.apache.atlas.repository.Constants;
import org.apache.atlas.repository.graphdb.*;
import org.apache.atlas.repository.store.graph.v2.AtlasGraphUtilsV2;
import org.apache.atlas.type.AtlasClassificationType;
import org.apache.atlas.util.SearchPredicateUtil;
import org.apache.atlas.utils.AtlasPerfTracer;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.Predicate;
import org.apache.commons.collections.PredicateUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.tinkerpop.gremlin.process.traversal.Order;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* This class is needed when this is a registered classification type or wildcard search,
* registered classification includes special type as well. (tag filters will be ignored, and front-end should not enable
* tag-filter for special classification types, including wildcard search - classification name contains *)
*/
public class ClassificationSearchProcessor extends SearchProcessor {
    private static final Logger LOG = LoggerFactory.getLogger(ClassificationSearchProcessor.class);
    private static final Logger PERF_LOG = AtlasPerfTracer.getPerfLogger("ClassificationSearchProcessor");

    // Index query executed directly against entity vertices (wildcard /
    // special classification types, no tag filters).
    private final AtlasIndexQuery indexQuery;
    // Index query executed against classification vertices when tag filters
    // can be served by the index.
    private final AtlasIndexQuery classificationIndexQuery;
    // Graph query against classification vertices for tag filters the index
    // cannot serve.
    private final AtlasGraphQuery tagGraphQueryWithAttributes;
    private final Predicate traitPredicate;
    private final Predicate isEntityPredicate;
    private Predicate activePredicate;

    // Some index engines may take space as a delimiter; when basic search is
    // executed, unsatisfying results may be returned.
    // E.g. entity A has classification "cls" and B has "cls 1":
    // when a user executes an exact search for "cls", only A should be
    // returned, but both A and B are. To avoid this, we filter the results.
    private boolean whiteSpaceFilter = false;

    /**
     * Builds the index/graph queries and in-memory predicates for the given
     * search context. Exactly one query strategy (entity index query,
     * classification index query, or classification graph query) is chosen
     * based on the classification types and tag filters in the context.
     */
    public ClassificationSearchProcessor(SearchContext context) {
        super(context);

        final FilterCriteria filterCriteria = context.getSearchParameters().getTagFilters();
        final Set<String> indexAttributes = new HashSet<>();
        final Set<String> graphAttributes = new HashSet<>();
        final Set<String> allAttributes = new HashSet<>();
        final Set<String> typeAndSubTypes = context.getClassificationTypeNames();
        final String typeAndSubTypesQryStr = context.getClassificationTypesQryStr();
        final boolean isWildcardSearch = context.isWildCardSearch();
        final Set<AtlasClassificationType> classificationTypes = context.getClassificationTypes();

        processSearchAttributes(classificationTypes, filterCriteria, indexAttributes, graphAttributes, allAttributes);

        /* for classification search, if any attribute can't be handled by index query - switch to all filter by Graph query
           There are four cases in the classification type :
           1. unique classification type, including not classified, single wildcard (*), match all classified
           2. wildcard search, including starting/ending/mid wildcard, like cls*, *c*, *ion.
           3. registered classification type, like PII, PHI
           4. classification is not present in the search parameter
           each of above cases with either empty or non-empty tagFilters
         */
        final boolean useIndexSearchForEntity = (CollectionUtils.isNotEmpty(classificationTypes) || isWildcardSearch) &&
                                                !context.hasAttributeFilter(filterCriteria) &&
                                                (typeAndSubTypesQryStr.length() <= MAX_QUERY_STR_LENGTH_TAGS);

        /* If classification's attributes can be applied index filter, we can use direct index
         * to query classification index as well.
         */
        final boolean useIndexSearchForClassification = (CollectionUtils.isNotEmpty(classificationTypes) &&
                                                         classificationTypes.iterator().next() != SearchContext.MATCH_ALL_NOT_CLASSIFIED &&
                                                         !isWildcardSearch) &&
                                                        (typeAndSubTypesQryStr.length() <= MAX_QUERY_STR_LENGTH_TAGS) &&
                                                        CollectionUtils.isNotEmpty(indexAttributes) &&
                                                        canApplyIndexFilter(classificationTypes, filterCriteria, false);

        final boolean useGraphSearchForClassification = (CollectionUtils.isNotEmpty(classificationTypes) &&
                                                         classificationTypes.iterator().next() != SearchContext.MATCH_ALL_NOT_CLASSIFIED &&
                                                         !isWildcardSearch && CollectionUtils.isNotEmpty(graphAttributes));

        traitPredicate = buildTraitPredict(classificationTypes);
        isEntityPredicate = SearchPredicateUtil.generateIsEntityVertexPredicate(context.getTypeRegistry());

        if (context.getSearchParameters().getExcludeDeletedEntities()) {
            activePredicate = SearchPredicateUtil.getEQPredicateGenerator()
                    .generatePredicate(Constants.STATE_PROPERTY_KEY, AtlasEntity.Status.ACTIVE.name(), String.class);
        }

        Predicate attributePredicate = null;
        Predicate typeNamePredicate = null;
        AtlasGraph graph = context.getGraph();

        // index query directly on entity
        if (useIndexSearchForEntity) {
            StringBuilder queryString = new StringBuilder();
            graphIndexQueryBuilder.addActiveStateQueryFilter(queryString);

            if (isWildcardSearch) {
                // tagFilters is not allowed in wildcard search
                graphIndexQueryBuilder.addClassificationTypeFilter(queryString);
            } else {
                if (classificationTypes.iterator().next() == SearchContext.MATCH_ALL_NOT_CLASSIFIED) {
                    // tagFilters is not allowed in unique classificationType search
                    graphIndexQueryBuilder.addClassificationFilterForBuiltInTypes(queryString);
                } else {
                    // only registered classification will search for subtypes
                    graphIndexQueryBuilder.addClassificationAndSubTypesQueryFilter(queryString);
                    whiteSpaceFilter = true;
                }
            }

            // Strip dangling AND/OR/ellipsis fragments left over from query assembly.
            String indexQueryString = STRAY_AND_PATTERN.matcher(queryString).replaceAll(")");
            indexQueryString = STRAY_OR_PATTERN.matcher(indexQueryString).replaceAll(")");
            indexQueryString = STRAY_ELIPSIS_PATTERN.matcher(indexQueryString).replaceAll("");
            indexQuery = graph.indexQuery(Constants.VERTEX_INDEX, indexQueryString);
            LOG.debug("Using query string '{}'.", indexQuery);
        } else {
            indexQuery = null;
        }

        // index query directly on classification
        if (useIndexSearchForClassification) {
            StringBuilder queryString = new StringBuilder();
            graphIndexQueryBuilder.addActiveStateQueryFilter(queryString);
            graphIndexQueryBuilder.addTypeAndSubTypesQueryFilter(queryString, typeAndSubTypesQryStr);

            constructFilterQuery(queryString, classificationTypes, filterCriteria, indexAttributes);

            String indexQueryString = STRAY_AND_PATTERN.matcher(queryString).replaceAll(")");
            indexQueryString = STRAY_OR_PATTERN.matcher(indexQueryString).replaceAll(")");
            indexQueryString = STRAY_ELIPSIS_PATTERN.matcher(indexQueryString).replaceAll("");
            this.classificationIndexQuery = graph.indexQuery(Constants.VERTEX_INDEX, indexQueryString);

            // Root classification type matches everything, so no typeName filter is needed.
            typeNamePredicate = isClassificationRootType() ? null :
                    SearchPredicateUtil.getINPredicateGenerator().generatePredicate(Constants.TYPE_NAME_PROPERTY_KEY, typeAndSubTypes, String.class);
            attributePredicate = constructInMemoryPredicate(classificationTypes, filterCriteria, indexAttributes);
        } else {
            classificationIndexQuery = null;
        }

        // only registered classification will search with tag filters
        if (useGraphSearchForClassification) {
            AtlasGraphQuery query = graph.query();

            if (!isClassificationRootType()) {
                query.in(Constants.TYPE_NAME_PROPERTY_KEY, typeAndSubTypes);
            }

            tagGraphQueryWithAttributes = toGraphFilterQuery(classificationTypes, filterCriteria, allAttributes, query);
            typeNamePredicate = isClassificationRootType() ? null :
                    SearchPredicateUtil.getINPredicateGenerator().generatePredicate(Constants.TYPE_NAME_PROPERTY_KEY, typeAndSubTypes, String.class);
            attributePredicate = constructInMemoryPredicate(classificationTypes, filterCriteria, allAttributes);
        } else {
            tagGraphQueryWithAttributes = null;
        }

        // Combine the typeName and attribute predicates into the single
        // in-memory predicate applied to candidate classification vertices.
        if (typeNamePredicate != null) {
            inMemoryPredicate = inMemoryPredicate == null ? typeNamePredicate : PredicateUtils.andPredicate(inMemoryPredicate, typeNamePredicate);
        }
        if (attributePredicate != null) {
            inMemoryPredicate = inMemoryPredicate == null ? attributePredicate : PredicateUtils.andPredicate(inMemoryPredicate, attributePredicate);
        }
    }

    /**
     * Runs the chosen query strategy page by page until 'limit' entity
     * vertices survive all in-memory filters (trait, entity-type, active
     * state, downstream processors), then returns them.
     */
    @Override
    public List<AtlasVertex> execute() {
        if (LOG.isDebugEnabled()) {
            LOG.debug("==> ClassificationSearchProcessor.execute({})", context);
        }

        List<AtlasVertex> ret = new ArrayList<>();
        AtlasPerfTracer perf = null;

        if (AtlasPerfTracer.isPerfTraceEnabled(PERF_LOG)) {
            perf = AtlasPerfTracer.getPerfTracer(PERF_LOG, "ClassificationSearchProcessor.execute(" + context + ")");
        }

        try {
            final int startIdx = context.getSearchParameters().getOffset();
            final int limit = context.getSearchParameters().getLimit();

            // query to start at 0, even though startIdx can be higher - because few results in earlier retrieval could
            // have been dropped: like non-active-entities or duplicate-entities (same entity pointed to by multiple
            // classifications in the result)
            //
            // first 'startIdx' number of entries will be ignored
            int qryOffset = 0;
            int resultIdx = qryOffset;

            final Set<String> processedGuids = new HashSet<>();
            final List<AtlasVertex> entityVertices = new ArrayList<>();
            final List<AtlasVertex> classificationVertices = new ArrayList<>();
            final String sortBy = context.getSearchParameters().getSortBy();
            final SortOrder sortOrder = context.getSearchParameters().getSortOrder();

            for (; ret.size() < limit; qryOffset += limit) {
                entityVertices.clear();
                classificationVertices.clear();

                if (context.terminateSearch()) {
                    LOG.warn("query terminated: {}", context.getSearchParameters());
                    break;
                }

                boolean isLastResultPage = true;

                if (indexQuery != null) {
                    Iterator<AtlasIndexQuery.Result> queryResult;

                    if (StringUtils.isNotEmpty(sortBy)) {
                        Order qrySortOrder = sortOrder == SortOrder.ASCENDING ? Order.asc : Order.desc;
                        queryResult = indexQuery.vertices(qryOffset, limit, sortBy, qrySortOrder);
                    } else {
                        queryResult = indexQuery.vertices(qryOffset, limit);
                    }

                    getVerticesFromIndexQueryResult(queryResult, entityVertices);
                    isLastResultPage = entityVertices.size() < limit;

                    // Do in-memory filtering
                    CollectionUtils.filter(entityVertices, traitPredicate);
                    CollectionUtils.filter(entityVertices, isEntityPredicate);
                } else {
                    if (classificationIndexQuery != null) {
                        Iterator<AtlasIndexQuery.Result> queryResult = classificationIndexQuery.vertices(qryOffset, limit);

                        getVerticesFromIndexQueryResult(queryResult, classificationVertices);
                        isLastResultPage = classificationVertices.size() < limit;

                        CollectionUtils.filter(classificationVertices, inMemoryPredicate);
                    } else if (tagGraphQueryWithAttributes != null) {
                        Iterator<AtlasVertex> queryResult = tagGraphQueryWithAttributes.vertices(qryOffset, limit).iterator();

                        getVertices(queryResult, classificationVertices);
                        isLastResultPage = classificationVertices.size() < limit;

                        CollectionUtils.filter(classificationVertices, inMemoryPredicate);
                    }
                }

                // Since tag filters are present, we need to collect the entity vertices after filtering the classification
                // vertex results (as these might be lower in number)
                if (CollectionUtils.isNotEmpty(classificationVertices)) {
                    for (AtlasVertex classificationVertex : classificationVertices) {
                        Iterable<AtlasEdge> edges = classificationVertex.getEdges(AtlasEdgeDirection.IN, Constants.CLASSIFICATION_LABEL);

                        for (AtlasEdge edge : edges) {
                            AtlasVertex entityVertex = edge.getOutVertex();
                            String guid = AtlasGraphUtilsV2.getIdFromVertex(entityVertex);

                            // De-duplicate: one entity may carry several matching classifications.
                            if (processedGuids.contains(guid)) {
                                continue;
                            }

                            entityVertices.add(entityVertex);
                            processedGuids.add(guid);
                        }
                    }
                }

                if (whiteSpaceFilter) {
                    filterWhiteSpaceClassification(entityVertices);
                }

                // Do in-memory filtering
                CollectionUtils.filter(entityVertices, isEntityPredicate);

                if (activePredicate != null) {
                    CollectionUtils.filter(entityVertices, activePredicate);
                }

                super.filter(entityVertices);

                resultIdx = collectResultVertices(ret, startIdx, limit, resultIdx, entityVertices);

                if (isLastResultPage) {
                    break;
                }
            }
        } finally {
            AtlasPerfTracer.log(perf);
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug("<== ClassificationSearchProcessor.execute({}): ret.size()={}", context, ret.size());
        }

        return ret;
    }

    /**
     * Filters an existing candidate list in place: keeps only entity
     * vertices that carry a classification satisfying this processor's
     * trait and in-memory predicates, then delegates to the next processor.
     */
    @Override
    public void filter(List<AtlasVertex> entityVertices) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("==> ClassificationSearchProcessor.filter({})", entityVertices.size());
        }

        if (inMemoryPredicate != null) {
            // in case of classification type + index attributes
            CollectionUtils.filter(entityVertices, traitPredicate);

            // filter attributes (filterCriteria). Find classification vertex (typeName = classification) from entity vertex (traitName = classification)
            final Set<String> processedGuids = new HashSet<>();
            List<AtlasVertex> matchEntityVertices = new ArrayList<>();

            if (CollectionUtils.isNotEmpty(entityVertices)) {
                for (AtlasVertex entityVertex : entityVertices) {
                    Iterable<AtlasEdge> edges = entityVertex.getEdges(AtlasEdgeDirection.OUT, Constants.CLASSIFICATION_LABEL);

                    for (AtlasEdge edge : edges) {
                        AtlasVertex classificationVertex = edge.getInVertex();
                        AtlasVertex matchVertex = (AtlasVertex) CollectionUtils.find(Collections.singleton(classificationVertex), inMemoryPredicate);

                        if (matchVertex != null) {
                            String guid = AtlasGraphUtilsV2.getIdFromVertex(entityVertex);

                            if (processedGuids.contains(guid)) {
                                continue;
                            }

                            matchEntityVertices.add(entityVertex);
                            processedGuids.add(guid);
                            break;
                        }
                    }
                }
            }

            entityVertices.clear();
            entityVertices.addAll(matchEntityVertices);
        } else {
            // in case of only classification type
            CollectionUtils.filter(entityVertices, traitPredicate);
            CollectionUtils.filter(entityVertices, isEntityPredicate);
        }

        super.filter(entityVertices);

        if (LOG.isDebugEnabled()) {
            LOG.debug("<== ClassificationSearchProcessor.filter(): ret.size()={}", entityVertices.size());
        }
    }

    /**
     * Total hit count from the entity index query, or -1 when that
     * strategy is not in use (the other strategies cannot report totals).
     */
    @Override
    public long getResultCount() {
        return (indexQuery != null) ? indexQuery.vertexTotals() : -1;
    }
}
| {
"pile_set_name": "Github"
} |
// Spanish (Spain) localization strings for the Summernote WYSIWYG editor.
// Registered onto $.summernote.lang under the 'es-ES' key; the nested keys
// mirror the editor's UI structure (font, image, video, link, table, ...).
// NOTE: these values are user-facing strings and must not be altered except
// to correct the translation itself.
(function($) {
  $.extend($.summernote.lang, {
    'es-ES': {
      font: {
        bold: 'Negrita',
        italic: 'Cursiva',
        underline: 'Subrayado',
        clear: 'Quitar estilo de fuente',
        height: 'Altura de línea',
        name: 'Fuente',
        strikethrough: 'Tachado',
        superscript: 'Superíndice',
        subscript: 'Subíndice',
        size: 'Tamaño de la fuente'
      },
      image: {
        image: 'Imagen',
        insert: 'Insertar imagen',
        resizeFull: 'Redimensionar a tamaño completo',
        resizeHalf: 'Redimensionar a la mitad',
        resizeQuarter: 'Redimensionar a un cuarto',
        floatLeft: 'Flotar a la izquierda',
        floatRight: 'Flotar a la derecha',
        floatNone: 'No flotar',
        shapeRounded: 'Forma: Redondeado',
        shapeCircle: 'Forma: Círculo',
        shapeThumbnail: 'Forma: Marco',
        shapeNone: 'Forma: Ninguna',
        dragImageHere: 'Arrastrar una imagen o texto aquí',
        dropImage: 'Suelta la imagen o texto',
        selectFromFiles: 'Seleccionar desde los archivos',
        maximumFileSize: 'Tamaño máximo del archivo',
        maximumFileSizeError: 'Has superado el tamaño máximo del archivo.',
        url: 'URL de la imagen',
        remove: 'Eliminar imagen',
        original: 'Original'
      },
      video: {
        video: 'Vídeo',
        videoLink: 'Link del vídeo',
        insert: 'Insertar vídeo',
        url: '¿URL del vídeo?',
        providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion o Youku)'
      },
      link: {
        link: 'Link',
        insert: 'Insertar link',
        unlink: 'Quitar link',
        edit: 'Editar',
        textToDisplay: 'Texto para mostrar',
        url: '¿Hacia que URL lleva el link?',
        openInNewWindow: 'Abrir en una nueva ventana'
      },
      table: {
        table: 'Tabla',
        addRowAbove: 'Añadir fila encima',
        addRowBelow: 'Añadir fila debajo',
        addColLeft: 'Añadir columna izquierda',
        addColRight: 'Añadir columna derecha',
        delRow: 'Borrar fila',
        delCol: 'Eliminar columna',
        delTable: 'Eliminar tabla'
      },
      hr: {
        insert: 'Insertar línea horizontal'
      },
      style: {
        style: 'Estilo',
        p: 'p',
        blockquote: 'Cita',
        pre: 'Código',
        h1: 'Título 1',
        h2: 'Título 2',
        h3: 'Título 3',
        h4: 'Título 4',
        h5: 'Título 5',
        h6: 'Título 6'
      },
      lists: {
        unordered: 'Lista desordenada',
        ordered: 'Lista ordenada'
      },
      options: {
        help: 'Ayuda',
        fullscreen: 'Pantalla completa',
        codeview: 'Ver código fuente'
      },
      paragraph: {
        paragraph: 'Párrafo',
        outdent: 'Menos tabulación',
        indent: 'Más tabulación',
        left: 'Alinear a la izquierda',
        center: 'Alinear al centro',
        right: 'Alinear a la derecha',
        justify: 'Justificar'
      },
      color: {
        recent: 'Último color',
        more: 'Más colores',
        background: 'Color de fondo',
        foreground: 'Color de fuente',
        transparent: 'Transparente',
        setTransparent: 'Establecer transparente',
        reset: 'Restaurar',
        resetToDefault: 'Restaurar por defecto'
      },
      shortcut: {
        shortcuts: 'Atajos de teclado',
        close: 'Cerrar',
        textFormatting: 'Formato de texto',
        action: 'Acción',
        paragraphFormatting: 'Formato de párrafo',
        documentStyle: 'Estilo de documento',
        extraKeys: 'Teclas adicionales'
      },
      help: {
        'insertParagraph': 'Insertar párrafo',
        'undo': 'Deshacer última acción',
        'redo': 'Rehacer última acción',
        'tab': 'Tabular',
        'untab': 'Eliminar tabulación',
        'bold': 'Establecer estilo negrita',
        'italic': 'Establecer estilo cursiva',
        'underline': 'Establecer estilo subrayado',
        'strikethrough': 'Establecer estilo tachado',
        'removeFormat': 'Limpiar estilo',
        'justifyLeft': 'Alinear a la izquierda',
        'justifyCenter': 'Alinear al centro',
        'justifyRight': 'Alinear a la derecha',
        'justifyFull': 'Justificar',
        'insertUnorderedList': 'Insertar lista desordenada',
        'insertOrderedList': 'Insertar lista ordenada',
        'outdent': 'Reducir tabulación del párrafo',
        'indent': 'Aumentar tabulación del párrafo',
        'formatPara': 'Cambiar estilo del bloque a párrafo (etiqueta P)',
        'formatH1': 'Cambiar estilo del bloque a H1',
        'formatH2': 'Cambiar estilo del bloque a H2',
        'formatH3': 'Cambiar estilo del bloque a H3',
        'formatH4': 'Cambiar estilo del bloque a H4',
        'formatH5': 'Cambiar estilo del bloque a H5',
        'formatH6': 'Cambiar estilo del bloque a H6',
        'insertHorizontalRule': 'Insertar línea horizontal',
        'linkDialog.show': 'Mostrar panel enlaces'
      },
      history: {
        undo: 'Deshacer',
        redo: 'Rehacer'
      },
      specialChar: {
        specialChar: 'CARACTERES ESPECIALES',
        select: 'Selecciona Caracteres especiales'
      }
    }
  });
})(jQuery);
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify SConsignFile() when used with dumbdbm.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
# Skip the whole test on Python builds that lack the dbm.dumb module.
try:
    import dbm.dumb
    use_dbm='dbm.dumb'
except ImportError:
    test.skip_test('No dbm.dumb in this version of Python; skipping test.\n')
test.subdir('subdir')
# Minimal "builder" script: copies argv[2] to argv[1] byte-for-byte.
test.write('build.py', r"""
import sys
with open(sys.argv[1], 'wb') as ofp, open(sys.argv[2], 'rb') as ifp:
    ofp.write(ifp.read())
sys.exit(0)
""")
#
# SConstruct configures the .sconsign database to use dbm.dumb and builds
# four targets, two of them in a subdirectory (to verify that per-directory
# .sconsign files are NOT created when a single database file is configured).
test.write('SConstruct', """
import sys
import %(use_dbm)s
SConsignFile('.sconsign', %(use_dbm)s)
B = Builder(action = r'%(_python_)s build.py $TARGETS $SOURCES')
env = Environment(BUILDERS = { 'B' : B })
env.B(target = 'f1.out', source = 'f1.in')
env.B(target = 'f2.out', source = 'f2.in')
env.B(target = 'subdir/f3.out', source = 'subdir/f3.in')
env.B(target = 'subdir/f4.out', source = 'subdir/f4.in')
""" % locals())
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.write(['subdir', 'f3.in'], "subdir/f3.in\n")
test.write(['subdir', 'f4.in'], "subdir/f4.in\n")
test.run()
# dbm.dumb stores its data in a pair of .dat/.dir files, so those must exist;
# neither the bare name nor the default .dblite format should appear, and no
# per-directory signature files should be created.
test.must_exist(test.workpath('.sconsign.dat'))
test.must_exist(test.workpath('.sconsign.dir'))
test.must_not_exist(test.workpath('.sconsign'))
test.must_not_exist(test.workpath('.sconsign.dblite'))
test.must_not_exist(test.workpath('subdir', '.sconsign'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dblite'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dat'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dir'))
test.must_match('f1.out', "f1.in\n")
test.must_match('f2.out', "f2.in\n")
test.must_match(['subdir', 'f3.out'], "subdir/f3.in\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\n")
# A second pass must be a no-op: signatures were persisted correctly and the
# database layout on disk is unchanged.
test.up_to_date(arguments = '.')
test.must_exist(test.workpath('.sconsign.dat'))
test.must_exist(test.workpath('.sconsign.dir'))
test.must_not_exist(test.workpath('.sconsign'))
test.must_not_exist(test.workpath('.sconsign.dblite'))
test.must_not_exist(test.workpath('subdir', '.sconsign'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dblite'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dat'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dir'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2019 Moddable Tech, Inc.
*
* This file is part of the Moddable SDK.
*
* This work is licensed under the
* Creative Commons Attribution 4.0 International License.
* To view a copy of this license, visit
* <https://creativecommons.org/licenses/by/4.0>.
* or send a letter to Creative Commons, PO Box 1866,
* Mountain View, CA 94042, USA.
*
*/
/* this is a simple demonstration of using serial.poll() */
/* It is intended to be connected to a serial/terminal port */
/* Type characters. If a terminator if encountered or the chunk */
/* size is read, the onDataReceived() callback will invoked. */
/* If "stop" is encountered then polling will be terminated */
import Timer from "timer";
import Serial from "serial";
let serial = new Serial();
/* Entry point: start polling the serial port, echo each received chunk,
   and write a heartbeat line every 10 seconds. Polling stops (poll() with
   no arguments) once a chunk containing "stop" is seen. */
export default function() {
	serial.onDataReceived = function(str, len) {
		trace("got: [" + str + "]\n");
		if (str.indexOf("stop") !== -1) {
			serial.poll();
		}
	};
	serial.poll({ terminators: "\r\n", trim: 1, chunkSize: 16 });
	Timer.repeat(() => serial.writeLine("tick."), 10000);
}
| {
"pile_set_name": "Github"
} |
@comment $NetBSD: PLIST,v 1.3 2015/05/08 11:27:51 wiz Exp $
share/texmf-dist/fonts/afm/public/ocherokee/Cherokee-Bold.afm
share/texmf-dist/fonts/afm/public/ocherokee/Cherokee.afm
share/texmf-dist/fonts/map/dvips/ocherokee/cherokee.map
share/texmf-dist/fonts/ofm/public/ocherokee/OCherokee.ofm
share/texmf-dist/fonts/ofm/public/ocherokee/OCherokeeb.ofm
share/texmf-dist/fonts/ofm/public/ocherokee/OCherokeebo.ofm
share/texmf-dist/fonts/ofm/public/ocherokee/OCherokeeo.ofm
share/texmf-dist/fonts/ovf/public/ocherokee/OCherokee.ovf
share/texmf-dist/fonts/ovf/public/ocherokee/OCherokeeb.ovf
share/texmf-dist/fonts/ovf/public/ocherokee/OCherokeebo.ovf
share/texmf-dist/fonts/ovf/public/ocherokee/OCherokeeo.ovf
share/texmf-dist/fonts/ovp/public/ocherokee/OCherokee.ovp
share/texmf-dist/fonts/ovp/public/ocherokee/OCherokeeb.ovp
share/texmf-dist/fonts/ovp/public/ocherokee/OCherokeebo.ovp
share/texmf-dist/fonts/ovp/public/ocherokee/OCherokeeo.ovp
share/texmf-dist/fonts/tfm/public/ocherokee/Cherokee.tfm
share/texmf-dist/fonts/tfm/public/ocherokee/Cherokeeb.tfm
share/texmf-dist/fonts/tfm/public/ocherokee/Cherokeebo.tfm
share/texmf-dist/fonts/tfm/public/ocherokee/Cherokeeo.tfm
share/texmf-dist/fonts/type1/public/ocherokee/Cherokee-Bold.pfb
share/texmf-dist/fonts/type1/public/ocherokee/Cherokee.pfb
share/texmf-dist/omega/ocp/ocherokee/cher2uni.ocp
share/texmf-dist/omega/otp/ocherokee/cher2uni.otp
share/texmf-dist/tex/lambda/ocherokee/lchcmr.fd
share/texmf-dist/tex/lambda/ocherokee/lchenc.def
share/texmf-dist/tex/lambda/ocherokee/ocherokee.sty
| {
"pile_set_name": "Github"
} |
/**
* Copyright 2004-present Facebook. All Rights Reserved.
*/
/*global exports:true*/
/**
* Implements ES7 object spread property.
* https://gist.github.com/sebmarkbage/aa849c7973cb4452c547
*
* { ...a, x: 1 }
*
* Object.assign({}, a, {x: 1 })
*
*/
var Syntax = require('esprima-fb').Syntax;
var utils = require('../src/utils');
/**
 * Rewrites an object literal containing spread properties into an
 * Object.assign(...) call, e.g. { ...a, x: 1 } -> Object.assign({}, a, {x: 1 }).
 * Works on the jstransform output cursor: catchup() copies original source up
 * to an offset, move() skips original source, append() emits synthesized text.
 * Returns false to stop the default traversal (children are traversed here).
 */
function visitObjectLiteralSpread(traverse, node, path, state) {
  utils.catchup(node.range[0], state);
  utils.append('Object.assign({', state);
  // Skip the original {
  utils.move(node.range[0] + 1, state);
  var previousWasSpread = false;
  for (var i = 0; i < node.properties.length; i++) {
    var property = node.properties[i];
    if (property.type === Syntax.SpreadProperty) {
      // Close the previous object or initial object
      if (!previousWasSpread) {
        utils.append('}', state);
      }
      if (i === 0) {
        // Normally there will be a comma when we catch up, but not before
        // the first property.
        utils.append(',', state);
      }
      utils.catchup(property.range[0], state);
      // skip ...
      utils.move(property.range[0] + 3, state);
      traverse(property.argument, path, state);
      utils.catchup(property.range[1], state);
      previousWasSpread = true;
    } else {
      utils.catchup(property.range[0], state);
      // A plain property following a spread opens a fresh literal argument.
      if (previousWasSpread) {
        utils.append('{', state);
      }
      traverse(property, path, state);
      utils.catchup(property.range[1], state);
      previousWasSpread = false;
    }
  }
  // Strip any non-whitespace between the last item and the end.
  // We only catch up on whitespace so that we ignore any trailing commas which
  // are stripped out for IE8 support. Unfortunately, this also strips out any
  // trailing comments.
  utils.catchupWhiteSpace(node.range[1] - 1, state);
  // Skip the trailing }
  utils.move(node.range[1], state);
  if (!previousWasSpread) {
    utils.append('}', state);
  }
  utils.append(')', state);
  return false;
}
/**
 * A node is transformable when it is an object literal that contains at
 * least one spread property and no non-'init' properties (i.e. no
 * getters/setters), which this transform cannot represent.
 */
visitObjectLiteralSpread.test = function(node, path, state) {
  if (node.type !== Syntax.ObjectExpression) {
    return false;
  }
  // Tight loop optimization
  var sawSpread = false;
  for (var idx = 0; idx < node.properties.length; idx++) {
    var prop = node.properties[idx];
    if (prop.type === Syntax.SpreadProperty) {
      sawSpread = true;
    } else if (prop.kind !== 'init') {
      return false;
    }
  }
  return sawSpread;
};
// Visitors contributed by this module to the jstransform pipeline.
exports.visitorList = [
  visitObjectLiteralSpread
];
| {
"pile_set_name": "Github"
} |
18
charge = 0
C 0.952712 -0.108497 -0.167080
C 2.440076 -0.038828 -0.055333
C 3.112715 1.108826 0.003062
C 4.607630 1.181923 0.053297
C 5.229803 -0.162296 -0.306989
N 4.572047 -1.287890 0.332585
C 3.166979 -1.357452 -0.022796
H 0.506099 0.885802 -0.219739
H 0.515836 -0.636999 0.687537
H 0.652387 -0.663923 -1.062713
H 2.561854 2.046903 0.002735
H 4.936894 1.497041 1.053374
H 4.965992 1.958509 -0.632629
H 6.292936 -0.181284 -0.052803
H 5.159879 -0.313911 -1.390255
H 4.654203 -1.192046 1.338054
H 2.665281 -2.032194 0.680191
H 3.071278 -1.840403 -1.006988 | {
"pile_set_name": "Github"
} |
# Query Theme
Query Theme is a React component used in managing the fetching of individual theme objects.
## Usage
Render the component, passing `siteId` and `themeId`. It does not accept any children, nor does it render any elements to the page. You can use it adjacent to other sibling components which make use of the fetched data made available through the global application state.
```jsx
import React from 'react';
import { connect } from 'react-redux';
import QueryTheme from 'components/data/query-theme';
import Theme from 'components/theme';
import { getTheme } from 'state/themes/selectors';
function MyTheme( { theme } ) {
return (
<div>
<QueryTheme siteId={ 3584907 } themeId={ 'twentysixteen' } />
			<Theme theme={ theme } />
</div>
);
}
export default connect( ( state ) => ( {
theme: getTheme( state, 3584907, 'twentysixteen' ),
} ) )( MyTheme );
```
## Props
### `siteId`
<table>
<tr><th>Type</th><td>Number</td></tr>
<tr><th>Required</th><td>No</td></tr>
<tr><th>Default</th><td><code>null</code></td></tr>
</table>
The site ID for which themes should be queried.
### `themeId`
<table>
<tr><th>Type</th><td>string</td></tr>
<tr><th>Required</th><td>Yes</td></tr>
<tr><th>Default</th><td><code>''</code></td></tr>
</table>
The theme Id of theme we wish to obtain.
| {
"pile_set_name": "Github"
} |
var assert = require('chai').assert;
var assetsDir = '../../../../WordPressEditor/src/main/assets';
var underscore = require(assetsDir + '/libs/underscore-min.js');
// Set up globals needed by shortcode, wpload, and wpsave
global.window = {};
global._ = underscore;
global.wp = {};
// wp-admin libraries
var shortcode = require(assetsDir + '/libs/shortcode.js');
var wpload = require(assetsDir + '/libs/wpload.js');
var wpsave = require(assetsDir + '/libs/wpsave.js');
var formatterlib = require(assetsDir + '/editor-utils-formatter.js');
var formatter = formatterlib.Formatter;
// Media strings
// Image strings
var imageSrc = 'content://com.android.providers.media.documents/document/image%3A12951';
var plainImageHtml = '<img src="' + imageSrc + '" alt="" class="wp-image-123 size-full" width="172" height="244">';
var imageWrappedInLinkHtml = '<a href="' + imageSrc + '">' + plainImageHtml + '</a>';
// Captioned image strings
var imageCaptionShortcode = '[caption width="600" align="alignnone"]' + imageSrc + 'Text[/caption]';
var imageWithCaptionHtml = '<label class="wp-temp" data-wp-temp="caption" onclick="">' +
'<span class="wp-caption" style="width:600px; max-width:100% !important;" data-caption-width="600" ' +
'data-caption-align="alignnone">' + imageSrc + 'Text</span></label>';
var linkedImageCaptionShortcode = '[caption width="600" align="alignnone"]' + imageWrappedInLinkHtml + 'Text[/caption]';
var linkedImageCaptionHtml = '<label class="wp-temp" data-wp-temp="caption" onclick="">' +
'<span class="wp-caption" style="width:600px; max-width:100% !important;" data-caption-width="600" ' +
'data-caption-align="alignnone">' + imageWrappedInLinkHtml + 'Text</span></label>';
// Video strings
var videoSrc = 'content://com.android.providers.media.documents/document/video%3A12966';
var videoShortcode = '[video src="' + videoSrc + '" poster=""][/video]';
var videoHtml = '<span class="edit-container" contenteditable="false"><span class="delete-overlay"></span>' +
'<video webkit-playsinline src="' + videoSrc + '" poster="" preload="metadata" onclick="" controls="controls">' +
'</video></span>';
// VideoPress video strings
var vpVideoShortcode = '[wpvideo ABCD1234]';
var vpVideoHtml = '<span class="edit-container" contenteditable="false"><span class="delete-overlay"></span>' +
'<video data-wpvideopress="ABCD1234" webkit-playsinline src="" preload="metadata" poster="svg/wpposter.svg" ' +
'onclick="" onerror="ZSSEditor.sendVideoPressInfoRequest(\'ABCD1234\');"></video></span>';
// Top-level suite: checks plain-text conversion and then, via
// testMediaParagraphWrapping(), paragraph wrapping for every supported media
// type (plain/linked/captioned images, regular and VideoPress videos).
describe('HTML to Visual formatter should correctly convert', function () {
    it('single-line HTML', function () {
        assert.equal('<p>Some text</p>\n', formatter.htmlToVisual('Some text'));
    });
    it('multi-paragraph HTML', function () {
        assert.equal('<p>Some text</p>\n<p>More text</p>\n', formatter.htmlToVisual('Some text\n\nMore text'));
    });
    testMediaParagraphWrapping('non-linked image', plainImageHtml, plainImageHtml);
    testMediaParagraphWrapping('linked image', imageWrappedInLinkHtml, imageWrappedInLinkHtml);
    testMediaParagraphWrapping('non-linked image, with caption', imageCaptionShortcode, imageWithCaptionHtml);
    testMediaParagraphWrapping('linked image, with caption', linkedImageCaptionShortcode, linkedImageCaptionHtml);
    testMediaParagraphWrapping('non-VideoPress video', videoShortcode, videoHtml);
    testMediaParagraphWrapping('VideoPress video', vpVideoShortcode, vpVideoHtml);
});
/**
 * Generates a sub-suite asserting that a media element ends up wrapped in its
 * own paragraph (<p> after htmlToVisual, <div> after convertPToDiv) no matter
 * where it appears in the post and whether it is separated from surrounding
 * text by blank lines (paragraphs) or single newlines (line breaks).
 *
 * @param mediaType           label used as the sub-suite name
 * @param htmlModeMediaHtml   media markup as it appears in HTML mode
 * @param visualModeMediaHtml the equivalent markup in visual mode
 */
function testMediaParagraphWrapping(mediaType, htmlModeMediaHtml, visualModeMediaHtml) {
    describe(mediaType, function () {
        it('alone in post', function () {
            var visualFormattingApplied = formatter.htmlToVisual(htmlModeMediaHtml);
            assert.equal('<p>' + visualModeMediaHtml + '</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            // Trailing empty <div><br></div> gives the caret somewhere to go.
            assert.equal('<div>' + visualModeMediaHtml + '</div><div><br></div>', convertedToDivs);
        });
        it('with paragraphs above and below', function () {
            var imageBetweenParagraphs = 'Line 1\n\n' + htmlModeMediaHtml + '\n\nLine 2';
            var visualFormattingApplied = formatter.htmlToVisual(imageBetweenParagraphs);
            assert.equal('<p>Line 1</p>\n<p>' + visualModeMediaHtml + '</p>\n<p>Line 2</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>Line 1</div><div>' + visualModeMediaHtml + '</div><div>Line 2</div>', convertedToDivs);
        });
        it('with line breaks above and below', function () {
            // Single newlines become <br /> in visual mode, but the media
            // element is still split into its own <div>.
            var imageBetweenLineBreaks = 'Line 1\n' + htmlModeMediaHtml + '\nLine 2';
            var visualFormattingApplied = formatter.htmlToVisual(imageBetweenLineBreaks);
            assert.equal('<p>Line 1<br />\n' + visualModeMediaHtml + '<br />\nLine 2</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>Line 1</div><div>' + visualModeMediaHtml + '</div><div>Line 2</div>', convertedToDivs);
        });
        it('start of post, with paragraph underneath', function () {
            var imageFollowedByParagraph = htmlModeMediaHtml + '\n\nLine 2';
            var visualFormattingApplied = formatter.htmlToVisual(imageFollowedByParagraph);
            assert.equal('<p>' + visualModeMediaHtml + '</p>\n<p>Line 2</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>' + visualModeMediaHtml + '</div><div>Line 2</div>', convertedToDivs);
        });
        it('start of post, with line break underneath', function () {
            var imageFollowedByLineBreak = htmlModeMediaHtml + '\nLine 2';
            var visualFormattingApplied = formatter.htmlToVisual(imageFollowedByLineBreak);
            assert.equal('<p>' + visualModeMediaHtml + '<br \/>\nLine 2</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>' + visualModeMediaHtml + '</div><div>Line 2</div>', convertedToDivs);
        });
        it('end of post, with paragraph above', function () {
            var imageUnderParagraph = 'Line 1\n\n' + htmlModeMediaHtml;
            var visualFormattingApplied = formatter.htmlToVisual(imageUnderParagraph);
            assert.equal('<p>Line 1</p>\n<p>' + visualModeMediaHtml + '</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>Line 1</div><div>' + visualModeMediaHtml + '</div><div><br></div>', convertedToDivs);
        });
        it('end of post, with line break above', function () {
            var imageUnderLineBreak = 'Line 1\n' + htmlModeMediaHtml;
            var visualFormattingApplied = formatter.htmlToVisual(imageUnderLineBreak);
            assert.equal('<p>Line 1<br \/>\n' + visualModeMediaHtml + '</p>\n', visualFormattingApplied);
            var convertedToDivs = formatter.convertPToDiv(visualFormattingApplied).replace(/\n/g, '');
            assert.equal('<div>Line 1</div><div>' + visualModeMediaHtml + '</div><div><br></div>', convertedToDivs);
        });
    });
}
| {
"pile_set_name": "Github"
} |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from .screen import Screen

# __all__ entries must be strings naming the public objects, not the objects
# themselves; a non-string entry makes "from ... import *" raise TypeError.
__all__ = ['Screen']
| {
"pile_set_name": "Github"
} |
---
# Save Docker images (optionally through the repository cache) and record the
# resulting image references for the IP Plan report generator.
- name: Set async task
  set_fact:
    docker_async_tasks: {}
# Kick off one async save per image that is not already in the remote cache.
- name: Save all images
  vars:
    name: "{{ item.key }}"
    image: "{{ item.value }}"
  include_tasks: "save_image.yml"
  with_dict: "{{ images }}"
  when: docker_repository_cache.enabled and not docker_images_remote_exists[name]
# Block until every async save started above has finished.
- name: Wait for tasks
  vars:
    name: "{{ item.key }}"
    task_id: "{{ item.value }}"
  include_tasks: wait.yml
  with_dict: "{{ docker_async_tasks }}"
- name: Save image to a file {{ build_logs }}/container_images.txt for IP Plan report generator
  lineinfile:
    dest: '{{ build_logs }}/container_images.txt'
    line: '{{ docker_repository_cache_registry }}/{{ docker_version_prefix }}/{{ item.key }}:{{ docker_images_remote_tags[item.key] }}'
    state: present
    create: true
  with_dict: "{{ images }}"
  when: docker_repository_cache.enabled and build_logs | default(False)
# NOTE(review): this task is identical to the previous one except the `when`
# is negated (cache *disabled*), yet the recorded line still references
# docker_repository_cache_registry — confirm this is intentional.
- name: Save image to a file {{ build_logs }}/container_images.txt for IP Plan report generator
  lineinfile:
    dest: '{{ build_logs }}/container_images.txt'
    line: '{{ docker_repository_cache_registry }}/{{ docker_version_prefix }}/{{ item.key }}:{{ docker_images_remote_tags[item.key] }}'
    state: present
    create: true
  with_dict: "{{ images }}"
  when: not docker_repository_cache.enabled and build_logs | default(False)
"pile_set_name": "Github"
} |
package build
import (
"flag"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestFlags verifies that command-line arguments are parsed into the
// expected build Options, including repeated -large_file patterns.
func TestFlags(t *testing.T) {
	cases := []struct {
		args []string
		want Options
	}{
		{
			// Defaults
			args: []string{},
			want: Options{},
		},
		{
			args: []string{"-index", "/tmp"},
			want: Options{
				IndexDir: "/tmp",
			},
		},
		{
			// single large file pattern
			args: []string{"-large_file", "*.md"},
			want: Options{
				LargeFiles: []string{"*.md"},
			},
		},
		{
			// multiple large file pattern
			args: []string{"-large_file", "*.md", "-large_file", "*.yaml"},
			want: Options{
				LargeFiles: []string{"*.md", "*.yaml"},
			},
		},
	}

	for _, tc := range cases {
		tc.want.SetDefaults()
		var got Options
		fs := flag.NewFlagSet("", flag.ContinueOnError)
		got.Flags(fs)
		if err := fs.Parse(tc.args); err != nil {
			t.Errorf("failed to parse args %v: %v", tc.args, err)
			continue
		}
		if diff := cmp.Diff(tc.want, got); diff != "" {
			t.Errorf("mismatch for %v (-want +got):\n%s", tc.args, diff)
		}
	}
}
| {
"pile_set_name": "Github"
} |
###DESCRIPTION
## Factoring Trinomials
##
##ENDDESCRIPTION
## DBsubject(Algebra)
## DBchapter(Factoring)
## DBsection(Factoring trinomials)
## Institution(The College of Idaho)
## Author(RA Cruz)
## Level(3)
## TitleText1('Essentials of Intermediate Algebra')
## AuthorText1('Blitzer')
## EditionText1('1')
## Section1('5.4')
## Problem1('')
## KEYWORDS('factoring')
## Date: 2007/11
DOCUMENT(); # This should be the first executable line in the problem.
loadMacros(
"PGstandard.pl",
"MathObjects.pl",
"CofIdaho_macros.pl",
"PGcourse.pl"
);
TEXT(beginproblem());
######################################
# Setup
# Pick a random variable name and coefficients a,b,c. The gcd conditions
# keep the trinomial primitive (no common factor inside the binomials) and
# a != c keeps the two binomial factors distinct.
@alphabet = ("a","x","y");
$n = random(0,2,1);
$var= $alphabet[$n];
Context()->variables->are($var=>'Real');
$a = random(1,3,1);
do {$b = random(2,9,1);} until (gcd($a,$b)==1);
do {$c = random(2,5,1);} until (gcd($b,$c)==1 && $a!=$c);
# Expanded form of $var*($a*$var - $b)*($c*$var + $b):
#   ac*x^3 + b(a-c)*x^2 - b^2*x
$ac = $a * $c;
$bb = -1*$b * $b;
$abMinusbc = $b * ($a - $c);
$polynomial = Formula("$ac $var^3 + $abMinusbc $var^2 + $bb $var")->reduce->TeX;
######################################
# Main text
BEGIN_TEXT
Factor:
$PAR
\( $polynomial = \) \{ ans_rule(30) \}
END_TEXT
######################################
# Answer
# Must match the factorization used to build the polynomial above.
$answer="$var* ($a * $var - $b) * ($c * $var + $b)";
ANS(FactoringEvaluator($answer,$var));
$showPartialCorrectAnswers = 1;
######################################
;
ENDDOCUMENT();
| {
"pile_set_name": "Github"
} |
// !$*UTF8*$!
{
0876689915AE859F00EF628E /* PBXTextBookmark */ = {
isa = PBXTextBookmark;
fRef = 08F45DAC15AD73C70076CEF1 /* untitled */;
name = "untitled: 67";
rLen = 0;
rLoc = 6434;
rType = 0;
vrLen = 3340;
vrLoc = 5179;
};
08766D1C15AED96900EF628E /* XCBuildMessageTextBookmark */ = {
isa = PBXTextBookmark;
comments = "'app' undeclared (first use in this function)";
fRef = 29B97316FDCFA39411CA2CEA /* main.m */;
fallbackIsa = XCBuildMessageTextBookmark;
rLen = 1;
rLoc = 20;
rType = 1;
};
08766E6315AF04B100EF628E /* PBXTextBookmark */ = {
isa = PBXTextBookmark;
fRef = 29B97316FDCFA39411CA2CEA /* main.m */;
name = "main.m: 21";
rLen = 0;
rLoc = 622;
rType = 0;
vrLen = 689;
vrLoc = 0;
};
08F45B2B15AD37400076CEF1 /* TestApp */ = {
isa = PBXExecutable;
activeArgIndices = (
);
argumentStrings = (
);
autoAttachOnCrash = 1;
breakpointsEnabled = 0;
configStateDict = {
};
customDataFormattersEnabled = 1;
dataTipCustomDataFormattersEnabled = 1;
dataTipShowTypeColumn = 1;
dataTipSortType = 0;
debuggerPlugin = GDBDebugging;
disassemblyDisplayState = 0;
enableDebugStr = 1;
environmentEntries = (
);
executableSystemSymbolLevel = 0;
executableUserSymbolLevel = 0;
libgmallocEnabled = 0;
name = TestApp;
showTypeColumn = 0;
sourceDirectories = (
);
};
08F45B3F15AD37450076CEF1 /* Source Control */ = {
isa = PBXSourceControlManager;
fallbackIsa = XCSourceControlManager;
isSCMEnabled = 0;
scmConfiguration = {
repositoryNamesForRoots = {
"" = "";
};
};
};
08F45B4015AD37450076CEF1 /* Code sense */ = {
isa = PBXCodeSenseManager;
indexTemplatePath = "";
};
08F45B5B15AD41610076CEF1 /* PBXTextBookmark */ = {
isa = PBXTextBookmark;
fRef = 1D3623240D0F684500981E51 /* TestAppAppDelegate.h */;
name = "TestAppAppDelegate.h: 1";
rLen = 0;
rLoc = 0;
rType = 0;
vrLen = 339;
vrLoc = 0;
};
08F45DAC15AD73C70076CEF1 /* untitled */ = {
uiCtxt = {
sepNavIntBoundsRect = "{{0, 0}, {1651, 2052}}";
sepNavSelRange = "{6434, 0}";
sepNavVisRange = "{5179, 3340}";
};
};
08F45FA615AD91EB0076CEF1 /* PBXTextBookmark */ = {
isa = PBXTextBookmark;
fRef = 32CA4F630368D1EE00C91783 /* TestApp_Prefix.pch */;
name = "TestApp_Prefix.pch: 1";
rLen = 0;
rLoc = 0;
rType = 0;
vrLen = 183;
vrLoc = 0;
};
1D3623240D0F684500981E51 /* TestAppAppDelegate.h */ = {
uiCtxt = {
sepNavIntBoundsRect = "{{0, 0}, {790, 561}}";
sepNavSelRange = "{0, 0}";
sepNavVisRange = "{0, 339}";
};
};
1D6058900D05DD3D006BFB54 /* TestApp */ = {
activeExec = 0;
executables = (
08F45B2B15AD37400076CEF1 /* TestApp */,
);
};
29B97313FDCFA39411CA2CEA /* Project object */ = {
activeBuildConfigurationName = Release;
activeExecutable = 08F45B2B15AD37400076CEF1 /* TestApp */;
activeSDKPreference = iphoneos4.0;
activeTarget = 1D6058900D05DD3D006BFB54 /* TestApp */;
addToTargets = (
1D6058900D05DD3D006BFB54 /* TestApp */,
);
codeSenseManager = 08F45B4015AD37450076CEF1 /* Code sense */;
executables = (
08F45B2B15AD37400076CEF1 /* TestApp */,
);
perUserDictionary = {
PBXConfiguration.PBXFileTableDataSource3.PBXFileTableDataSource = {
PBXFileTableDataSourceColumnSortingDirectionKey = "-1";
PBXFileTableDataSourceColumnSortingKey = PBXFileDataSource_Filename_ColumnID;
PBXFileTableDataSourceColumnWidthsKey = (
20,
1473,
20,
48,
43,
43,
20,
);
PBXFileTableDataSourceColumnsKey = (
PBXFileDataSource_FiletypeID,
PBXFileDataSource_Filename_ColumnID,
PBXFileDataSource_Built_ColumnID,
PBXFileDataSource_ObjectSize_ColumnID,
PBXFileDataSource_Errors_ColumnID,
PBXFileDataSource_Warnings_ColumnID,
PBXFileDataSource_Target_ColumnID,
);
};
PBXConfiguration.PBXTargetDataSource.PBXTargetDataSource = {
PBXFileTableDataSourceColumnSortingDirectionKey = "-1";
PBXFileTableDataSourceColumnSortingKey = PBXFileDataSource_Filename_ColumnID;
PBXFileTableDataSourceColumnWidthsKey = (
20,
1433,
60,
20,
48,
43,
43,
);
PBXFileTableDataSourceColumnsKey = (
PBXFileDataSource_FiletypeID,
PBXFileDataSource_Filename_ColumnID,
PBXTargetDataSource_PrimaryAttribute,
PBXFileDataSource_Built_ColumnID,
PBXFileDataSource_ObjectSize_ColumnID,
PBXFileDataSource_Errors_ColumnID,
PBXFileDataSource_Warnings_ColumnID,
);
};
PBXPerProjectTemplateStateSaveDate = 363780293;
PBXWorkspaceStateSaveDate = 363780293;
};
perUserProjectItems = {
0876689915AE859F00EF628E /* PBXTextBookmark */ = 0876689915AE859F00EF628E /* PBXTextBookmark */;
08766D1C15AED96900EF628E /* XCBuildMessageTextBookmark */ = 08766D1C15AED96900EF628E /* XCBuildMessageTextBookmark */;
08766E6315AF04B100EF628E /* PBXTextBookmark */ = 08766E6315AF04B100EF628E /* PBXTextBookmark */;
08F45B5B15AD41610076CEF1 /* PBXTextBookmark */ = 08F45B5B15AD41610076CEF1 /* PBXTextBookmark */;
08F45FA615AD91EB0076CEF1 /* PBXTextBookmark */ = 08F45FA615AD91EB0076CEF1 /* PBXTextBookmark */;
};
sourceControlManager = 08F45B3F15AD37450076CEF1 /* Source Control */;
userBuildSettings = {
};
};
29B97316FDCFA39411CA2CEA /* main.m */ = {
uiCtxt = {
sepNavIntBoundsRect = "{{0, 0}, {1447, 485}}";
sepNavSelRange = "{625, 0}";
sepNavVisRange = "{0, 689}";
sepNavWindowFrame = "{{15, 169}, {1220, 884}}";
};
};
32CA4F630368D1EE00C91783 /* TestApp_Prefix.pch */ = {
uiCtxt = {
sepNavIntBoundsRect = "{{0, 0}, {790, 536}}";
sepNavSelRange = "{0, 0}";
sepNavVisRange = "{0, 183}";
};
};
}
| {
"pile_set_name": "Github"
} |
// Mantid Repository : https://github.com/mantidproject/mantid
//
// Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
// NScD Oak Ridge National Laboratory, European Spallation Source,
// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
// SPDX - License - Identifier: GPL - 3.0 +
#pragma once
#include "MantidAPI/ImplicitFunctionParameter.h"
#include "MantidGeometry/MDGeometry/MDTypes.h"
#include "MantidKernel/Matrix.h"
#include "MantidKernel/System.h"
namespace Mantid {
namespace DataObjects {
/// Convenience typedef for a specific matrix type.
using AffineMatrixType = Mantid::Kernel::Matrix<coord_t>;
/** Type to wrap an affine matrix and allow serialization via xml.
 *
 * Holds the matrix twice: once as an AffineMatrixType and once as a raw
 * array-of-column-pointers (m_rawMatrix over the contiguous block m_rawMem)
 * for speed-critical callers.
 *
 * @author Owen Arnold
 * @date 20/07/2011
 */
class DLLExport AffineMatrixParameter
    : public Mantid::API::ImplicitFunctionParameter {
public:
  // ImplicitFunctionParameter methods.
  std::string getName() const override;
  bool isValid() const override;
  std::string toXMLString() const override;
  AffineMatrixParameter *clone() const override;
  // Replace the wrapped affine matrix.
  void setMatrix(const AffineMatrixType &newMatrix);
  // Construct for the given output/input dimensionalities.
  AffineMatrixParameter(size_t outD, size_t inD);
  AffineMatrixParameter(const AffineMatrixParameter &);
  AffineMatrixParameter &operator=(const AffineMatrixParameter &other);
  ~AffineMatrixParameter() override;
  // Raw pointer view of the matrix; lifetime tied to this object.
  coord_t **getRawMatrix();
  AffineMatrixType getAffineMatrix() const;
  /**
   * Gets the type parameter name.
   * @return parameter name.
   */
  static std::string parameterName() { return "AffineMatrixParameter"; }

private:
  // Presumably syncs m_affineMatrix into the raw representation —
  // confirm against the .cpp implementation.
  void copyRawMatrix();
  /// Raw matrix used for speed (array of pointers to columns).
  coord_t **m_rawMatrix;
  /// pointer to large memory block (matrix)
  coord_t *m_rawMem;
  /// Affine matrix.
  AffineMatrixType m_affineMatrix;
};
} // namespace DataObjects
} // namespace Mantid
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2014 Apple Inc. All rights reserved. */
#ifndef __LA_ARITHMETIC_HEADER__
#define __LA_ARITHMETIC_HEADER__
#include <vecLib/LinearAlgebra/object.h>
#if __has_feature(assume_nonnull)
//// If assume_nonnull is available, use it and use nullability qualifiers.
_Pragma("clang assume_nonnull begin")
#else
//// Otherwise, neuter the nullability qualifiers.
#define __nullable
#define __nonnull
#endif
/*!
@abstract
Transpose a vector or matrix.
@discussion
Returns a matrix that is the transpose of the source vector or matrix. If the
source object is not a vector or matrix, the returned object will have status
LA_INVALID_PARAMETER_ERROR.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_transpose(la_object_t matrix);
/*!
@abstract
Multiply a matrix or vector by a scalar given by a float.
@discussion
Returns a matrix whose entries are the product of the scalar and the
corresponding element of the source matrix. If the source object is not
a vector or matrix, the returned object will have status
LA_INVALID_PARAMETER_ERROR.
If the scalar type of matrix is not float LA_PRECISION_MISMATCH_ERROR is
returned.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_scale_with_float(la_object_t matrix, float scalar);
/*!
@abstract
Multiply a matrix or vector by a scalar given by a double.
@discussion
Returns a matrix whose entries are the product of the scalar and the
corresponding element of the source matrix. If the source object is not
a vector or matrix, the returned object will have status
LA_INVALID_PARAMETER_ERROR.
If the scalar type of matrix is not double LA_PRECISION_MISMATCH_ERROR is
returned.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_scale_with_double(la_object_t matrix, double scalar);
/*!
@abstract
Compute the element-wise sum of two vectors or matrices.
@discussion
If either source operand is not a vector or matrix or splat, or if both
operands are splats, the result has status LA_INVALID_PARAMETER_ERROR.
The two operands must have the same dimensions. If they do not, the result
will have status LA_DIMENSION_MISMATCH_ERROR. For simplicity, a vector
of length n, a 1xn matrix, and an nx1 matrix are all treated as having the
same dimensions. If 1xn and nx1 or nx1 and 1xn vectors are passed, an nx1
vector will be created, otherwise orientation matches input.
The result has the same dimensions as the operands, and each element in
the result is the sum of the corresponding elements in the source operands.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_sum(la_object_t obj_left, la_object_t obj_right);
/*!
@abstract
Compute the element-wise difference of two vectors or matrices.
@discussion
If either source operand is not a vector or matrix or splat, or if both
operands are splats, the result has status LA_INVALID_PARAMETER_ERROR.
The two operands must have the same dimensions. If they do not, the result
will have status LA_DIMENSION_MISMATCH_ERROR. For simplicity, a vector
of length n, a 1xn matrix, and an nx1 matrix are all treated as having the
same dimensions. If 1xn and nx1 or nx1 and 1xn vectors are passed, an nx1
vector will be created, otherwise orientation matches input.
The result has the same dimensions as the operands, and each element in
the result is given by subtracting the corresponding element of obj_right
from the corresponding element of obj_left.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_difference(la_object_t obj_left, la_object_t obj_right);
/*!
@abstract
Compute the element-wise product of two vectors or matrices.
@discussion
If either source operand is not a vector or matrix or splat, or if both
operands are splats, the result has status LA_INVALID_PARAMETER_ERROR.
The two operands must have the same dimensions. If they do not, the result
will have status LA_DIMENSION_MISMATCH_ERROR. For simplicity, a vector
of length n, a 1xn matrix, and an nx1 matrix are all treated as having the
same dimensions. If 1xn and nx1 or nx1 and 1xn vectors are passed, an nx1
vector will be created, otherwise orientation matches input.
The result has the same dimensions as the operands, and each element in
the result is the product of the corresponding elements in the source operands.
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_elementwise_product(la_object_t obj_left, la_object_t obj_right);
/*!
@abstract
Compute the inner product of two vectors.
@discussion
If either operand is a matrix that is not 1xn or nx1, the result has the
status LA_INVALID_PARAMETER_ERROR.
If either operand is not a vector or matrix or splat, or if both operands
are splats, the result has the status LA_INVALID_PARAMETER_ERROR.
If the lengths of the two operands do not match, the result has the status
LA_DIMENSION_MISMATCH_ERROR.
Otherwise the result is a 1x1 matrix containing the inner product:
sum_{i=0...length} vector_left[i] * vector_right[i]
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_inner_product(la_object_t vector_left, la_object_t vector_right);
/*!
@abstract
Compute the outer product of two vectors.
@discussion
Splats are not supported by this function. If either operand
is a splat, the result has status LA_INVALID_PARAMETER_ERROR.
If either operand is a matrix that is not 1xn or nx1, the result has the
status LA_INVALID_PARAMETER_ERROR.
If either operand is not a vector or matrix, the result has the status
LA_INVALID_PARAMETER_ERROR.
 Otherwise the result is a matrix containing the outer product. It has
length(vector_left) rows and length(vector_right) columns. The i,jth
element of the matrix is vector_left[i] * vector_right[j].
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_outer_product(la_object_t vector_left, la_object_t vector_right);
/*!
@abstract
Compute a matrix product.
@discussion
Left splat operands are treated as 1 x rows(matrix_right) vectors and right
splat operands are treated as cols(matrix_left) x 1 vectors.
For convenience, in certain situations vector operands may be implicitly
transposed. Specifically,
If cols(matrix_left) == rows(matrix_right)
rows(matrix_left) x cols(matrix_right) = matrix_left * matrix_right
Else if cols(matrix_left) == 1 and rows(matrix_left) == rows(matrix_right)
1 x cols(matrix_right) = transpose(matrix_left) * matrix_right
Else if rows(matrix_right) == 1 and cols(matrix_left) == cols(matrix_right)
rows(matrix_left) x 1 = matrix_left * transpose(matrix_right)
Else
result has the status LA_DIMENSION_MISMATCH_ERROR.
If either operand is not a vector or matrix or splat, or if both operands
are splats, the result has the status LA_INVALID_PARAMETER_ERROR.
Otherwise the result is a matrix with 1 row if matrix_left is vector or splat
and rows(matrix_left) otherwise, and 1 column if matrix_right is vector or
splat and cols(matrix_right) otherwise.
If cols(matrix_left) == rows(matrix_right), the i,jth element of the matrix is:
sum_{k=0...cols(matrix_left)} matrix_left[i,k] * matrix_right[k,j]
If cols(matrix_left) == 1 and rows(matrix_left) == rows(matrix_right), the
0,jth element of matrix is:
sum_{k=0...rows(matrix_right)} matrix_left[k,0] * matrix_right[k,j]
If rows(matrix_right) == 1 and cols(matrix_left) == cols(matrix_right), the
i,0th element of matrix is:
sum_{k=0...cols(matrix_left)} matrix_left[i,k] * matrix_right[0,k]
*/
LA_FUNCTION LA_AVAILABILITY LA_RETURNS_RETAINED
la_object_t la_matrix_product(la_object_t matrix_left,
la_object_t matrix_right);
#if __has_feature(assume_nonnull)
_Pragma("clang assume_nonnull end")
#endif
#endif // __LA_ARITHMETIC_HEADER__
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: aeeb7d6f27703ad449e51aedf6f147d2
TextureImporter:
internalIDToNameTable:
- first:
213: 21300000
second: hair_longovereye_0
- first:
213: 21300002
second: hair_longovereye_1
- first:
213: 21300004
second: hair_longovereye_2
- first:
213: 21300006
second: hair_longovereye_3
externalObjects: {}
serializedVersion: 10
mipmaps:
mipMapMode: 0
enableMipMap: 0
sRGBTexture: 1
linearTexture: 0
fadeOut: 0
borderMipMap: 0
mipMapsPreserveCoverage: 0
alphaTestReferenceValue: 0.5
mipMapFadeDistanceStart: 1
mipMapFadeDistanceEnd: 3
bumpmap:
convertToNormalMap: 0
externalNormalMap: 0
heightScale: 0.25
normalMapFilter: 0
isReadable: 1
streamingMipmaps: 0
streamingMipmapsPriority: 0
grayScaleToAlpha: 0
generateCubemap: 6
cubemapConvolution: 0
seamlessCubemap: 0
textureFormat: 1
maxTextureSize: 2048
textureSettings:
serializedVersion: 2
filterMode: 0
aniso: -1
mipBias: -100
wrapU: 1
wrapV: 1
wrapW: 1
nPOTScale: 0
lightmap: 0
compressionQuality: 50
spriteMode: 2
spriteExtrude: 1
spriteMeshType: 0
alignment: 0
spritePivot: {x: 0.5, y: 0.5}
spritePixelsToUnits: 32
spriteBorder: {x: 0, y: 0, z: 0, w: 0}
spriteGenerateFallbackPhysicsShape: 1
alphaUsage: 1
alphaIsTransparency: 1
spriteTessellationDetail: -1
textureType: 8
textureShape: 1
singleChannelComponent: 0
maxTextureSizeSet: 0
compressionQualitySet: 0
textureFormatSet: 0
platformSettings:
- serializedVersion: 3
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: 4
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 1
- serializedVersion: 3
buildTarget: Standalone
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: 4
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 1
spriteSheet:
serializedVersion: 2
sprites:
- serializedVersion: 2
name: hair_longovereye_0
rect:
serializedVersion: 2
x: 0
y: 0
width: 32
height: 32
alignment: 0
pivot: {x: 0, y: 0}
border: {x: 0, y: 0, z: 0, w: 0}
outline: []
physicsShape: []
tessellationDetail: 0
bones: []
spriteID: b16489681e753a24ab39d1f4a26cd82a
internalID: 21300000
vertices: []
indices:
edges: []
weights: []
- serializedVersion: 2
name: hair_longovereye_1
rect:
serializedVersion: 2
x: 32
y: 0
width: 32
height: 32
alignment: 0
pivot: {x: 0, y: 0}
border: {x: 0, y: 0, z: 0, w: 0}
outline: []
physicsShape: []
tessellationDetail: 0
bones: []
spriteID: b16489681e753a24ab39d1f4a26cd82a
internalID: 21300002
vertices: []
indices:
edges: []
weights: []
- serializedVersion: 2
name: hair_longovereye_2
rect:
serializedVersion: 2
x: 64
y: 0
width: 32
height: 32
alignment: 0
pivot: {x: 0, y: 0}
border: {x: 0, y: 0, z: 0, w: 0}
outline: []
physicsShape: []
tessellationDetail: 0
bones: []
spriteID: b16489681e753a24ab39d1f4a26cd82a
internalID: 21300004
vertices: []
indices:
edges: []
weights: []
- serializedVersion: 2
name: hair_longovereye_3
rect:
serializedVersion: 2
x: 96
y: 0
width: 32
height: 32
alignment: 0
pivot: {x: 0, y: 0}
border: {x: 0, y: 0, z: 0, w: 0}
outline: []
physicsShape: []
tessellationDetail: 0
bones: []
spriteID: b16489681e753a24ab39d1f4a26cd82a
internalID: 21300006
vertices: []
indices:
edges: []
weights: []
outline: []
physicsShape: []
bones: []
spriteID: 280b93732caf29f42b334a277f625760
internalID: 0
vertices: []
indices:
edges: []
weights: []
secondaryTextures: []
spritePackingTag:
pSDRemoveMatte: 0
pSDShowRemoveMatteOption: 0
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
/** @format */
const webdriver = require( 'selenium-webdriver' );
const { forEach } = require( 'lodash' );
const explicitWaitMS = 20000;
const by = webdriver.By;
exports.highlightElement = async function( driver, element, color = 'gold' ) {
if ( process.env.HIGHLIGHT_ELEMENT === 'true' ) {
return await driver.executeScript(
`arguments[0].setAttribute('style', 'background: ${color}; border: 3px solid red;');`,
element
);
}
};
exports.clickWhenClickable = async function( driver, selector, waitOverride ) {
let self = this;
const timeoutWait = waitOverride ? waitOverride : explicitWaitMS;
return driver.wait(
function() {
return driver.findElement( selector ).then(
async function( element ) {
await self.highlightElement( driver, element );
await driver.sleep( 300 );
return element.click().then(
function() {
return true;
} );
},
function() {
return false;
}
);
},
timeoutWait,
`Timed out waiting for element with ${ selector.using } of '${
selector.value
}' to be clickable`
);
};
exports.waitTillNotPresent = function( driver, selector, waitOverride ) {
const timeoutWait = waitOverride ? waitOverride : explicitWaitMS;
let self = this;
return driver.wait(
function() {
return self.isElementPresent( driver, selector ).then( function( isPresent ) {
return ! isPresent;
} );
},
timeoutWait,
`Timed out waiting for element with ${ selector.using } of '${
selector.value
}' to NOT be present`
);
};
/**
 * Waits for a link element matching `selector`, then navigates the browser
 * directly to its href (driver.get) rather than clicking it.
 *
 * @param {object} driver - The webdriver instance.
 * @param {object} selector - A webdriver locator for the link.
 * @param {number} [waitOverride] - Timeout in ms; defaults to explicitWaitMS.
 * @returns {Promise} Resolves truthy once navigation has been initiated.
 */
exports.followLinkWhenFollowable = function( driver, selector, waitOverride ) {
	let self = this;
	const timeoutWait = waitOverride ? waitOverride : explicitWaitMS;
	return driver.wait(
		function() {
			return driver.findElement( selector ).then(
				async function( element ) {
					await self.highlightElement( driver, element );
					return element.getAttribute( 'href' ).then(
						function( href ) {
							// Navigate without awaiting; driver.wait resolves once
							// the navigation call has been issued.
							driver.get( href );
							return true;
						},
						function() {
							// Could not read href yet — keep polling.
							return false;
						}
					);
				},
				function() {
					// Element not found yet — keep polling.
					return false;
				}
			);
		},
		timeoutWait,
		`Timed out waiting for link with ${ selector.using } of '${ selector.value }' to be followable`
	);
};
exports.waitTillPresentAndDisplayed = function( driver, selector, waitOverride ) {
let self = this;
const timeoutWait = waitOverride ? waitOverride : explicitWaitMS;
return driver.wait(
function() {
return driver.findElement( selector ).then(
async function( element ) {
await self.highlightElement( driver, element, '' );
await driver.sleep( 300 );
return element.isDisplayed().then(
function() {
return true;
},
function() {
return false;
}
);
},
function() {
return false;
}
);
},
timeoutWait,
`Timed out waiting for element with ${ selector.using } of '${
selector.value
}' to be present and displayed`
);
};
exports.isEventuallyPresentAndDisplayed = function( driver, selector, waitOverride ) {
let self = this;
const timeoutWait = waitOverride ? waitOverride : explicitWaitMS;
return driver
.wait( function() {
return driver.findElement( selector ).then(
async function( element ) {
await self.highlightElement( driver, element, '' );
return element.isDisplayed().then(
function() {
return true;
},
function() {
return false;
}
);
},
function() {
return false;
}
);
}, timeoutWait )
.then(
shown => {
return shown;
},
() => {
return false;
}
);
};
/**
 * Best-effort click: fires up to `attempts` click attempts at the element
 * matching `selector`, ignoring all failures.
 *
 * NOTE(review): each findElement(...) promise is fired without being awaited,
 * so the attempts race each other and the function returns before any click
 * resolves — callers cannot observe success or failure. Presumably deliberate
 * best-effort behavior; confirm before relying on ordering.
 *
 * @param {object} driver - The webdriver instance.
 * @param {object} selector - A webdriver locator.
 * @param {number} [attempts] - Number of click attempts; defaults to 1.
 */
exports.clickIfPresent = function( driver, selector, attempts ) {
	let self = this;
	if ( attempts === undefined ) {
		attempts = 1;
	}
	for ( let x = 0; x < attempts; x++ ) {
		driver.findElement( selector ).then(
			async function( element ) {
				await self.highlightElement( driver, element );
				// Both click outcomes are swallowed on purpose.
				element.click().then(
					function() {
						return true;
					},
					function() {
						return true;
					}
				);
			},
			function() {
				// Element absent — nothing to do.
				return true;
			}
		);
	}
};
exports.isElementPresent = async function( driver, selector ) {
const elements = await driver.findElements( selector );
return !! elements.length;
};
exports.getErrorMessageIfPresent = function( driver ) {
const errorNoticeTextSelector = by.css( '.notice.is-error .notice__text' );
return driver.findElement( errorNoticeTextSelector ).then(
el => {
return el.getText();
},
() => {}
);
};
/**
 * Dumps browser console errors to stdout, filtering known noise.
 * Fire-and-forget: the log retrieval is not awaited or returned.
 *
 * @param {object} driver - The webdriver instance.
 */
exports.checkForConsoleErrors = function( driver ) {
	driver
		.manage()
		.logs()
		.get( 'browser' )
		.then( function( logs ) {
			if ( logs.length > 0 ) {
				forEach( logs, log => {
					// Ignore chrome cast errors in Chrome - http://stackoverflow.com/questions/24490323/google-chrome-cast-sender-error-if-chrome-cast-extension-is-not-installed-or-usi/26095117#26095117
					// Also ignore post message errors - this is a known limitation at present
					// Also ignore 404 errors for viewing sites or posts/pages that are private
					if (
						log.message.indexOf( 'cast_sender.js' ) === -1 &&
						log.message.indexOf( '404' ) === -1 &&
						log.message.indexOf( "Failed to execute 'postMessage' on 'DOMWindow'" ) === -1
					) {
						// Include the current URL for context when reporting.
						driver.getCurrentUrl().then( url => {
							console.log( `Found console error: "${ log.message }" on url '${ url }'` );
						} );
					}
				} );
			}
		} );
};
/**
 * Closes the window/tab the driver currently has focus on.
 *
 * @param {object} driver - The webdriver instance.
 * @returns {Promise} Resolves when the window is closed.
 */
exports.closeCurrentWindow = function( driver ) {
	return driver.close();
};
exports.scrollIntoView = async function( driver, selector ) {
let selectorElement = await driver.findElement( selector );
return await driver.executeScript(
'arguments[0].scrollIntoView( { block: "center", inline: "center" } )',
selectorElement
);
};
/**
 * Clears the field matching `selector`, types `value` into it, and verifies
 * the field's value attribute equals what was typed; retries until it does or
 * the timeout elapses.
 *
 * @param {object} driver - The webdriver instance.
 * @param {object} selector - A webdriver locator for the input field.
 * @param {string} value - Text to type.
 * @param {Object} [options]
 * @param {boolean} [options.secureValue] - When true, the value is masked in the timeout message.
 * @param {number} [options.pauseBetweenKeysMS] - Per-keystroke delay; 0 sends the whole string at once.
 * @returns {Promise} Resolves once the field holds `value`.
 */
exports.setWhenSettable = function(
	driver,
	selector,
	value,
	{ secureValue = false, pauseBetweenKeysMS = 0 } = {}
) {
	const self = this;
	// Never echo secrets (e.g. passwords) into the error message.
	const logValue = secureValue === true ? '*********' : value;
	return driver.wait(
		async function() {
			await self.waitForFieldClearable( driver, selector );
			const element = await driver.findElement( selector );
			await self.highlightElement( driver, element );
			await driver.sleep( 300 );
			if ( pauseBetweenKeysMS === 0 ) {
				await element.sendKeys( value );
			} else {
				// Slow typing path for inputs with per-keystroke handlers.
				for ( let i = 0; i < value.length; i++ ) {
					await driver.sleep( pauseBetweenKeysMS );
					await element.sendKeys( value[ i ] );
				}
			}
			// Confirm the DOM actually holds the value before succeeding.
			const actualValue = await element.getAttribute( 'value' );
			return actualValue === value;
		},
		explicitWaitMS,
		`Timed out waiting for element with ${ selector.using } of '${
			selector.value
		}' to be settable to: '${ logValue }'`
	);
};
exports.waitForFieldClearable = function( driver, selector ) {
let self = this;
return driver.wait(
function() {
return driver.findElement( selector ).then(
async element => {
await self.highlightElement( driver, element, '' );
return element.clear().then(
function() {
return element.getAttribute( 'value' ).then( value => {
return value === '';
} );
},
function() {
return false;
}
);
},
function() {
return false;
}
);
},
explicitWaitMS,
`Timed out waiting for element with ${ selector.using } of '${
selector.value
}' to be clearable`
);
};
/**
 * Clicks the element matching `selector` whose visible text equals `text`.
 *
 * Builds a function-style locator (an async function resolving to the
 * filtered element list) and hands it to clickWhenClickable, which passes it
 * to driver.findElement. NOTE(review): this relies on selenium-webdriver
 * accepting a function locator that resolves to an array of elements —
 * confirm against the webdriver version in use.
 *
 * @param {object} driver - The webdriver instance.
 * @param {object} selector - A webdriver locator matching candidate elements.
 * @param {string} text - Exact visible text to match.
 * @returns {Promise} Resolves once the matching element has been clicked.
 */
exports.selectElementByText = async function( driver, selector, text ) {
	const element = async () => {
		const allElements = await driver.findElements( selector );
		return await webdriver.promise.filter( allElements, async e => ( await e.getText() ) === text );
	};
	return await this.clickWhenClickable( driver, element );
};
| {
"pile_set_name": "Github"
} |
import { CmsPage } from "@shopware-pwa/commons/interfaces/models/content/cms/CmsPage";

/**
 * CMS page model as consumed by the headless frontend.
 * Currently a pure alias of CmsPage — kept as a named extension point so
 * headless-specific fields can be added later without touching callers.
 */
export interface CmsPageForHeadless extends CmsPage {}
| {
"pile_set_name": "Github"
} |
using System;
using MixERP.Net.FrontEnd.Base;
namespace MixERP.Net.Core.Modules.BackOffice.Tax
{
    /// <summary>
    /// Code-behind for the Tax Master back-office screen.
    /// </summary>
    public partial class TaxMaster : MixERPUserControl
    {
        /// <summary>
        /// Control-load hook required by MixERPUserControl.
        /// Intentionally empty: this screen has no load-time logic here;
        /// presumably the markup/partial class supplies the UI — confirm.
        /// </summary>
        public override void OnControlLoad(object sender, EventArgs e)
        {
        }
    }
}
"pile_set_name": "Github"
} |
using FluentValidation;
using NzbDrone.Core.Configuration;
using Sonarr.Http.Validation;
namespace Sonarr.Api.V3.Config
{
    /// <summary>
    /// API v3 module exposing indexer configuration, built on the shared
    /// SonarrConfigModule plumbing.
    /// </summary>
    public class IndexerConfigModule : SonarrConfigModule<IndexerConfigResource>
    {
        public IndexerConfigModule(IConfigService configService)
            : base(configService)
        {
            // Validation applied to incoming resources: non-negative
            // minimum age and retention, and an RSS sync interval within
            // the range enforced by IsValidRssSyncInterval.
            SharedValidator.RuleFor(c => c.MinimumAge)
                .GreaterThanOrEqualTo(0);

            SharedValidator.RuleFor(c => c.Retention)
                .GreaterThanOrEqualTo(0);

            SharedValidator.RuleFor(c => c.RssSyncInterval)
                .IsValidRssSyncInterval();
        }

        /// <summary>Maps the stored configuration to its API resource.</summary>
        protected override IndexerConfigResource ToResource(IConfigService model)
        {
            return IndexerConfigResourceMapper.ToResource(model);
        }
    }
}
"pile_set_name": "Github"
} |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "EnumClinitAnalysis.h"
#include <gtest/gtest.h>
#include "DexClass.h"
#include "DexLoader.h"
#include "DexStore.h"
#include "JarLoader.h"
#include "RedexTest.h"
constexpr const char* ENUM_SAFE = "Lcom/facebook/redextest/EnumSafe;";
constexpr const char* ENUM_SAFE_A =
"Lcom/facebook/redextest/EnumSafe;.A:Lcom/facebook/redextest/EnumSafe;";
constexpr const char* ENUM_SAFE_B =
"Lcom/facebook/redextest/EnumSafe;.B:Lcom/facebook/redextest/EnumSafe;";
constexpr const char* ENUM_SAFE_NAME =
"Lcom/facebook/redextest/EnumSafe;.name:Ljava/lang/String;";
constexpr const char* ENUM_SAFE_VALUE =
"Lcom/facebook/redextest/EnumSafe;.value:I";
constexpr const char* ENUM_SAFE_IS_USEFUL =
"Lcom/facebook/redextest/EnumSafe;.isUseful:Z";
class EnumClinitAnalysisTest : public RedexIntegrationTest {};
/*
* Check that analyze_enum_clinit returns the correct enum field -> ordinal and
* name mapping.
*/
TEST_F(EnumClinitAnalysisTest, OrdinalAnalysis) {
  using namespace optimize_enums;
  ASSERT_TRUE(load_class_file(std::getenv("enum_class_file")));

  // EnumSafe: two constants (A, B) and three instance fields
  // (name, value, isUseful) should be fully resolved by the clinit analysis.
  auto enum_cls = type_class(DexType::get_type(ENUM_SAFE));
  auto attributes = analyze_enum_clinit(enum_cls);
  auto& enum_constants = attributes.m_constants_map;
  auto& ifield_map = attributes.m_field_map;
  EXPECT_EQ(enum_constants.size(), 2);
  EXPECT_EQ(ifield_map.size(), 3);

  // Constant A: ordinal 0, name "A".
  auto field = static_cast<DexField*>(DexField::get_field(ENUM_SAFE_A));
  ASSERT_EQ(enum_constants.count(field), 1);
  EXPECT_EQ(enum_constants[field].ordinal, 0);
  EXPECT_EQ(enum_constants[field].name, DexString::make_string("A"));

  // Constant B: ordinal 1, name "B".
  field = static_cast<DexField*>(DexField::get_field(ENUM_SAFE_B));
  ASSERT_EQ(enum_constants.count(field), 1);
  EXPECT_EQ(enum_constants[field].ordinal, 1);
  EXPECT_EQ(enum_constants[field].name, DexString::make_string("B"));

  // `name` field: "zero" for A, null for B.
  auto ifield = DexField::get_field(ENUM_SAFE_NAME);
  ASSERT_EQ(ifield_map.count(ifield), 1);
  ASSERT_EQ(ifield_map[ifield].size(), 2);
  EXPECT_EQ(ifield_map[ifield][0].string_value, DexString::make_string("zero"));
  EXPECT_EQ(ifield_map[ifield][1].string_value, nullptr);

  // `value` field: 0 for A, 1 for B.
  ifield = DexField::get_field(ENUM_SAFE_VALUE);
  ASSERT_EQ(ifield_map.count(ifield), 1);
  ASSERT_EQ(ifield_map[ifield].size(), 2);
  EXPECT_EQ(ifield_map[ifield][0].primitive_value, 0);
  EXPECT_EQ(ifield_map[ifield][1].primitive_value, 1);

  // `isUseful` field: true for both constants.
  ifield = DexField::get_field(ENUM_SAFE_IS_USEFUL);
  ASSERT_EQ(ifield_map.count(ifield), 1);
  ASSERT_EQ(ifield_map[ifield].size(), 2);
  EXPECT_EQ(ifield_map[ifield][0].primitive_value, 1);
  EXPECT_EQ(ifield_map[ifield][1].primitive_value, 1);

  // These enums should not be optimized.
  for (const char* enum_name : {"Lcom/facebook/redextest/EnumUnsafe1;",
                                "Lcom/facebook/redextest/EnumUnsafe2;"}) {
    enum_cls = type_class(DexType::get_type(enum_name));
    attributes = analyze_enum_clinit(enum_cls);
    EXPECT_TRUE(attributes.m_constants_map.empty());
    EXPECT_TRUE(attributes.m_field_map.empty());
  }
}
| {
"pile_set_name": "Github"
} |
@a: 2;
@x: (@a * @a);
@y: (@x + 1);
@z: (@x * 2 + @y);
@var: -1;
.variables {
width: (@z + 1cm); // 14cm
}
@b: @a * 10;
@c: #888;
@fonts: "Trebuchet MS", Verdana, sans-serif;
@f: @fonts;
@quotes: "~" "~";
@q: @quotes;
@onePixel: 1px;
.variables {
height: (@b + @x + 0px); // 24px
color: @c;
font-family: @f;
quotes: @q;
}
.redef {
@var: 0;
.inition {
@var: 4;
@var: 2;
three: @var;
@var: 3;
}
zero: @var;
}
.values {
minus-one: @var;
@a: 'Trebuchet';
@multi: 'A', B, C;
font-family: @a, @a, @a;
color: @c !important;
multi: something @multi, @a;
}
.variable-names {
@var: 'hello';
@name: 'var';
name: @@name;
}
.alpha {
@var: 42;
filter: alpha(opacity=@var);
}
.polluteMixin() {
@a: 'pollution';
}
.testPollution {
@a: 'no-pollution';
a: @a;
.polluteMixin();
a: @a;
}
.units {
width: @onePixel;
same-unit-as-previously: (@onePixel / @onePixel);
square-pixel-divided: (@onePixel * @onePixel / @onePixel);
odd-unit: unit((@onePixel * 4em / 2cm));
percentage: (10 * 50%);
pixels: (50px * 10);
conversion-metric-a: (20mm + 1cm);
conversion-metric-b: (1cm + 20mm);
conversion-imperial: (1in + 72pt + 6pc);
custom-unit: (42octocats * 10);
custom-unit-cancelling: (8cats * 9dogs / 4cats);
mix-units: (1px + 1em);
invalid-units: (1px * 1px);
}
~~~~~~~~~~
| {
"pile_set_name": "Github"
} |
typedef int OptionValueType;
typedef struct
{
double freq;
int units;
} ValueUnion; // this is actually a union of other stuff and this :(
typedef unsigned char CARD8;
typedef unsigned short CARD16;
typedef unsigned int CARD32;
typedef unsigned long IOADDRESS;
typedef void * pointer;
typedef int Bool;
typedef int INT32;
typedef struct
{
const char *modname;
const char *vendor;
CARD32 _modinfo1_;
CARD32 _modinfo2_;
CARD32 xf86version;
CARD8 majorversion;
CARD8 minorversion;
CARD16 patchlevel;
const char *abiclass;
CARD32 abiversion;
const char *moduleclass;
CARD32 checksum[4];
} XF86ModuleVersionInfo;
typedef pointer ModuleSetupProc (pointer module, pointer opts, int *errmaj, int *errmin);
typedef void ModuleTearDownProc (pointer module);
typedef struct
{
XF86ModuleVersionInfo *vers;
ModuleSetupProc *setup;
ModuleTearDownProc *teardown;
} XF86ModuleData;
typedef struct
{
int token;
const char *name;
OptionValueType type;
ValueUnion value;
Bool found;
} OptionInfoRec;
typedef OptionInfoRec *OptionInfoPtr;
typedef OptionInfoRec OptionInfoRecs[];
typedef void IdentifyFunc (int flags);
typedef OptionInfoRecs *AvailableOptionsFunc (int chipid, int bustype);
typedef struct
{
int driverVersion;
char *driverName;
IdentifyFunc *Identify;
ProbeFunc *Probe;
AvailableOptionsFunc *AvailableOptions;
pointer module;
int refCount;
} DriverRec;
typedef DriverRec *DriverPtr;
typedef Bool ProbeFunc (DriverRec *drv, int flags);
void xf86AddDriver(DriverPtr driver, pointer module, int flags);
typedef const char *Sym;
typedef Sym SymList[];
void xf86LoaderRefSymLists(SymList *p, ...);
void xf86LoaderRefSymbols(const char *p, ...);
void LoaderRefSymLists(SymList *p, ...);
void LoaderRefSymbols(const char *p, ...);
typedef struct
{
char *identifier;
char *driver;
pointer commonOptions;
pointer extraOptions;
} IDevRec;
typedef IDevRec *IDevPtr;
typedef struct
{
int vendor;
int chipType;
int chipRev;
int subsysVendor;
int subsysCard;
int bus;
int device;
int func;
int class;
int subclass;
int interface;
memType memBase[6];
memType ioBase[6];
int size[6];
unsigned char type[6];
memType biosBase;
int biosSize;
pointer thisCard;
Bool validSize;
Bool validate;
CARD32 listed_class;
} pciVideoRec;
typedef pciVideoRec *pciVideoPtr;
typedef struct
{
int frameX0;
int frameY0;
int virtualX;
int virtualY;
int depth;
int fbbpp;
rgb weight;
rgb blackColour;
rgb whiteColour;
int defaultVisual;
char **modes;
pointer options;
} DispRec;
typedef DispRec *DispPtr;
typedef struct
{
char *identifier;
pointer options;
} confXvPortRec;
typedef confXvPortRec *confXvPortPtr;
typedef struct
{
char *identifier;
int numports;
confXvPortPtr ports;
pointer options;
} confXvAdaptorRec;
typedef confXvAdaptorRec *confXvAdaptorPtr;
typedef struct
{
float hi;
float lo;
} range;
typedef struct
{
char *_id;
char *vendor;
char *model;
int nHsync;
range hsync[8];
int nVrefresh;
range vrefresh[8];
DisplayModePtr Modes; /* Start of the monitor's mode list */
DisplayModePtr Last; /* End of the monitor's mode list */
Gamma gamma; /* Gamma of the monitor */
int widthmm;
int heightmm;
pointer options;
pointer DDC;
} MonRec;
typedef MonRec *MonPtr;
typedef struct
{
char *_id;
int screennum;
int defaultdepth;
int defaultbpp;
int defaultfbbpp;
MonPtr monitor;
GDevPtr device;
int numdisplays;
DispPtr displays;
int numxvadaptors;
confXvAdaptorPtr xvadaptors;
pointer options;
} confScreenRec;
typedef confScreenRec *confScreenPtr;
typedef struct
{
char *identifier;
char *vendor;
char *board;
char *chipset;
char *ramdac;
char *driver;
confScreenPtr myScreenSection;
Bool claimed;
int dacSpeeds[4];
int numclocks;
int clock[128];
char *clockchip;
char *busID;
Bool active;
Bool inUse;
int videoRam;
int textClockFreq;
unsigned long BiosBase;
unsigned long MemBase;
unsigned long IOBase;
int chipID;
int chipRev;
pointer options;
int irq;
int screen;
} GDevRec;
typedef GDevRec * GDevPtr;
int xf86MatchDevice(const char *drivername, GDevPtr **driversectlist);
typedef unsigned long memType;
pciVideoPtr *xf86GetPciVideoInfo();
typedef unsigned int uint;
pointer Xrealloc(pointer p, uint n);
typedef struct
{
int token;
const char *name;
} SymTabRec;
typedef SymTabRec *SymTabPtr;
typedef struct
{
int numChipset;
int PCIid;
resRange *resList;
} PciChipsets;
typedef struct
{
unsigned long type;
memType a;
memType b;
} resRange;
typedef resRange *resList;
typedef int EntityList[];
int xf86MatchPciInstances(const char *driverName, int vendorID,
SymTabPtr chipsets, PciChipsets *PCIchipsets,
GDevPtr *devList, int numDevs, DriverPtr drvp,
EntityList **foundEntities);
void Xfree(pointer p);
typedef struct
{
unsigned char depth;
unsigned char bitsPerPixel;
unsigned char scanlinePad;
} PixmapFormatRec;
typedef struct
{
int myNum;
ATOM _id;
short width;
short height;
short mmWidth;
short mmHeight;
short numDepths;
unsigned char rootDepth;
DepthPtr allowedDepths;
unsigned long rootVisual;
unsigned long defColormap;
short minInstalledCmaps;
short maxInstalledCmaps;
char backingStoreSupport;
char saveUnderSupport;
unsigned long whitePixel;
unsigned long blackPixel;
unsigned long rgf;
GCPtr GCperDepth[9];
PixmapPtr PixmapPerDepth[1];
pointer devPrivate;
short numVisuals;
VisualPtr visuals;
int WindowPrivateLen;
unsigned *WindowPrivateSizes;
unsigned totalWindowSize;
int GCPrivateLen;
unsigned *GCPrivateSizes;
unsigned totalGCSize;
CloseScreenProcPtr CloseScreen;
QueryBestSizeProcPtr QueryBestSize;
SaveScreenProcPtr SaveScreen;
GetImageProcPtr GetImage;
GetSpansProcPtr GetSpans;
PointerNonInterestBoxProcPtr PointerNonInterestBox;
SourceValidateProcPtr SourceValidate;
CreateWindowProcPtr CreateWindow;
DestroyWindowProcPtr DestroyWindow;
PositionWindowProcPtr PositionWindow;
ChangeWindowAttributesProcPtr ChangeWindowAttributes;
RealizeWindowProcPtr RealizeWindow;
UnrealizeWindowProcPtr UnrealizeWindow;
ValidateTreeProcPtr ValidateTree;
PostValidateTreeProcPtr PostValidateTree;
WindowExposuresProcPtr WindowExposures;
PaintWindowBackgroundProcPtr PaintWindowBackground;
PaintWindowBorderProcPtr PaintWindowBorder;
CopyWindowProcPtr CopyWindow;
ClearToBackgroundProcPtr ClearToBackground;
ClipNotifyProcPtr ClipNotify;
RestackWindowProcPtr RestackWindow;
CreatePixmapProcPtr CreatePixmap;
DestroyPixmapProcPtr DestroyPixmap;
SaveDoomedAreasProcPtr SaveDoomedAreas;
RestoreAreasProcPtr RestoreAreas;
ExposeCopyProcPtr ExposeCopy;
TranslateBackingStoreProcPtr TranslateBackingStore;
ClearBackingStoreProcPtr ClearBackingStore;
DrawGuaranteeProcPtr DrawGuarantee;
BSFuncRec BackingStoreFuncs;
RealizeFontProcPtr RealizeFont;
UnrealizeFontProcPtr UnrealizeFont;
ConstrainCursorProcPtr ConstrainCursor;
CursorLimitsProcPtr CursorLimits;
DisplayCursorProcPtr DisplayCursor;
RealizeCursorProcPtr RealizeCursor;
UnrealizeCursorProcPtr UnrealizeCursor;
RecolorCursorProcPtr RecolorCursor;
SetCursorPositionProcPtr SetCursorPosition;
CreateGCProcPtr CreateGC;
CreateColormapProcPtr CreateColormap;
DestroyColormapProcPtr DestroyColormap;
InstallColormapProcPtr InstallColormap;
UninstallColormapProcPtr UninstallColormap;
ListInstalledColormapsProcPtr ListInstalledColormaps;
StoreColorsProcPtr StoreColors;
ResolveColorProcPtr ResolveColor;
// this just keeps going and going
} ScreenRec;
/* Pointer to the per-screen record defined above. */
typedef ScreenRec *ScreenPtr;
/* Flag word describing how 24bpp pixmaps are handled (Pix24Use* values). */
typedef int Pix24Flags;
/* Classification of a log message (X_INFO, X_CONFIG, X_PROBED, ...). */
typedef int MessageType;
/* One RGB triple with 32-bit components (bit weights/masks/offsets). */
typedef struct
{
    CARD32 red;
    CARD32 green;
    CARD32 blue;
} rgb;
/* Per-channel gamma correction factors. */
typedef struct
{
    float red;
    float green;
    float blue;
} Gamma;
/*
 * One entry in the doubly-linked list of display (video) modes.
 * The first group of fields holds the mode as specified or probed;
 * the Crtc* group holds the timings after the driver has adjusted
 * them for programming the CRTC.
 *
 * Fix: the original referenced the typedef name inside the anonymous
 * struct ("DisplayModeRec *prev;"), which is invalid C because the
 * typedef does not exist yet at that point.  A struct tag is
 * introduced (matching upstream xf86str.h) so the self-references
 * compile; the externally visible typedef names are unchanged.
 */
typedef struct _DisplayModeRec
{
    struct _DisplayModeRec *prev;  /* previous mode in the list */
    struct _DisplayModeRec *next;  /* next mode in the list */
    char *name;                    /* identifier for this mode */
    ModeStatus status;             /* result of mode validation */
    int type;                      /* M_T_* mode-type flags */
    /* Values as specified/probed. */
    int Clock;                     /* pixel clock (kHz) */
    int HDisplay;
    int HSyncStart;
    int HSyncEnd;
    int HTotal;
    int HSkew;
    int VDisplay;
    int VSyncStart;
    int VSyncEnd;
    int VTotal;
    int VScan;
    int Flags;
    /* Values actually programmed into the hardware. */
    int ClockIndex;
    int SynthClock;                /* actual clock to program (kHz) */
    int CrtcHDisplay;
    int CrtcHBlankStart;
    int CrtcHSyncStart;
    int CrtcHSyncEnd;
    int CrtcHBlankEnd;
    int CrtcHTotal;
    int CrtcHSkew;
    int CrtcVDisplay;
    int CrtcVBlankStart;
    int CrtcVSyncStart;
    int CrtcVSyncEnd;
    int CrtcVBlankEnd;
    int CrtcVTotal;
    Bool CrtcHAdjusted;            /* horizontal timings already tweaked */
    Bool CrtcVAdjusted;            /* vertical timings already tweaked */
    int PrivSize;                  /* size of driver-private data below */
    INT32 *Private;                /* driver-private mode data */
    int PrivFlags;
    float HSync;                   /* derived horizontal sync rate (kHz) */
    float VRefresh;                /* derived vertical refresh rate (Hz) */
} DisplayModeRec;
typedef DisplayModeRec *DisplayModePtr;
/* Kind of bus resource being managed (memory, I/O, ...). */
typedef int resType;
/* Callbacks used to disable/enable access to a shared bus resource. */
typedef void AccessDisableFunc (void *arg);
typedef void AccessEnableFunc (void *arg);
/* An enable/disable callback pair plus the opaque argument passed to both. */
typedef struct
{
    AccessDisableFunc *AccessDisable;
    AccessEnableFunc *AccessEnable;
    void *arg;                     /* forwarded verbatim to both callbacks */
} xf86AccessRec;
typedef xf86AccessRec *xf86AccessPtr;
/* Access-control records for the three address-space categories. */
typedef struct
{
    xf86AccessPtr mem;             /* memory-space access control */
    xf86AccessPtr io;              /* I/O-space access control */
    xf86AccessPtr io_mem;          /* combined I/O+memory access control */
} xf86SetAccessFuncRec;
typedef xf86SetAccessFuncRec *xf86SetAccessFuncPtr;
/*
 * Linked list of access-control records for one hardware entity.
 *
 * Fix: the original declared "EntityAccessRec *next;" inside an
 * anonymous struct, where the typedef name is not yet visible —
 * invalid C.  A struct tag is introduced so the self-reference
 * compiles; the typedef names are unchanged.
 */
typedef struct _EntityAccessRec
{
    xf86AccessPtr fallback;        /* used when pAccess is not set */
    xf86AccessPtr pAccess;         /* active access-control record */
    resType rt;                    /* resource type this entry covers */
    pointer busAcc;                /* associated bus-access handle */
    struct _EntityAccessRec *next; /* next entry for this entity */
} EntityAccessRec;
typedef EntityAccessRec *EntityAccessPtr;
/* The access records currently in force for memory and I/O space. */
typedef struct
{
    EntityAccessPtr pMemAccess;    /* current memory-space access chain */
    EntityAccessPtr pIoAccess;     /* current I/O-space access chain */
} xf86CurrentAccessRec;
typedef xf86CurrentAccessRec *xf86CurrentAccessPtr;
/* Generic per-object private slot.  NOTE(review): upstream Xorg declares
 * DevUnion as a union of pointer/long/etc.; here it has been flattened to
 * a bare pointer — confirm this matches the rest of this header set. */
typedef pointer DevUnion;
/*
 * One node in a linked list of usable pixel-clock ranges, with the
 * constraints that apply within the range.
 *
 * Fix: "ClockRange *next;" inside the anonymous struct referenced the
 * typedef before it exists — invalid C.  A struct tag is introduced;
 * the typedef names are unchanged.
 */
typedef struct _ClockRange
{
    struct _ClockRange *next;      /* next range in the list */
    int minClock;                  /* lowest allowed clock (kHz) */
    int maxClock;                  /* highest allowed clock (kHz) */
    int clockIndex;                /* fixed-clock index, if applicable */
    Bool interlaceAllowed;         /* interlaced modes permitted here */
    Bool doubleScanAllowed;        /* doublescan modes permitted here */
    int ClockMulFactor;            /* clock multiplier for this range */
    int ClockDivFactor;            /* clock divider for this range */
    int PrivFlags;                 /* driver-private flags */
} ClockRange;
typedef ClockRange *ClockRangePtr;
/*
 * Extended clock-range node: same fields as ClockRange plus the mode
 * lookup strategy used by the validation code.
 *
 * Fix: "ClockRanges *next;" inside the anonymous struct referenced the
 * typedef before it exists — invalid C.  A struct tag is introduced;
 * the typedef names are unchanged.
 */
typedef struct _ClockRanges
{
    struct _ClockRanges *next;     /* next range in the list */
    int minClock;                  /* lowest allowed clock (kHz) */
    int maxClock;                  /* highest allowed clock (kHz) */
    int clockIndex;                /* fixed-clock index, if applicable */
    Bool interlaceAllowed;         /* interlaced modes permitted here */
    Bool doubleScanAllowed;        /* doublescan modes permitted here */
    int ClockMulFactor;            /* clock multiplier for this range */
    int ClockDivFactor;            /* clock divider for this range */
    int PrivFlags;                 /* driver-private flags */
    int strategy;                  /* mode-selection strategy (LOOKUP_*) */
} ClockRanges;
typedef ClockRanges *ClockRangesPtr;
/*
 * Per-screen state shared between the XFree86/Xorg core and a video
 * driver instance.  One of these exists for every configured screen;
 * drivers fill it in during Probe/PreInit and the core reads it
 * throughout the server's lifetime.  Field groups are annotated below.
 */
typedef struct
{
    /* --- identity / bookkeeping --- */
    int driverVersion;             /* version reported by the driver */
    char *driverName;              /* canonical driver name */
    ScreenPtr pScreen;             /* core screen record, once created */
    int scrnIndex;                 /* index of this screen */
    Bool configured;               /* TRUE once fully configured */
    int origIndex;                 /* index in the config file */
    /* --- framebuffer format --- */
    int imageByteOrder;
    int bitmapScanlineUnit;
    int bitmapScanlinePad;
    int bitmapBitOrder;
    int numFormats;                /* number of valid entries in formats[] */
    PixmapFormatRec formats[8];
    PixmapFormatRec fbFormat;      /* format of the framebuffer itself */
    unsigned char pad;
    int bitsPerPixel;
    Pix24Flags pixmap24;           /* 24bpp pixmap handling */
    int depth;
    MessageType depthFrom;         /* where the depth value came from */
    MessageType bitsPerPixelFrom;  /* where the bpp value came from */
    /* --- color layout --- */
    rgb weight;                    /* bits per RGB component */
    rgb mask;                      /* RGB component masks */
    rgb offset;                    /* RGB component bit offsets */
    int rgbBits;                   /* bits in the RAMDAC per component */
    Gamma gamma;                   /* gamma correction factors */
    int defaultVisual;
    /* --- geometry / viewport --- */
    int maxHValue;                 /* max horizontal timing value */
    int maxVValue;                 /* max vertical timing value */
    int virtualX;                  /* virtual desktop width */
    int virtualY;                  /* virtual desktop height */
    int xInc;                      /* horizontal timing increment */
    MessageType virtualFrom;       /* where the virtual size came from */
    int displayWidth;              /* framebuffer pitch in pixels */
    int frameX0;                   /* viewport top-left X */
    int frameY0;                   /* viewport top-left Y */
    int frameX1;                   /* viewport bottom-right X */
    int frameY1;                   /* viewport bottom-right Y */
    int zoomLocked;                /* mode switching disabled? */
    /* --- display modes --- */
    DisplayModePtr modePool;       /* all validated modes */
    DisplayModePtr modes;          /* usable modes (circular list) */
    DisplayModePtr currentMode;    /* mode currently programmed */
    /* --- configuration links --- */
    confScreenPtr confScreen;      /* matching config-file screen section */
    MonPtr monitor;                /* attached monitor description */
    DispPtr display;               /* matching display subsection */
    int *entityList;               /* hardware entities used by this screen */
    int numEntities;
    /* --- physical dimensions --- */
    int widthmm;                   /* display width in mm */
    int heightmm;                  /* display height in mm */
    int xDpi;
    int yDpi;
    /* --- driver hookup --- */
    char *name;                    /* screen name for log messages */
    pointer driverPrivate;         /* driver's private screen state */
    DevUnion *privates;            /* allocated private slots */
    DriverPtr drv;                 /* owning driver record */
    pointer module;                /* loader handle for the driver module */
    int colorKey;
    int overlayFlags;
    /* --- hardware description (from config or probing) --- */
    char *chipset;
    char *ramdac;
    char *clockchip;
    Bool progClock;                /* programmable clock available? */
    int numClocks;                 /* number of fixed clocks in clock[] */
    int clock[128];                /* fixed clock values (kHz) */
    int videoRam;                  /* video memory size (kB) */
    unsigned long biosBase;        /* video BIOS physical base */
    unsigned long memPhysBase;     /* framebuffer physical base */
    unsigned long fbOffset;        /* framebuffer offset from memPhysBase */
    IOADDRESS domainIOBase;        /* I/O base of the PCI domain */
    int memClk;                    /* memory clock (kHz) */
    int textClockFreq;             /* text-mode clock, when not probeable */
    Bool flipPixels;               /* swap default black/white pixels */
    pointer options;               /* driver-specific config options */
    int chipID;                    /* override for probed chip ID */
    int chipRev;                   /* override for probed chip revision */
    /* --- resource access control --- */
    int racMemFlags;
    int racIoFlags;
    pointer access;
    xf86CurrentAccessPtr CurrentAccess;
    resType resourceType;
    pointer busAccess;
    /* --- state flags --- */
    Bool vtSema;                   /* TRUE while we own the VT */
    DevUnion pixmapPrivate;        /* screen-pixmap private slot */
    Bool silkenMouse;
    ClockRangesPtr clockRanges;    /* valid clock ranges for validation */
    int adjustFlags;
    /* --- reserved for ABI stability --- */
    int reservedInt[16];
    int *entityInstanceList;
    pointer reservedPtr[15];
    /* --- driver entry points called by the core --- */
    xf86ProbeProc *Probe;
    xf86PreInitProc *PreInit;
    xf86ScreenInitProc *ScreenInit;
    xf86SwitchModeProc *SwitchMode;
    xf86AdjustFrameProc *AdjustFrame;
    xf86EnterVTProc *EnterVT;
    xf86LeaveVTProc *LeaveVT;
    xf86FreeScreenProc *FreeScreen;
    xf86ValidModeProc *ValidMode;
    xf86EnableDisableFBAccessProc *EnableDisableFBAccess;
    xf86SetDGAModeProc *SetDGAMode;
    xf86ChangeGammaProc *ChangeGamma;
    xf86PointerMovedProc *PointerMoved;
    xf86PMEventProc *PMEvent;
    xf86HandleMessageProc *HandleMessage;
    xf86DPMSSetProc *DPMSSet;
    xf86LoadPaletteProc *LoadPalette;
    xf86SetOverscanProc *SetOverscan;
    xorgRRFuncProc *RRFunc;
    funcPointer reservedFuncs[11];
} ScrnInfoRec;
typedef ScrnInfoRec *ScrnInfoPtr;
/* Bind a PCI entity to a screen, allocating the ScrnInfoRec if needed. */
ScrnInfoPtr xf86ConfigPciEntity(ScrnInfoPtr pScrn, int scrnFlag,
                                int entityIndex, PciChipsets *p_chip,
                                resList res, EntityProc init,
                                EntityProc enter, EntityProc leave,
                                pointer private);
/* Per-entity flag/state accessors. */
int xf86GetLastScrnFlag(int entityIndex);
void xf86SetLastScrnFlag(int entityIndex, int scrnIndex);
Bool xf86IsEntityShared(int entityIndex);
void xf86SetEntityShared(int entityIndex);
Bool xf86IsEntitySharable(int entityIndex);
void xf86SetEntitySharable(int entityIndex);
Bool xf86IsPrimInitDone(int entityIndex);
void xf86SetPrimInitDone(int entityIndex);
void xf86ClearPrimInitDone(int entityIndex);
/* Entity-private storage.  NOTE(review): empty parens here are
 * old-style "unspecified args" declarations, not (void) — confirm this
 * matches the project's header conventions before tightening. */
int xf86AllocateEntityPrivateIndex();
DevUnion *xf86GetEntityPrivate(int entityIndex, int privIndex);
void xf86SetEntityInstanceForScreen(ScrnInfoPtr pScrn, int entityIndex, int instance);
/* Logging helpers. */
int xf86GetVerbosity();
void xf86DrvMsgVerb(int scrnIndex, MessageType type, int verb, const char *format, ...);
void xf86Msg(MessageType type, const char *format, ...);
/* Allocation wrappers (XNF* variants never return NULL). */
pointer XNFcalloc(unsigned int n);
pointer Xcalloc(unsigned int n);
pointer Xalloc(unsigned int n);
// libc wrapper stuff
typedef int xf86size_t;
xf86size_t xf86strlen(const char *s);
/* NOTE(review): "@max(s)" below is not standard C — it looks like a
 * dependent-type annotation for an external checker; verify the tool
 * that consumes these headers before removing it. */
int xf86vsnprintf(char *s, xf86size_t len @max(s), const char *format, va_list ap);
char *xf86strncpy(char *dest, const char *src, xf86size_t n);
int xf86isspace(int c);
| {
"pile_set_name": "Github"
} |
# This file defines the Feature Logging macros.
#
# MACRO_LOG_FEATURE(VAR FEATURE DESCRIPTION URL [REQUIRED [MIN_VERSION [COMMENTS]]])
# Logs the information so that it can be displayed at the end
# of the configure run
# VAR : TRUE or FALSE, indicating whether the feature is supported
# FEATURE: name of the feature, e.g. "libjpeg"
# DESCRIPTION: description what this feature provides
# URL: home page
# REQUIRED: TRUE or FALSE, indicating whether the feature is required
# MIN_VERSION: minimum version number. empty string if unneeded
# COMMENTS: More info you may want to provide. empty string if unnecessary
#
# MACRO_DISPLAY_FEATURE_LOG()
# Call this to display the collected results.
# Exits CMake with a FATAL error message if a required feature is missing
#
# Example:
#
# INCLUDE(MacroLogFeature)
#
# FIND_PACKAGE(JPEG)
# MACRO_LOG_FEATURE(JPEG_FOUND "libjpeg" "Support JPEG images" "http://www.ijg.org" TRUE "3.2a" "")
# ...
# MACRO_DISPLAY_FEATURE_LOG()
# Copyright (c) 2006, Alexander Neundorf, <[email protected]>
# Copyright (c) 2006, Allen Winter, <[email protected]>
# Copyright (c) 2009, Sebastian Trueg, <[email protected]>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
# One-time initialization: remove stale result files from a previous
# configure run so each run starts with a clean slate, then pull in
# FeatureSummary for the optional SET_PACKAGE_INFO/FEATURE_SUMMARY hooks.
IF (NOT _macroLogFeatureAlreadyIncluded)
   SET(_file ${CMAKE_BINARY_DIR}/MissingRequirements.txt)
   IF (EXISTS ${_file})
      FILE(REMOVE ${_file})
   ENDIF (EXISTS ${_file})
   SET(_file ${CMAKE_BINARY_DIR}/EnabledFeatures.txt)
   IF (EXISTS ${_file})
      FILE(REMOVE ${_file})
   ENDIF (EXISTS ${_file})
   SET(_file ${CMAKE_BINARY_DIR}/DisabledFeatures.txt)
   IF (EXISTS ${_file})
      FILE(REMOVE ${_file})
   ENDIF (EXISTS ${_file})
   # Guard variable so repeated INCLUDEs of this module are no-ops.
   SET(_macroLogFeatureAlreadyIncluded TRUE)
   INCLUDE(FeatureSummary)
ENDIF (NOT _macroLogFeatureAlreadyIncluded)
# Record one feature's availability into the appropriate result file
# (Enabled/Disabled/MissingRequirements) for the end-of-configure summary.
# Optional trailing args: REQUIRED ("TRUE"/"FALSE"), MIN_VERSION, COMMENTS.
#
# Fix: the old emptiness tests `IF (${_minvers} MATCHES ".*")` and
# `IF (${_comments} MATCHES ".*")` were broken — ".*" matches even the
# empty string, and the unquoted variable expansion makes if() fail
# outright when the value is empty or contains spaces.  They are
# replaced with quoted STREQUAL "" checks, which is the intended
# "only when a value was supplied" behavior.
MACRO(MACRO_LOG_FEATURE _var _package _description _url ) # _required _minvers _comments)
   STRING(TOUPPER "${ARGV4}" _required)
   SET(_minvers "${ARGV5}")
   SET(_comments "${ARGV6}")
   # Route the entry to the right result file.
   IF (${_var})
     SET(_LOGFILENAME ${CMAKE_BINARY_DIR}/EnabledFeatures.txt)
   ELSE (${_var})
     IF ("${_required}" STREQUAL "TRUE")
       SET(_LOGFILENAME ${CMAKE_BINARY_DIR}/MissingRequirements.txt)
     ELSE ("${_required}" STREQUAL "TRUE")
       SET(_LOGFILENAME ${CMAKE_BINARY_DIR}/DisabledFeatures.txt)
     ENDIF ("${_required}" STREQUAL "TRUE")
   ENDIF (${_var})
   SET(_logtext "   * ${_package}")
   IF (NOT ${_var})
     # Only mention a minimum version if one was actually supplied.
     IF (NOT "${_minvers}" STREQUAL "")
       SET(_logtext "${_logtext} (${_minvers} or higher)")
     ENDIF (NOT "${_minvers}" STREQUAL "")
     SET(_logtext "${_logtext} <${_url}>\n ")
   ELSE (NOT ${_var})
     SET(_logtext "${_logtext} - ")
   ENDIF (NOT ${_var})
   SET(_logtext "${_logtext}${_description}")
   IF (NOT ${_var})
     # Append extra comments only when provided.
     IF (NOT "${_comments}" STREQUAL "")
       SET(_logtext "${_logtext}\n ${_comments}")
     ENDIF (NOT "${_comments}" STREQUAL "")
#     SET(_logtext "${_logtext}\n") #double-space missing features?
   ENDIF (NOT ${_var})
   FILE(APPEND "${_LOGFILENAME}" "${_logtext}\n")
   IF(COMMAND SET_PACKAGE_INFO)  # in FeatureSummary.cmake since CMake 2.8.3
     SET_PACKAGE_INFO("${_package}" "\"${_description}\"" "${_url}" "\"${_comments}\"")
   ENDIF(COMMAND SET_PACKAGE_INFO)
ENDMACRO(MACRO_LOG_FEATURE)
# Print the summary collected by MACRO_LOG_FEATURE calls, consuming
# (deleting) the three result files, and abort the configure run with
# FATAL_ERROR if any required package was missing.
MACRO(MACRO_DISPLAY_FEATURE_LOG)
   # Prefer the built-in FeatureSummary report when available (CMake >= 2.8.3).
   IF(COMMAND FEATURE_SUMMARY) # in FeatureSummary.cmake since CMake 2.8.3
     FEATURE_SUMMARY(FILENAME ${CMAKE_CURRENT_BINARY_DIR}/FindPackageLog.txt
                     WHAT ALL)
   ENDIF(COMMAND FEATURE_SUMMARY)
   SET(_missingFile ${CMAKE_BINARY_DIR}/MissingRequirements.txt)
   SET(_enabledFile ${CMAKE_BINARY_DIR}/EnabledFeatures.txt)
   SET(_disabledFile ${CMAKE_BINARY_DIR}/DisabledFeatures.txt)
   # Only print anything if at least one feature was logged.
   IF (EXISTS ${_missingFile} OR EXISTS ${_enabledFile} OR EXISTS ${_disabledFile})
     SET(_printSummary TRUE)
   ENDIF (EXISTS ${_missingFile} OR EXISTS ${_enabledFile} OR EXISTS ${_disabledFile})
   IF(_printSummary)
     SET(_missingDeps 0)
     # Found optional/required packages.
     IF (EXISTS ${_enabledFile})
       FILE(READ ${_enabledFile} _enabled)
       FILE(REMOVE ${_enabledFile})
       SET(_summary "${_summary}\n-----------------------------------------------------------------------------\n-- The following external packages were located on your system.\n-- This installation will have the extra features provided by these packages.\n-----------------------------------------------------------------------------\n${_enabled}")
     ENDIF (EXISTS ${_enabledFile})
     # Optional packages that were not found.
     IF (EXISTS ${_disabledFile})
       SET(_missingDeps 1)
       FILE(READ ${_disabledFile} _disabled)
       FILE(REMOVE ${_disabledFile})
       SET(_summary "${_summary}\n-----------------------------------------------------------------------------\n-- The following OPTIONAL packages could NOT be located on your system.\n-- Consider installing them to enable more features from this software.\n-----------------------------------------------------------------------------\n${_disabled}")
     ENDIF (EXISTS ${_disabledFile})
     # Required packages that were not found — remember to abort below.
     IF (EXISTS ${_missingFile})
       SET(_missingDeps 1)
       FILE(READ ${_missingFile} _requirements)
       SET(_summary "${_summary}\n-----------------------------------------------------------------------------\n-- The following REQUIRED packages could NOT be located on your system.\n-- You must install these packages before continuing.\n-----------------------------------------------------------------------------\n${_requirements}")
       FILE(REMOVE ${_missingFile})
       SET(_haveMissingReq 1)
     ENDIF (EXISTS ${_missingFile})
     IF (NOT ${_missingDeps})
       SET(_summary "${_summary}\n-----------------------------------------------------------------------------\n-- Congratulations! All external packages have been found.")
     ENDIF (NOT ${_missingDeps})
     MESSAGE(${_summary})
     MESSAGE("-----------------------------------------------------------------------------\n")
     # A missing required package is fatal.
     IF(_haveMissingReq)
       MESSAGE(FATAL_ERROR "Exiting: Missing Requirements")
     ENDIF(_haveMissingReq)
   ENDIF(_printSummary)
ENDMACRO(MACRO_DISPLAY_FEATURE_LOG)
| {
"pile_set_name": "Github"
} |
// Copyright Aleksey Gurtovoy 2000-2004
// Copyright David Abrahams 2003-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Preprocessed version of "boost/mpl/map/map10.hpp" header
// -- DO NOT modify by hand!
namespace boost { namespace mpl {
// Preprocessed expansion of the numbered-map sequence (map1..map10).
// For each N: m_at_impl<N-1> resolves at<Map, K> to the stored itemN-1;
// m_item_impl<N> records pair<Key,T> as itemN-1 when the N-th element is
// inserted; mapN chains m_item onto map(N-1) to build the N-element map.
// Auto-generated file — comments only; do not hand-edit the code itself.
template<>
struct m_at_impl<0>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item0 type;
    };
};
template<>
struct m_item_impl<1>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item0;
    };
};
// map1: one-element map built on the empty map0.
template<
      typename P0
    >
struct map1
    : m_item<
          1
        , typename P0::first
        , typename P0::second
        , map0< >
        >
{
    typedef map1 type;
};
template<>
struct m_at_impl<1>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item1 type;
    };
};
template<>
struct m_item_impl<2>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item1;
    };
};
// map2: adds P1 on top of map1.
template<
      typename P0, typename P1
    >
struct map2
    : m_item<
          2
        , typename P1::first
        , typename P1::second
        , map1<P0>
        >
{
    typedef map2 type;
};
template<>
struct m_at_impl<2>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item2 type;
    };
};
template<>
struct m_item_impl<3>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item2;
    };
};
// map3: adds P2 on top of map2.
template<
      typename P0, typename P1, typename P2
    >
struct map3
    : m_item<
          3
        , typename P2::first
        , typename P2::second
        , map2< P0,P1 >
        >
{
    typedef map3 type;
};
template<>
struct m_at_impl<3>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item3 type;
    };
};
template<>
struct m_item_impl<4>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item3;
    };
};
// map4: adds P3 on top of map3.
template<
      typename P0, typename P1, typename P2, typename P3
    >
struct map4
    : m_item<
          4
        , typename P3::first
        , typename P3::second
        , map3< P0,P1,P2 >
        >
{
    typedef map4 type;
};
template<>
struct m_at_impl<4>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item4 type;
    };
};
template<>
struct m_item_impl<5>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item4;
    };
};
// map5: adds P4 on top of map4.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    >
struct map5
    : m_item<
          5
        , typename P4::first
        , typename P4::second
        , map4< P0,P1,P2,P3 >
        >
{
    typedef map5 type;
};
template<>
struct m_at_impl<5>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item5 type;
    };
};
template<>
struct m_item_impl<6>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item5;
    };
};
// map6: adds P5 on top of map5.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    , typename P5
    >
struct map6
    : m_item<
          6
        , typename P5::first
        , typename P5::second
        , map5< P0,P1,P2,P3,P4 >
        >
{
    typedef map6 type;
};
template<>
struct m_at_impl<6>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item6 type;
    };
};
template<>
struct m_item_impl<7>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item6;
    };
};
// map7: adds P6 on top of map6.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    , typename P5, typename P6
    >
struct map7
    : m_item<
          7
        , typename P6::first
        , typename P6::second
        , map6< P0,P1,P2,P3,P4,P5 >
        >
{
    typedef map7 type;
};
template<>
struct m_at_impl<7>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item7 type;
    };
};
template<>
struct m_item_impl<8>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item7;
    };
};
// map8: adds P7 on top of map7.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    , typename P5, typename P6, typename P7
    >
struct map8
    : m_item<
          8
        , typename P7::first
        , typename P7::second
        , map7< P0,P1,P2,P3,P4,P5,P6 >
        >
{
    typedef map8 type;
};
template<>
struct m_at_impl<8>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item8 type;
    };
};
template<>
struct m_item_impl<9>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item8;
    };
};
// map9: adds P8 on top of map8.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    , typename P5, typename P6, typename P7, typename P8
    >
struct map9
    : m_item<
          9
        , typename P8::first
        , typename P8::second
        , map8< P0,P1,P2,P3,P4,P5,P6,P7 >
        >
{
    typedef map9 type;
};
template<>
struct m_at_impl<9>
{
    template< typename Map > struct result_
    {
        typedef typename Map::item9 type;
    };
};
template<>
struct m_item_impl<10>
{
    template< typename Key, typename T, typename Base > struct result_
        : m_item_< Key,T,Base >
    {
        typedef pair< Key,T > item9;
    };
};
// map10: adds P9 on top of map9.
template<
      typename P0, typename P1, typename P2, typename P3, typename P4
    , typename P5, typename P6, typename P7, typename P8, typename P9
    >
struct map10
    : m_item<
          10
        , typename P9::first
        , typename P9::second
        , map9< P0,P1,P2,P3,P4,P5,P6,P7,P8 >
        >
{
    typedef map10 type;
};
}}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML>
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc -->
<title>BootstrapMethodError (Java SE 12 &amp; JDK 12 )</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="keywords" content="java.lang.BootstrapMethodError class">
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<link rel="stylesheet" type="text/css" href="../../../jquery/jquery-ui.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
<script type="text/javascript" src="../../../jquery/jszip/dist/jszip.min.js"></script>
<script type="text/javascript" src="../../../jquery/jszip-utils/dist/jszip-utils.min.js"></script>
<!--[if IE]>
<script type="text/javascript" src="../../../jquery/jszip-utils/dist/jszip-utils-ie.min.js"></script>
<![endif]-->
<script type="text/javascript" src="../../../jquery/jquery-3.3.1.js"></script>
<script type="text/javascript" src="../../../jquery/jquery-migrate-3.0.1.js"></script>
<script type="text/javascript" src="../../../jquery/jquery-ui.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="BootstrapMethodError (Java SE 12 & JDK 12 )";
}
}
catch(err) {
}
//-->
var pathtoroot = "../../../";
var useModuleDirectories = true;
loadScripts(document, 'script');</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<header role="banner">
<nav role="navigation">
<div class="fixedNav">
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a id="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a id="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../index.html">Overview</a></li>
<li><a href="../../module-summary.html">Module</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/BootstrapMethodError.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><div style="margin-top: 14px;"><strong>Java SE 12 &amp; JDK 12</strong> </div></div>
</div>
<div class="subNav">
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<ul class="navListSearch">
<li><label for="search">SEARCH:</label>
<input type="text" id="search" value="search" disabled="disabled">
<input type="reset" id="reset" value="reset" disabled="disabled">
</li>
</ul>
</div>
<a id="skip.navbar.top">
<!-- -->
</a>
<!-- ========= END OF TOP NAVBAR ========= -->
</div>
<div class="navPadding"> </div>
<script type="text/javascript"><!--
$('.navPadding').css('padding-top', $('.fixedNav').css("height"));
//-->
</script>
</nav>
</header>
<!-- ======== START OF CLASS DATA ======== -->
<main role="main">
<div class="header">
<div class="subTitle"><span class="moduleLabelInType">Module</span> <a href="../../module-summary.html">java.base</a></div>
<div class="subTitle"><span class="packageLabelInType">Package</span> <a href="package-summary.html">java.lang</a></div>
<h2 title="Class BootstrapMethodError" class="title">Class BootstrapMethodError</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li><a href="Object.html" title="class in java.lang">java.lang.Object</a></li>
<li>
<ul class="inheritance">
<li><a href="Throwable.html" title="class in java.lang">java.lang.Throwable</a></li>
<li>
<ul class="inheritance">
<li><a href="Error.html" title="class in java.lang">java.lang.Error</a></li>
<li>
<ul class="inheritance">
<li><a href="LinkageError.html" title="class in java.lang">java.lang.LinkageError</a></li>
<li>
<ul class="inheritance">
<li>java.lang.BootstrapMethodError</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd><code><a href="../io/Serializable.html" title="interface in java.io">Serializable</a></code></dd>
</dl>
<hr>
<pre>public class <span class="typeNameLabel">BootstrapMethodError</span>
extends <a href="LinkageError.html" title="class in java.lang">LinkageError</a></pre>
<div class="block">Thrown to indicate that an <code>invokedynamic</code> instruction or a dynamic
constant failed to resolve its bootstrap method and arguments,
or for <code>invokedynamic</code> instruction the bootstrap method has failed to
provide a
<a href="invoke/CallSite.html" title="class in java.lang.invoke">call site</a> with a
<a href="invoke/CallSite.html#getTarget()">target</a>
of the correct <a href="invoke/MethodHandle.html#type()">method type</a>,
or for a dynamic constant the bootstrap method has failed to provide a
constant value of the required type.</div>
<dl>
<dt><span class="simpleTagLabel">Since:</span></dt>
<dd>1.7</dd>
<dt><span class="seeLabel">See Also:</span></dt>
<dd><a href="../../../serialized-form.html#java.lang.BootstrapMethodError">Serialized Form</a></dd>
</dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<section role="region">
<ul class="blockList">
<li class="blockList"><a id="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<div class="memberSummary">
<table>
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Constructor</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<th class="colConstructorName" scope="row"><code><span class="memberNameLink"><a href="#%3Cinit%3E()">BootstrapMethodError</a></span>()</code></th>
<td class="colLast">
<div class="block">Constructs a <code>BootstrapMethodError</code> with no detail message.</div>
</td>
</tr>
<tr class="rowColor">
<th class="colConstructorName" scope="row"><code><span class="memberNameLink"><a href="#%3Cinit%3E(java.lang.String)">BootstrapMethodError</a></span>​(<a href="String.html" title="class in java.lang">String</a> s)</code></th>
<td class="colLast">
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
detail message.</div>
</td>
</tr>
<tr class="altColor">
<th class="colConstructorName" scope="row"><code><span class="memberNameLink"><a href="#%3Cinit%3E(java.lang.String,java.lang.Throwable)">BootstrapMethodError</a></span>​(<a href="String.html" title="class in java.lang">String</a> s,
<a href="Throwable.html" title="class in java.lang">Throwable</a> cause)</code></th>
<td class="colLast">
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
detail message and cause.</div>
</td>
</tr>
<tr class="rowColor">
<th class="colConstructorName" scope="row"><code><span class="memberNameLink"><a href="#%3Cinit%3E(java.lang.Throwable)">BootstrapMethodError</a></span>​(<a href="Throwable.html" title="class in java.lang">Throwable</a> cause)</code></th>
<td class="colLast">
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
cause.</div>
</td>
</tr>
</tbody>
</table>
</div>
</li>
</ul>
</section>
<!-- ========== METHOD SUMMARY =========== -->
<section role="region">
<ul class="blockList">
<li class="blockList"><a id="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<ul class="blockList">
<li class="blockList"><a id="methods.inherited.from.class.java.lang.Throwable">
<!-- -->
</a>
<h3>Methods declared in class java.lang.<a href="Throwable.html" title="class in java.lang">Throwable</a></h3>
<code><a href="Throwable.html#addSuppressed(java.lang.Throwable)">addSuppressed</a>, <a href="Throwable.html#fillInStackTrace()">fillInStackTrace</a>, <a href="Throwable.html#getCause()">getCause</a>, <a href="Throwable.html#getLocalizedMessage()">getLocalizedMessage</a>, <a href="Throwable.html#getMessage()">getMessage</a>, <a href="Throwable.html#getStackTrace()">getStackTrace</a>, <a href="Throwable.html#getSuppressed()">getSuppressed</a>, <a href="Throwable.html#initCause(java.lang.Throwable)">initCause</a>, <a href="Throwable.html#printStackTrace()">printStackTrace</a>, <a href="Throwable.html#printStackTrace(java.io.PrintStream)">printStackTrace</a>, <a href="Throwable.html#printStackTrace(java.io.PrintWriter)">printStackTrace</a>, <a href="Throwable.html#setStackTrace(java.lang.StackTraceElement%5B%5D)">setStackTrace</a>, <a href="Throwable.html#toString()">toString</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a id="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods declared in class java.lang.<a href="Object.html" title="class in java.lang">Object</a></h3>
<code><a href="Object.html#clone()">clone</a>, <a href="Object.html#equals(java.lang.Object)">equals</a>, <a href="Object.html#finalize()">finalize</a>, <a href="Object.html#getClass()">getClass</a>, <a href="Object.html#hashCode()">hashCode</a>, <a href="Object.html#notify()">notify</a>, <a href="Object.html#notifyAll()">notifyAll</a>, <a href="Object.html#wait()">wait</a>, <a href="Object.html#wait(long)">wait</a>, <a href="Object.html#wait(long,int)">wait</a></code></li>
</ul>
</li>
</ul>
</section>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<section role="region">
<ul class="blockList">
<li class="blockList"><a id="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a id="<init>()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>BootstrapMethodError</h4>
<pre>public BootstrapMethodError()</pre>
<div class="block">Constructs a <code>BootstrapMethodError</code> with no detail message.</div>
</li>
</ul>
<a id="<init>(java.lang.String)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>BootstrapMethodError</h4>
<pre>public BootstrapMethodError​(<a href="String.html" title="class in java.lang">String</a> s)</pre>
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
detail message.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>s</code> - the detail message.</dd>
</dl>
</li>
</ul>
<a id="<init>(java.lang.String,java.lang.Throwable)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>BootstrapMethodError</h4>
<pre>public BootstrapMethodError​(<a href="String.html" title="class in java.lang">String</a> s,
<a href="Throwable.html" title="class in java.lang">Throwable</a> cause)</pre>
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
detail message and cause.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>s</code> - the detail message.</dd>
<dd><code>cause</code> - the cause, may be <code>null</code>.</dd>
</dl>
</li>
</ul>
<a id="<init>(java.lang.Throwable)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>BootstrapMethodError</h4>
<pre>public BootstrapMethodError​(<a href="Throwable.html" title="class in java.lang">Throwable</a> cause)</pre>
<div class="block">Constructs a <code>BootstrapMethodError</code> with the specified
cause.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>cause</code> - the cause, may be <code>null</code>.</dd>
</dl>
</li>
</ul>
</li>
</ul>
</section>
</li>
</ul>
</div>
</div>
</main>
<!-- ========= END OF CLASS DATA ========= -->
<footer role="contentinfo">
<nav role="navigation">
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a id="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a id="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../index.html">Overview</a></li>
<li><a href="../../module-summary.html">Module</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/BootstrapMethodError.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><div style="margin-top: 14px;"><strong>Java SE 12 &amp; JDK 12</strong> </div></div>
</div>
<div class="subNav">
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
</div>
<a id="skip.navbar.bottom">
<!-- -->
</a>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</nav>
<p class="legalCopy"><small><a href="https://bugreport.java.com/bugreport/">Report a bug or suggest an enhancement</a><br> For further API reference and developer documentation see the <a href="https://docs.oracle.com/pls/topic/lookup?ctx=javase12.0.2&id=homepage" target="_blank">Java SE Documentation</a>, which contains more detailed, developer-targeted descriptions with conceptual overviews, definitions of terms, workarounds, and working code examples.<br> Java is a trademark or registered trademark of Oracle and/or its affiliates in the US and other countries.<br> <a href="../../../../legal/copyright.html">Copyright</a> © 1993, 2019, Oracle and/or its affiliates, 500 Oracle Parkway, Redwood Shores, CA 94065 USA.<br>All rights reserved. Use is subject to <a href="https://www.oracle.com/technetwork/java/javase/terms/license/java12.0.2speclicense.html">license terms</a> and the <a href="https://www.oracle.com/technetwork/java/redist-137594.html">documentation redistribution policy</a>. <!-- Version 12.0.2+10 --></small></p>
</footer>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_VP8_COMMON_THREADING_H_
#define VPX_VP8_COMMON_THREADING_H_
#include "./vpx_config.h"
#ifdef __cplusplus
extern "C" {
#endif
#if CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD
/* Thread management macros */
#if defined(_WIN32) && !HAVE_PTHREAD_H
/* Win32: map the pthreads-style names used by the codebase onto the
 * native Win32 threading and TLS primitives. */
#include <process.h>
#include <windows.h>
/* Win32 thread entry points are __stdcall; newer GCCs also need the
 * stack realigned on entry for SSE-using code. */
#if defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
#define THREAD_FUNCTION \
  __attribute__((force_align_arg_pointer)) unsigned int __stdcall
#else
#define THREAD_FUNCTION unsigned int __stdcall
#endif
#define THREAD_FUNCTION_RETURN DWORD
#define THREAD_SPECIFIC_INDEX DWORD
#define pthread_t HANDLE
#define pthread_attr_t DWORD
#define pthread_detach(thread) \
  if (thread != NULL) CloseHandle(thread)
#define thread_sleep(nms) Sleep(nms)
/* NOTE(review): the Win32 API function is TerminateThread; the
 * lowercase terminate_thread here compiles only if something else
 * defines it (or the macro is never expanded) — confirm before use. */
#define pthread_cancel(thread) terminate_thread(thread, 0)
/* Win32 TLS has no per-key destructor; the destructor arg is ignored. */
#define ts_key_create(ts_key, destructor) \
  { ts_key = TlsAlloc(); };
#define pthread_getspecific(ts_key) TlsGetValue(ts_key)
#define pthread_setspecific(ts_key, value) TlsSetValue(ts_key, (void *)value)
#define pthread_self() GetCurrentThreadId()
#elif defined(__OS2__)
/* OS/2 */
#define INCL_DOS
#include <os2.h>
#include <stdlib.h>
#define THREAD_FUNCTION void *
#define THREAD_FUNCTION_RETURN void *
#define THREAD_SPECIFIC_INDEX PULONG
#define pthread_t TID
#define pthread_attr_t ULONG
#define pthread_detach(thread) 0
#define thread_sleep(nms) DosSleep(nms)
#define pthread_cancel(thread) DosKillThread(thread)
#define ts_key_create(ts_key, destructor) \
DosAllocThreadLocalMemory(1, &(ts_key));
#define pthread_getspecific(ts_key) ((void *)(*(ts_key)))
#define pthread_setspecific(ts_key, value) (*(ts_key) = (ULONG)(value))
#define pthread_self() _gettid()
#else
#ifdef __APPLE__
#include <mach/mach_init.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <time.h>
#include <unistd.h>
#else
#include <semaphore.h>
#endif
#include <pthread.h>
/* pthreads */
/* Nearly everything is already defined */
#define THREAD_FUNCTION void *
#define THREAD_FUNCTION_RETURN void *
#define THREAD_SPECIFIC_INDEX pthread_key_t
#define ts_key_create(ts_key, destructor) \
pthread_key_create(&(ts_key), destructor);
#endif
/* Synchronization macros: Win32 and Pthreads */
#if defined(_WIN32) && !HAVE_PTHREAD_H
/* Map the POSIX sem_* API onto Win32 semaphore HANDLEs.  Note that
 * sem_init ignores its attribute and initial-value arguments and always
 * creates the semaphore with an initial count of 0 (max count 32768). */
#define sem_t HANDLE
#define pause(voidpara) __asm PAUSE
#define sem_init(sem, sem_attr1, sem_init_value) \
  (int)((*sem = CreateSemaphore(NULL, 0, 32768, NULL)) == NULL)
#define sem_wait(sem) \
  (int)(WAIT_OBJECT_0 != WaitForSingleObject(*sem, INFINITE))
#define sem_post(sem) ReleaseSemaphore(*sem, 1, NULL)
/* NOTE(review): the (== TRUE) comparison result is unused; the macro only
 * closes a non-NULL handle, mirroring a statement rather than returning. */
#define sem_destroy(sem) \
  if (*sem) ((int)(CloseHandle(*sem)) == TRUE)
#define thread_sleep(nms) Sleep(nms)
#elif defined(__OS2__)
/* Semaphore emulation for OS/2: an event semaphore is posted while tokens
 * are available, 'count' tracks outstanding posts, and two mutexes
 * serialize waiters and count updates respectively. */
typedef struct {
  HEV event;        /* posted while count > 0 */
  HMTX wait_mutex;  /* serializes waiters inside sem_wait() */
  HMTX count_mutex; /* guards 'count' */
  int count;        /* number of available "tokens" */
} sem_t;

/* Initialize the semaphore with 'value' tokens; 'pshared' selects a
 * shared (DC_SEM_SHARED) event semaphore.  Always returns 0 -- errors
 * from the Dos* calls are not propagated. */
static inline int sem_init(sem_t *sem, int pshared, unsigned int value) {
  DosCreateEventSem(NULL, &sem->event, pshared ? DC_SEM_SHARED : 0,
                    value > 0 ? TRUE : FALSE);
  DosCreateMutexSem(NULL, &sem->wait_mutex, 0, FALSE);
  DosCreateMutexSem(NULL, &sem->count_mutex, 0, FALSE);
  sem->count = value;
  return 0;
}
/* Decrement the semaphore, blocking until a token is available.
 * wait_mutex ensures only one thread at a time consumes the event; when
 * the last token is taken the event is reset so later waiters block.
 * Always returns 0. */
static inline int sem_wait(sem_t *sem) {
  DosRequestMutexSem(sem->wait_mutex, -1); /* -1 == wait indefinitely */
  DosWaitEventSem(sem->event, -1);         /* block until posted */
  DosRequestMutexSem(sem->count_mutex, -1);
  sem->count--;
  if (sem->count == 0) {
    /* Last token consumed: un-signal the event so new waiters block. */
    ULONG post_count;
    DosResetEventSem(sem->event, &post_count);
  }
  DosReleaseMutexSem(sem->count_mutex);
  DosReleaseMutexSem(sem->wait_mutex);
  return 0;
}
/* Increment the semaphore (capped at 32768, matching the Win32 semaphore
 * limit used in this header) and signal the event to wake a waiter.
 * Always returns 0. */
static inline int sem_post(sem_t *sem) {
  DosRequestMutexSem(sem->count_mutex, -1);
  if (sem->count < 32768) {
    sem->count++;
    DosPostEventSem(sem->event);
  }
  DosReleaseMutexSem(sem->count_mutex);
  return 0;
}
/* Release the OS/2 kernel objects backing the semaphore.  Always returns
 * 0; the sem_t storage itself is owned by the caller. */
static inline int sem_destroy(sem_t *sem) {
  DosCloseEventSem(sem->event);
  DosCloseMutexSem(sem->wait_mutex);
  DosCloseMutexSem(sem->count_mutex);
  return 0;
}
#define thread_sleep(nms) DosSleep(nms)
#else
#ifdef __APPLE__
#define sem_t semaphore_t
#define sem_init(X, Y, Z) \
semaphore_create(mach_task_self(), X, SYNC_POLICY_FIFO, Z)
#define sem_wait(sem) (semaphore_wait(*sem))
#define sem_post(sem) semaphore_signal(*sem)
#define sem_destroy(sem) semaphore_destroy(mach_task_self(), *sem)
#define thread_sleep(nms)
/* { struct timespec ts;ts.tv_sec=0; ts.tv_nsec =
1000*nms;nanosleep(&ts, NULL);} */
#else
#include <unistd.h>
#include <sched.h>
#define thread_sleep(nms) sched_yield();
/* {struct timespec ts;ts.tv_sec=0;
ts.tv_nsec = 1000*nms;nanosleep(&ts, NULL);} */
#endif
/* Not Windows. Assume pthreads */
#endif
#if VPX_ARCH_X86 || VPX_ARCH_X86_64
#include "vpx_ports/x86.h"
#else
#define x86_pause_hint()
#endif
#include "vpx_util/vpx_thread.h"
#include "vpx_util/vpx_atomics.h"
/* Spin until *last_row_current_mb_col (the progress counter of another
 * thread -- the name suggests the one working the previous macroblock
 * row) reaches at least mb_col + nsync.  The acquire load makes data
 * written before the counter update visible to this thread; each
 * iteration issues an x86 PAUSE hint and a zero-length sleep/yield to
 * reduce contention while spinning. */
static INLINE void vp8_atomic_spin_wait(
    int mb_col, const vpx_atomic_int *last_row_current_mb_col,
    const int nsync) {
  while (mb_col > (vpx_atomic_load_acquire(last_row_current_mb_col) - nsync)) {
    x86_pause_hint();
    thread_sleep(0);
  }
}
#endif /* CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD */
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_VP8_COMMON_THREADING_H_
| {
"pile_set_name": "Github"
} |
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_JS_WEAK_REFS_H_
#define V8_OBJECTS_JS_WEAK_REFS_H_
#include "src/objects/js-objects.h"
#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
class NativeContext;
class WeakCell;
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
class JSFinalizationRegistry : public JSObject {
 public:
  DECL_PRINTER(JSFinalizationRegistry)
  EXPORT_DECL_VERIFIER(JSFinalizationRegistry)
  DECL_CAST(JSFinalizationRegistry)
  DECL_ACCESSORS(native_context, NativeContext)
  // The user-supplied cleanup callback (per the spec proposal above).
  DECL_ACCESSORS(cleanup, Object)
  // Lists of WeakCells: cells whose targets are still live vs. cells whose
  // targets have been cleared (see NeedsCleanup below).
  DECL_ACCESSORS(active_cells, HeapObject)
  DECL_ACCESSORS(cleared_cells, HeapObject)
  // Maps unregister tokens to their WeakCells; maintained by Unregister()
  // and RemoveUnregisterToken() below.
  DECL_ACCESSORS(key_map, Object)
  // NOTE(review): presumably links registries needing cleanup into a
  // per-isolate "dirty" list -- confirm against the isolate implementation.
  DECL_ACCESSORS(next_dirty, Object)
  DECL_INT_ACCESSORS(flags)
  DECL_BOOLEAN_ACCESSORS(scheduled_for_cleanup)
  class BodyDescriptor;
  // Registers 'target' with the given holdings and unregister key.
  inline static void Register(
      Handle<JSFinalizationRegistry> finalization_registry,
      Handle<JSReceiver> target, Handle<Object> holdings, Handle<Object> key,
      Isolate* isolate);
  // NOTE(review): return value presumably reports whether any registration
  // matched the token -- confirm in the -inl.h implementation.
  inline static bool Unregister(
      Handle<JSFinalizationRegistry> finalization_registry,
      Handle<JSReceiver> unregister_token, Isolate* isolate);
  // RemoveUnregisterToken is called from both Unregister and during GC. Since
  // it modifies slots in key_map and WeakCells and the normal write barrier is
  // disabled during GC, we need to tell the GC about the modified slots via the
  // gc_notify_updated_slot function.
  template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback>
  inline bool RemoveUnregisterToken(
      JSReceiver unregister_token, Isolate* isolate,
      MatchCallback match_callback,
      GCNotifyUpdatedSlotCallback gc_notify_updated_slot);
  // Returns true if the cleared_cells list is non-empty.
  inline bool NeedsCleanup() const;
  // Remove the already-popped weak_cell from its unregister token linked list,
  // as well as removing the entry from the key map if it is the only WeakCell
  // with its unregister token. This method cannot GC and does not shrink the
  // key map. Asserts that weak_cell has a non-undefined unregister token.
  //
  // It takes raw Addresses because it is called from CSA and Torque.
  V8_EXPORT_PRIVATE static void RemoveCellFromUnregisterTokenMap(
      Isolate* isolate, Address raw_finalization_registry,
      Address raw_weak_cell);
  // Layout description.
  DEFINE_FIELD_OFFSET_CONSTANTS(
      JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
  // Bitfields in flags.
  DEFINE_TORQUE_GENERATED_FINALIZATION_REGISTRY_FLAGS()
  OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
};
// Internal object for storing weak references in JSFinalizationRegistry.
// NOTE(review): the active_cells/cleared_cells names on the registry suggest
// a cell migrates between those lists when its target is cleared -- confirm.
class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
 public:
  DECL_PRINTER(WeakCell)
  EXPORT_DECL_VERIFIER(WeakCell)
  class BodyDescriptor;
  // Provide relaxed load access to target field.
  inline HeapObject relaxed_target() const;
  // Nullify is called during GC and it modifies the pointers in WeakCell and
  // JSFinalizationRegistry. Thus we need to tell the GC about the modified
  // slots via the gc_notify_updated_slot function. The normal write barrier is
  // not enough, since it's disabled before GC.
  template <typename GCNotifyUpdatedSlotCallback>
  inline void Nullify(Isolate* isolate,
                      GCNotifyUpdatedSlotCallback gc_notify_updated_slot);
  // Detach this cell from its JSFinalizationRegistry's cell lists.
  inline void RemoveFromFinalizationRegistryCells(Isolate* isolate);
  TQ_OBJECT_CONSTRUCTORS(WeakCell)
};
// WeakRef object from the JS Weak Refs spec proposal; its fields are
// Torque-generated (see TorqueGeneratedJSWeakRef).
class JSWeakRef : public TorqueGeneratedJSWeakRef<JSWeakRef, JSObject> {
 public:
  DECL_PRINTER(JSWeakRef)
  EXPORT_DECL_VERIFIER(JSWeakRef)
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(JSWeakRef)
};
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
#endif // V8_OBJECTS_JS_WEAK_REFS_H_
| {
"pile_set_name": "Github"
} |
/****************************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <unordered_map>
// template <class Key, class T, class Hash = hash<Key>, class Pred = equal_to<Key>,
// class Alloc = allocator<pair<const Key, T>>>
// class unordered_multimap
// iterator erase(const_iterator first, const_iterator last)
#include <unordered_map>
#include <string>
#include <cassert>
#include "test_macros.h"
#include "libcxx_tc_common.h"
#include <cstddef>
// Exercises unordered_multimap::erase(const_iterator first, const_iterator
// last) in three stages: an empty range (must be a no-op returning 'first'),
// a two-element range, and finally the whole container.
// NOTE(review): the per-key value-order checks assume elements with equal
// keys are iterated in insertion order, which holds for libc++.
// Returns 0 on success (via TC_SUCCESS_RESULT), non-zero on failure.
int tc_libcxx_containers_unord_multimap_modifiers_erase_range(void)
{
    {
        typedef std::unordered_multimap<int, std::string> C;
        typedef std::pair<int, std::string> P;
        P a[] =
        {
            P(1, "one"),
            P(2, "two"),
            P(3, "three"),
            P(4, "four"),
            P(1, "four"),
            P(2, "four"),
        };
        C c(a, a + sizeof(a)/sizeof(a[0]));
        C::const_iterator i = c.find(2);
        C::const_iterator j = next(i, 2);
        // Stage 1: erasing an empty range removes nothing and returns 'first'.
        C::iterator k = c.erase(i, i);
        TC_ASSERT_EXPR(k == i);
        TC_ASSERT_EXPR(c.size() == 6);
        typedef std::pair<C::iterator, C::iterator> Eq;
        Eq eq = c.equal_range(1);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 2);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 1);
        TC_ASSERT_EXPR(k->second == "one");
        ++k;
        TC_ASSERT_EXPR(k->first == 1);
        TC_ASSERT_EXPR(k->second == "four");
        eq = c.equal_range(2);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 2);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 2);
        TC_ASSERT_EXPR(k->second == "two");
        ++k;
        TC_ASSERT_EXPR(k->first == 2);
        TC_ASSERT_EXPR(k->second == "four");
        eq = c.equal_range(3);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 1);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 3);
        TC_ASSERT_EXPR(k->second == "three");
        eq = c.equal_range(4);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 1);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 4);
        TC_ASSERT_EXPR(k->second == "four");
        TC_ASSERT_EXPR(static_cast<std::size_t>(std::distance(c.begin(), c.end())) == c.size());
        TC_ASSERT_EXPR(static_cast<std::size_t>(std::distance(c.cbegin(), c.cend())) == c.size());
        // Stage 2: erase [i, j), i.e. both elements with key 2.
        k = c.erase(i, j);
        TC_ASSERT_EXPR(c.size() == 4);
        eq = c.equal_range(1);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 2);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 1);
        TC_ASSERT_EXPR(k->second == "one");
        ++k;
        TC_ASSERT_EXPR(k->first == 1);
        TC_ASSERT_EXPR(k->second == "four");
        eq = c.equal_range(3);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 1);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 3);
        TC_ASSERT_EXPR(k->second == "three");
        eq = c.equal_range(4);
        TC_ASSERT_EXPR(std::distance(eq.first, eq.second) == 1);
        k = eq.first;
        TC_ASSERT_EXPR(k->first == 4);
        TC_ASSERT_EXPR(k->second == "four");
        TC_ASSERT_EXPR(static_cast<std::size_t>(std::distance(c.begin(), c.end())) == c.size());
        TC_ASSERT_EXPR(static_cast<std::size_t>(std::distance(c.cbegin(), c.cend())) == c.size());
        // Stage 3: erasing [cbegin, cend) empties the container; the returned
        // iterator must equal end().
        k = c.erase(c.cbegin(), c.cend());
        TC_ASSERT_EXPR(c.size() == 0);
        TC_ASSERT_EXPR(k == c.end());
    }
    TC_SUCCESS_RESULT();
    return 0;
}
| {
"pile_set_name": "Github"
} |
/*
* Synopsys G210 Test Chip driver
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/delay.h>
#include "ufshcd-pltfrm.h"
#include "ufshcd-dwc.h"
#include "tc-dwc-g210.h"
/**
* UFS DWC specific variant operations
*/
static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
.name = "tc-dwc-g210-pltfm",
.link_startup_notify = ufshcd_dwc_link_startup_notify,
.phy_initialization = tc_dwc_g210_config_20_bit,
};
static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
.name = "tc-dwc-g210-pltfm",
.link_startup_notify = ufshcd_dwc_link_startup_notify,
.phy_initialization = tc_dwc_g210_config_40_bit,
};
static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
{
.compatible = "snps,g210-tc-6.00-20bit",
.data = &tc_dwc_g210_20bit_pltfm_hba_vops,
},
{
.compatible = "snps,g210-tc-6.00-40bit",
.data = &tc_dwc_g210_40bit_pltfm_hba_vops,
},
{ },
};
MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
/**
* tc_dwc_g210_pltfm_probe()
* @pdev: pointer to platform device structure
*
*/
static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
{
int err;
const struct of_device_id *of_id;
struct ufs_hba_variant_ops *vops;
struct device *dev = &pdev->dev;
of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
vops = (struct ufs_hba_variant_ops *)of_id->data;
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
return err;
}
/**
 * tc_dwc_g210_pltfm_remove()
 * @pdev: pointer to platform device structure
 *
 * pm_runtime_get_sync() bumps the runtime-PM usage count (resuming the
 * device if suspended) before ufshcd_remove() tears the host down.
 *
 * Return: always 0.
 */
static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba =  platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);

	return 0;
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver tc_dwc_g210_pltfm_driver = {
.probe = tc_dwc_g210_pltfm_probe,
.remove = tc_dwc_g210_pltfm_remove,
.shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "tc-dwc-g210-pltfm",
.pm = &tc_dwc_g210_pltfm_pm_ops,
.of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
},
};
module_platform_driver(tc_dwc_g210_pltfm_driver);
MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
MODULE_AUTHOR("Joao Pinto <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| {
"pile_set_name": "Github"
} |
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package jspb.filenametest.package2;
// Minimal message with a single optional int32 field.
// NOTE(review): the jspb.filenametest package name suggests these fixtures
// exercise generated-file naming across packages -- confirm with the tests.
message TestMessage {
  optional int32 a = 1;
}

// Minimal enum with three values for the same fixture set.
enum TestEnum {
  VALUE_0 = 0;
  VALUE_1 = 1;
  VALUE_2 = 2;
}
| {
"pile_set_name": "Github"
} |
# [The annoying parrot](http://alexa.amazon.com/#skills/amzn1.echo-sdk-ams.app.d373f712-0b9f-4685-b9ac-e7e5cb11e54c)
 3
To use the The annoying parrot skill, try saying...
* *Alexa, start the annoying parrot*
* *What is your name*
* *Stop repeating me*
Start the app by saying, "Alexa, start the annoying parrot".
The parrot will greet you and let you know it will repeat what you say.
Then, when you say something, it will (annoyingly :) ) repeat it.
***
### Skill Details
* **Invocation Name:** the annoying parrot
* **Category:** null
* **ID:** amzn1.echo-sdk-ams.app.d373f712-0b9f-4685-b9ac-e7e5cb11e54c
* **ASIN:** B01JTK5JRK
* **Author:** Rajendra
* **Release Date:** August 13, 2016 @ 04:07:44
* **In-App Purchasing:** No
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __Q6AFE_H__
#define __Q6AFE_H__
#include <dt-bindings/sound/qcom,q6afe.h>
#define AFE_PORT_MAX 105
#define MSM_AFE_PORT_TYPE_RX 0
#define MSM_AFE_PORT_TYPE_TX 1
#define AFE_MAX_PORTS AFE_PORT_MAX
#define Q6AFE_MAX_MI2S_LINES 4
#define AFE_MAX_CHAN_COUNT 8
#define AFE_PORT_MAX_AUDIO_CHAN_CNT 0x8
#define Q6AFE_LPASS_CLK_SRC_INTERNAL 1
#define Q6AFE_LPASS_CLK_ROOT_DEFAULT 0
#define LPAIF_DIG_CLK 1
#define LPAIF_BIT_CLK 2
#define LPAIF_OSR_CLK 3
/* Clock ID for Primary I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT 0x100
/* Clock ID for Primary I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT 0x101
/* Clock ID for Secondary I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT 0x102
/* Clock ID for Secondary I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT 0x103
/* Clock ID for Tertiary I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT 0x104
/* Clock ID for Tertiary I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT 0x105
/* Clock ID for Quartnery I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT 0x106
/* Clock ID for Quartnery I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT 0x107
/* Clock ID for Speaker I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_IBIT 0x108
/* Clock ID for Speaker I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_EBIT 0x109
/* Clock ID for Speaker I2S OSR */
#define Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR 0x10A
/* Clock ID for QUINARY I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_IBIT 0x10B
/* Clock ID for QUINARY I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_EBIT 0x10C
/* Clock ID for SENARY I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_IBIT 0x10D
/* Clock ID for SENARY I2S EBIT */
#define Q6AFE_LPASS_CLK_ID_SEN_MI2S_EBIT 0x10E
/* Clock ID for INT0 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT0_MI2S_IBIT 0x10F
/* Clock ID for INT1 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT1_MI2S_IBIT 0x110
/* Clock ID for INT2 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT2_MI2S_IBIT 0x111
/* Clock ID for INT3 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT3_MI2S_IBIT 0x112
/* Clock ID for INT4 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT4_MI2S_IBIT 0x113
/* Clock ID for INT5 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT5_MI2S_IBIT 0x114
/* Clock ID for INT6 I2S IBIT */
#define Q6AFE_LPASS_CLK_ID_INT6_MI2S_IBIT 0x115
/* Clock ID for QUINARY MI2S OSR CLK */
#define Q6AFE_LPASS_CLK_ID_QUI_MI2S_OSR 0x116
/* Clock ID for Primary PCM IBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT 0x200
/* Clock ID for Primary PCM EBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_PCM_EBIT 0x201
/* Clock ID for Secondary PCM IBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_PCM_IBIT 0x202
/* Clock ID for Secondary PCM EBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_PCM_EBIT 0x203
/* Clock ID for Tertiary PCM IBIT */
#define Q6AFE_LPASS_CLK_ID_TER_PCM_IBIT 0x204
/* Clock ID for Tertiary PCM EBIT */
#define Q6AFE_LPASS_CLK_ID_TER_PCM_EBIT 0x205
/* Clock ID for Quartery PCM IBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_IBIT 0x206
/* Clock ID for Quartery PCM EBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_PCM_EBIT 0x207
/* Clock ID for Quinary PCM IBIT */
#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_IBIT 0x208
/* Clock ID for Quinary PCM EBIT */
#define Q6AFE_LPASS_CLK_ID_QUIN_PCM_EBIT 0x209
/* Clock ID for QUINARY PCM OSR */
#define Q6AFE_LPASS_CLK_ID_QUI_PCM_OSR 0x20A
/** Clock ID for Primary TDM IBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT 0x200
/** Clock ID for Primary TDM EBIT */
#define Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT 0x201
/** Clock ID for Secondary TDM IBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT 0x202
/** Clock ID for Secondary TDM EBIT */
#define Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT 0x203
/** Clock ID for Tertiary TDM IBIT */
#define Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT 0x204
/** Clock ID for Tertiary TDM EBIT */
#define Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT 0x205
/** Clock ID for Quartery TDM IBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT 0x206
/** Clock ID for Quartery TDM EBIT */
#define Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT 0x207
/** Clock ID for Quinary TDM IBIT */
#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_IBIT 0x208
/** Clock ID for Quinary TDM EBIT */
#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_EBIT 0x209
/** Clock ID for Quinary TDM OSR */
#define Q6AFE_LPASS_CLK_ID_QUIN_TDM_OSR 0x20A
/* Clock ID for MCLK1 */
#define Q6AFE_LPASS_CLK_ID_MCLK_1 0x300
/* Clock ID for MCLK2 */
#define Q6AFE_LPASS_CLK_ID_MCLK_2 0x301
/* Clock ID for MCLK3 */
#define Q6AFE_LPASS_CLK_ID_MCLK_3 0x302
/* Clock ID for MCLK4 */
#define Q6AFE_LPASS_CLK_ID_MCLK_4 0x304
/* Clock ID for Internal Digital Codec Core */
#define Q6AFE_LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 0x303
/* Clock ID for INT MCLK0 */
#define Q6AFE_LPASS_CLK_ID_INT_MCLK_0 0x305
/* Clock ID for INT MCLK1 */
#define Q6AFE_LPASS_CLK_ID_INT_MCLK_1 0x306
/* Clock attribute for invalid use (reserved for internal usage) */
#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVALID 0x0
/* Clock attribute for no couple case */
#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
/* Clock attribute for dividend couple case */
#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
/* Clock attribute for divisor couple case */
#define Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
/* Clock attribute for invert and no couple case */
#define Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO 0x4
#define Q6AFE_CMAP_INVALID 0xFFFF
/* Configuration for an HDMI AFE port. */
struct q6afe_hdmi_cfg {
	u16 datatype;
	/* NOTE(review): presumably a CEA-861 speaker allocation code; confirm. */
	u16 channel_allocation;
	u32 sample_rate;
	u16 bit_width;
};

/* Configuration for a SLIMbus AFE port. */
struct q6afe_slim_cfg {
	u32 sample_rate;
	u16 bit_width;
	u16 data_format;
	u16 num_channels;
	/* One entry per channel, up to AFE_MAX_CHAN_COUNT. */
	u8 ch_mapping[AFE_MAX_CHAN_COUNT];
};

/* Configuration for an I2S/MI2S AFE port. */
struct q6afe_i2s_cfg {
	u32 sample_rate;
	u16 bit_width;
	u16 data_format;
	u16 num_channels;
	/* Bitmask of serial data lines in use. */
	u32 sd_line_mask;
	/* NOTE(review): looks like a DAI format value -- confirm with callers. */
	int fmt;
};

/* Configuration for a TDM AFE port. */
struct q6afe_tdm_cfg {
	u16 num_channels;
	u32 sample_rate;
	u16 bit_width;
	u16 data_format;
	u16 sync_mode;
	u16 sync_src;
	u16 nslots_per_frame;
	u16 slot_width;
	u16 slot_mask;
	u32 data_align_type;
	u16 ch_mapping[AFE_MAX_CHAN_COUNT];
};

/* Per-port configuration; NOTE(review): presumably only the member matching
 * the port type is consumed -- confirm in the q6afe implementation. */
struct q6afe_port_config {
	struct q6afe_hdmi_cfg hdmi;
	struct q6afe_slim_cfg slim;
	struct q6afe_i2s_cfg i2s_cfg;
	struct q6afe_tdm_cfg tdm;
};
struct q6afe_port;
struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id);
int q6afe_port_start(struct q6afe_port *port);
int q6afe_port_stop(struct q6afe_port *port);
void q6afe_port_put(struct q6afe_port *port);
int q6afe_get_port_id(int index);
int q6afe_is_rx_port(int index);
void q6afe_hdmi_port_prepare(struct q6afe_port *port,
struct q6afe_hdmi_cfg *cfg);
void q6afe_slim_port_prepare(struct q6afe_port *port,
struct q6afe_slim_cfg *cfg);
int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg);
void q6afe_tdm_port_prepare(struct q6afe_port *port, struct q6afe_tdm_cfg *cfg);
int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
int clk_src, int clk_root,
unsigned int freq, int dir);
#endif /* __Q6AFE_H__ */
| {
"pile_set_name": "Github"
} |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General assembly metadata for the Serilog UWP test project.
[assembly: AssemblyTitle("Serilog.UwpTests")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("Serilog.UwpTests")]
[assembly: AssemblyCopyright("Copyright © 2017")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Marks the assembly as targeting the Universal Windows Platform.
[assembly: AssemblyMetadata("TargetPlatform","UAP")]
// Version information; the commented-out "1.0.*" form would let the build
// auto-generate the build and revision numbers.
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
// Types in this assembly are not visible to COM components.
[assembly: ComVisible(false)]
"pile_set_name": "Github"
} |
# Configuration Locations
Configuration is picked up from the project file given by the `-p` CLI option.
The `-p` option may refer to a `package.json` (with a custom `ngPackage` property), an `ng-package.json`, or an `ng-package.js` file.
When the `-p` option refers to a directory, the configuration is picked up from the first matching source;
locations are tried in the above-mentioned order.
To configure with a `package.json`, put the configuration in the `ngPackage` custom property:
```json
{
"$schema": "./node_modules/ng-packagr/package.schema.json",
"ngPackage": {
"lib": {
"entryFile": "src/public_api.ts"
}
}
}
```
To configure with a `ng-package.json` or `ng-package.js`, keep the library's `package.json` in the same folder next to `ng-package.json` or `ng-package.js`.
Example of `ng-package.json`:
```json
{
"$schema": "./node_modules/ng-packagr/ng-package.schema.json",
"lib": {
"entryFile": "src/public_api.ts"
}
}
```
Example of `ng-package.js`:
```js
module.exports = {
lib: {
entryFile: 'src/public_api.ts'
}
};
```
| {
"pile_set_name": "Github"
} |
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.csp.sentinel.cluster.server.handler;
import java.net.InetSocketAddress;
import com.alibaba.csp.sentinel.cluster.ClusterConstants;
import com.alibaba.csp.sentinel.cluster.request.ClusterRequest;
import com.alibaba.csp.sentinel.cluster.response.ClusterResponse;
import com.alibaba.csp.sentinel.cluster.server.connection.ConnectionManager;
import com.alibaba.csp.sentinel.cluster.server.connection.ConnectionPool;
import com.alibaba.csp.sentinel.cluster.server.processor.RequestProcessor;
import com.alibaba.csp.sentinel.cluster.server.processor.RequestProcessorProvider;
import com.alibaba.csp.sentinel.log.RecordLog;
import com.alibaba.csp.sentinel.util.StringUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
/**
* Netty server handler for Sentinel token server.
*
* @author Eric Zhao
* @since 1.4.0
*/
/**
 * Netty server handler for Sentinel token server.
 *
 * <p>Tracks client connections in the global {@link ConnectionPool} and
 * {@link ConnectionManager}, and dispatches decoded {@link ClusterRequest}s
 * to the {@link RequestProcessor} registered for their message type.</p>
 *
 * @author Eric Zhao
 * @since 1.4.0
 */
public class TokenServerHandler extends ChannelInboundHandlerAdapter {

    /** Pool holding one connection entry per active client channel. */
    private final ConnectionPool globalConnectionPool;

    public TokenServerHandler(ConnectionPool globalConnectionPool) {
        this.globalConnectionPool = globalConnectionPool;
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        // Register the client channel as soon as it becomes active.
        globalConnectionPool.createConnection(ctx.channel());
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        // Drop the channel from both the pool and the namespace bookkeeping.
        String remoteAddress = getRemoteAddress(ctx);
        globalConnectionPool.remove(ctx.channel());
        ConnectionManager.removeConnection(remoteAddress);
    }

    @Override
    @SuppressWarnings("unchecked")
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        // Any inbound traffic counts as liveness for idle-connection tracking.
        globalConnectionPool.refreshLastReadTime(ctx.channel());
        if (msg instanceof ClusterRequest) {
            ClusterRequest request = (ClusterRequest)msg;

            // Client ping with its namespace, add to connection manager.
            if (request.getType() == ClusterConstants.MSG_TYPE_PING) {
                handlePingRequest(ctx, request);
                return;
            }

            // Pick request processor for request type.
            RequestProcessor<?, ?> processor = RequestProcessorProvider.getProcessor(request.getType());
            if (processor == null) {
                RecordLog.warn("[TokenServerHandler] No processor for request type: " + request.getType());
                writeBadResponse(ctx, request);
            } else {
                ClusterResponse<?> response = processor.processRequest(request);
                writeResponse(ctx, response);
            }
        }
    }

    /** Replies with a {@code RESPONSE_STATUS_BAD} response echoing the request id and type. */
    private void writeBadResponse(ChannelHandlerContext ctx, ClusterRequest request) {
        ClusterResponse<?> response = new ClusterResponse<>(request.getId(), request.getType(),
            ClusterConstants.RESPONSE_STATUS_BAD, null);
        writeResponse(ctx, response);
    }

    /** Writes the response and flushes the channel immediately. */
    private void writeResponse(ChannelHandlerContext ctx, ClusterResponse<?> response) {
        // Wildcard type instead of the raw ClusterResponse used previously.
        ctx.writeAndFlush(response);
    }

    /**
     * Handles the namespace PING handshake: records the client under the
     * namespace carried in the request payload and answers with the number
     * of connections currently registered for that namespace.
     */
    private void handlePingRequest(ChannelHandlerContext ctx, ClusterRequest request) {
        if (request.getData() == null || StringUtil.isBlank((String)request.getData())) {
            writeBadResponse(ctx, request);
            return;
        }
        String namespace = (String)request.getData();
        String clientAddress = getRemoteAddress(ctx);
        // Add the remote namespace to connection manager.
        int curCount = ConnectionManager.addConnection(namespace, clientAddress).getConnectedCount();

        int status = ClusterConstants.RESPONSE_STATUS_OK;
        ClusterResponse<Integer> response = new ClusterResponse<>(request.getId(), request.getType(), status, curCount);
        writeResponse(ctx, response);
    }

    /** Returns {@code "ip:port"} of the peer, or {@code null} if the channel has no remote address. */
    private String getRemoteAddress(ChannelHandlerContext ctx) {
        if (ctx.channel().remoteAddress() == null) {
            return null;
        }
        InetSocketAddress inetAddress = (InetSocketAddress) ctx.channel().remoteAddress();
        return inetAddress.getAddress().getHostAddress() + ":" + inetAddress.getPort();
    }
}
| {
"pile_set_name": "Github"
} |
StartChar: afii10076
Encoding: 1082 1082 424
Width: 610
Flags: W
HStem: 0 21G<50 129 368.901 481> 458 20G<50 129 336.062 470>
VStem: 50 79<0 201 285 478>
LayerCount: 2
Fore
SplineSet
93 0 m 1,0,-1
93 486 l 1,1,-1
211 486 l 1,2,-1
211 315 l 1,3,-1
403 486 l 1,4,-1
563 486 l 1,5,-1
348 306 l 1,6,-1
575 0 l 1,7,-1
431 0 l 1,8,-1
265 237 l 1,9,-1
211 192 l 1,10,-1
211 0 l 1,11,-1
93 0 l 1,0,-1
EndSplineSet
EndChar
| {
"pile_set_name": "Github"
} |
The hosts <b>alice</b> and <b>carol</b> set up a tunnel connection each to gateway <b>moon</b>.
Both hosts request a <b>virtual IP</b> via the IKEv2 configuration payload.
Gateway <b>moon</b> assigns virtual IP addresses from <b>pool1</b> with an address range of
<b>10.3.0.0/28</b> to hosts connecting to the <b>eth0</b> (192.168.0.1) interface and
virtual IP addresses from <b>pool2</b> with an address range of <b>10.4.0.0/28</b> to hosts
connecting to the <b>eth1</b> (10.1.0.1) interface.
<p>
Thus <b>carol</b> is assigned <b>PH_IP_CAROL1</b> whereas <b>alice</b> gets <b>10.4.0.1</b> and
both ping the gateway <b>moon</b>.
| {
"pile_set_name": "Github"
} |
# Generated by Django 2.1.15 on 2020-06-08 11:52
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (see header). Realigns three BadgeUser
    # fields with the field definitions current at Django 2.1.

    dependencies = [
        ('badgeuser', '0024_auto_20200106_1621'),
    ]
    operations = [
        # badgrapp FK becomes optional and is nulled out (not cascaded)
        # when the referenced BadgrApp row is deleted.
        migrations.AlterField(
            model_name='badgeuser',
            name='badgrapp',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mainsite.BadgrApp'),
        ),
        # last_name capped at 150 characters, matching django.contrib.auth.
        migrations.AlterField(
            model_name='badgeuser',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
        ),
        # username: unique, 150-character limit, validated by the
        # unicode username validator from django.contrib.auth.
        migrations.AlterField(
            model_name='badgeuser',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2016 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
class Google_Service_Dataflow_LeaseWorkItemResponse extends Google_Collection
{
  protected $collection_key = 'workItems';
  protected $workItemsType = 'Google_Service_Dataflow_WorkItem';
  protected $workItemsDataType = 'array';
  // Declared explicitly: setWorkItems()/getWorkItems() read and write this
  // field directly, and relying on an undeclared dynamic property is
  // deprecated as of PHP 8.2.
  public $workItems;

  /**
   * Sets the work items granted by the lease request.
   *
   * @param Google_Service_Dataflow_WorkItem[] $workItems
   */
  public function setWorkItems($workItems)
  {
    $this->workItems = $workItems;
  }
  /**
   * Returns the work items granted by the lease request.
   *
   * @return Google_Service_Dataflow_WorkItem[]
   */
  public function getWorkItems()
  {
    return $this->workItems;
  }
}
| {
"pile_set_name": "Github"
} |
export 'database/database.dart';
export 'database/adapters.dart';
| {
"pile_set_name": "Github"
} |
{
"dataset_reader": {
"type": "syntactic_dependency_arc_classification",
"include_raw_tokens": true
},
"train_data_path": "data/syntactic_dependency/ptb.train.conllu",
"validation_data_path": "data/syntactic_dependency/ptb.dev.conllu",
"test_data_path": "data/syntactic_dependency/ptb.test.conllu",
"evaluate_on_test" : true,
"model": {
"type": "pairwise_tagger",
"decoder": "mlp",
"encoder": {
"type": "lstm",
"input_size": 1024,
"hidden_size": 512,
"bidirectional": true,
"num_layers": 2
},
"contextualizer": {
"type": "elmo_contextualizer",
"batch_size": 80,
"elmo": {
"weight_file": "contextualizers/elmo_original_randomly_initialized/elmo_original_randomly_initialized_weights.hdf5",
"options_file": "contextualizers/elmo_original_randomly_initialized/elmo_original_randomly_initialized_options.json",
"requires_grad": true,
"num_output_representations": 1,
"dropout": 0.0
}
},
"token_representation_dim": 1024,
"combination": "x,y,x*y"
},
"iterator": {
"type": "basic",
"batch_size" : 80
},
"trainer": {
"num_epochs": 50,
"patience": 3,
"cuda_device": 0,
"validation_metric": "+accuracy",
"optimizer": {
"type": "adam",
"lr": 0.001
}
}
}
| {
"pile_set_name": "Github"
} |
package scm
import (
"fmt"
"github.com/driusan/bug/bugs"
"io/ioutil"
"os"
"strings"
"testing"
)
// GitCommit represents one entry of "git log --oneline" output: the
// abbreviated commit hash plus the one-line log subject.
type GitCommit struct {
	commit string
	log string
}

// CommitID returns the abbreviated commit hash.
func (c GitCommit) CommitID() string {
	return c.commit
}

// LogMsg returns the one-line log subject recorded for this commit.
func (c GitCommit) LogMsg() string {
	return c.log
}

// Diff runs "git show" with the message body as the pretty format, so the
// output is effectively the patch introduced by the commit.
func (c GitCommit) Diff() (string, error) {
	return runCmd("git", "show", "--pretty=format:%b", c.CommitID())
}

// CommitMessage returns the raw commit message (no diff, thanks to --quiet).
func (c GitCommit) CommitMessage() (string, error) {
	return runCmd("git", "show", "--pretty=format:%B", "--quiet", c.CommitID())
}

// GitTester drives an SCMHandler against a throwaway git repository
// created in a temporary working directory.
type GitTester struct {
	handler SCMHandler
	workdir string
}
// GetLogs returns one Commit per entry of "git log --oneline --reverse"
// (oldest first) for the repository in the current working directory.
func (t GitTester) GetLogs() ([]Commit, error) {
	// -z NUL-terminates each entry, so subjects containing newlines
	// cannot be confused with entry boundaries.
	logs, err := runCmd("git", "log", "--oneline", "--reverse", "-z")
	if err != nil {
		wd, _ := os.Getwd()
		fmt.Fprintf(os.Stderr, "Error retrieving git logs: %s in directory %s\n", logs, wd)
		return nil, err
	}
	logMsgs := strings.Split(logs, "\000")
	// the last line is empty, so don't allocate 1 for
	// it
	commits := make([]Commit, len(logMsgs)-1)
	for idx, commitText := range logMsgs {
		if commitText == "" {
			continue
		}
		// Each entry is "<abbrev-hash> <subject>".
		// NOTE(review): if an entry contains no space at all, commits[idx]
		// stays a nil Commit — callers would need to guard against that.
		spaceIdx := strings.Index(commitText, " ")
		if spaceIdx >= 0 {
			commits[idx] = GitCommit{commitText[0:spaceIdx], commitText[spaceIdx+1:]}
		}
	}
	return commits, nil
}
// AssertStagingIndex fails the test unless every file in f has exactly the
// given index/working-tree status according to "git status --porcelain".
func (g GitTester) AssertStagingIndex(t *testing.T, f []FileStatus) {
	for _, file := range f {
		out, err := runCmd("git", "status", "--porcelain", file.Filename)
		if err != nil {
			// Include the underlying error so the failure is diagnosable.
			t.Error("Could not run git status: " + err.Error())
		}
		// Porcelain format is "XY <path>", e.g. "A  foo.txt".
		expected := file.IndexStatus + file.WorkingStatus + " " + file.Filename + "\n"
		if out != expected {
			t.Error("Incorrect file status")
			t.Error("Got " + out + " not " + expected)
		}
	}
}
// StageFile adds the given file to the git staging area.
func (g GitTester) StageFile(file string) error {
	if _, err := runCmd("git", "add", file); err != nil {
		return err
	}
	return nil
}
// Setup creates a scratch directory, makes it the current working
// directory, and runs "git init" so each test starts from an empty repo.
func (t *GitTester) Setup() error {
	dir, err := ioutil.TempDir("", "gitbug")
	if err != nil {
		return err
	}
	t.workdir = dir
	// The original ignored Chdir's error; a failed chdir would make every
	// later git command run in the wrong directory, so report it.
	if err := os.Chdir(t.workdir); err != nil {
		return err
	}
	out, err := runCmd("git", "init")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error initializing git: %s", out)
		return err
	}
	return nil
}
// TearDown removes the temporary repository created by Setup.
func (t GitTester) TearDown() {
	os.RemoveAll(t.workdir)
}

// GetWorkDir returns the path of the temporary repository.
func (t GitTester) GetWorkDir() string {
	return t.workdir
}
// AssertCleanTree fails the test if "git status --porcelain" reports any
// staged or unstaged changes in the working tree.
func (m GitTester) AssertCleanTree(t *testing.T) {
	out, err := runCmd("git", "status", "--porcelain")
	if err != nil {
		// Include the underlying error so the failure is diagnosable.
		t.Error("Error running git status: " + err.Error())
	}
	if out != "" {
		t.Error("Unexpected Output from git status (expected nothing):\n" + out)
	}
}
// GetManager returns the SCMHandler under test.
func (m GitTester) GetManager() SCMHandler {
	return m.handler
}
// TestGitBugRenameCommits checks the patches recorded when a bug is created
// and then renamed: first an empty Description file is added, then git
// records the rename as a 100%-similarity move.
func TestGitBugRenameCommits(t *testing.T) {
	gm := GitTester{}
	gm.handler = GitManager{}
	// Expected "git show" patch bodies for the two commits, in order.
	expectedDiffs := []string{
		`
diff --git a/issues/Test-bug/Description b/issues/Test-bug/Description
new file mode 100644
index 0000000..e69de29
`, `
diff --git a/issues/Test-bug/Description b/issues/Renamed-bug/Description
similarity index 100%
rename from issues/Test-bug/Description
rename to issues/Renamed-bug/Description
`}
	runtestRenameCommitsHelper(&gm, t, expectedDiffs)
}
// TestGitFilesOutsideOfBugNotCommited runs the shared dirty-tree scenario
// against GitManager: files outside the issues tree must not be committed.
func TestGitFilesOutsideOfBugNotCommited(t *testing.T) {
	gm := GitTester{}
	gm.handler = GitManager{}
	runtestCommitDirtyTree(&gm, t)
}
// TestGitManagerGetType checks that GitManager identifies its SCM as "git".
func TestGitManagerGetType(t *testing.T) {
	manager := GitManager{}
	getType := manager.GetSCMType()
	if getType != "git" {
		t.Error("Incorrect SCM Type for GitManager. Got " + getType)
	}
}
// TestGitManagerPurge runs the shared purge scenario against GitManager.
func TestGitManagerPurge(t *testing.T) {
	gm := GitTester{}
	gm.handler = GitManager{}
	runtestPurgeFiles(&gm, t)
}
// TestGitManagerAutoclosingGitHub verifies that, with Autoclose enabled,
// deleting bugs whose Identifier file names a GitHub issue yields a commit
// message containing "Closes #..." so GitHub auto-closes those issues.
func TestGitManagerAutoclosingGitHub(t *testing.T) {
	// This test is specific to gitmanager, since GitHub
	// only supports git..
	tester := GitTester{}
	tester.handler = GitManager{Autoclose: true}
	err := tester.Setup()
	if err != nil {
		panic("Something went wrong trying to initialize git:" + err.Error())
	}
	defer tester.TearDown()
	m := tester.GetManager()
	if m == nil {
		t.Error("Could not get manager")
		return
	}
	os.Mkdir("issues", 0755)
	runCmd("bug", "create", "-n", "Test", "bug")
	runCmd("bug", "create", "-n", "Test", "Another", "bug")
	// The second Identifier exercises mixed-case "GITHuB:" and surrounding
	// whitespace; the assertions below expect both forms to be recognized.
	if err = ioutil.WriteFile("issues/Test-bug/Identifier", []byte("\n\nGitHub:#TestBug"), 0644); err != nil {
		t.Error("Could not write Identifier file")
		return
	}
	if err = ioutil.WriteFile("issues/Test-Another-bug/Identifier", []byte("\n\nGITHuB: #Whitespace "), 0644); err != nil {
		t.Error("Could not write Identifier file")
		return
	}
	// Commit the file, so that we can close it..
	m.Commit(bugs.Directory(tester.GetWorkDir()+"/issues"), "Adding commit")
	// Delete the bug
	os.RemoveAll(tester.GetWorkDir() + "/issues/Test-bug")
	os.RemoveAll(tester.GetWorkDir() + "/issues/Test-Another-bug")
	m.Commit(bugs.Directory(tester.GetWorkDir()+"/issues"), "Removal commit")
	commits, err := tester.GetLogs()
	if len(commits) != 2 || err != nil {
		t.Error("Error getting git logs while attempting to test GitHub autoclosing")
		return
	}
	// commits[1] is the removal commit; its message must close both issues,
	// either as a leading "Closes #x" or a subsequent ", closes #x" item.
	if msg, err := commits[1].(GitCommit).CommitMessage(); err != nil {
		t.Error("Error getting git logs while attempting to test GitHub autoclosing")
	} else {
		closing := func(issue string) bool {
			return strings.Contains(msg, "Closes #"+issue) ||
				strings.Contains(msg, ", closes #"+issue)
		}
		if !closing("Whitespace") || !closing("TestBug") {
			fmt.Printf("%s\n", msg)
			t.Error("GitManager did not autoclose Github issues")
		}
	}
}
| {
"pile_set_name": "Github"
} |
from cpp11_std_array import *
import sys
def failed(a, b, msg):
    """Abort the test run, reporting both sequences alongside msg."""
    raise RuntimeError(msg + " " + str(list(a)) + " " + str(list(b)))


def compare_sequences(a, b):
    """Raise via failed() unless a and b have equal length and elements."""
    if len(a) != len(b):
        failed(a, b, "different sizes")
    for left, right in zip(a, b):
        if left != right:
            failed(a, b, "elements are different")


def compare_containers(pythonlist, swigarray):
    """Check that a SWIG-wrapped container matches a plain Python list."""
    compare_sequences(pythonlist, swigarray)
def steps_exception(swigarray, i, j, step):
    # Assert that slicing swigarray[i:j:step] raises ValueError.
    # i and/or j may be None to exercise the open-ended slice forms.
    # The call sites below expect failure both for unsupported steps
    # (anything other than +1/-1, including 0) and for ranges that do not
    # cover the whole fixed-size std::array — presumably because the SWIG
    # wrapper cannot build a sub-array of a different length.
    try:
        if i == None and j == None:
            a = swigarray[::step]
        elif i == None:
            a = swigarray[:j:step]
        elif j == None:
            a = swigarray[i::step]
        else:
            a = swigarray[i:j:step]
        # No exception was raised: fail loudly. RuntimeError is not caught
        # by the ValueError handler below, so it propagates to the caller.
        raise RuntimeError("swigarray[" + str(i) + ":" + str(j) + ":" + str(step) + "] missed steps exception for " + str(list(swigarray)))
    except ValueError as e:
        # print("exception: {}".format(e))
        pass
def del_exception(swigarray, i, j, step):
    # Assert that deleting elements/slices of swigarray raises ValueError:
    # std::array is fixed-size, so every form of deletion should fail.
    # i/j/step may be None to select the different del statement forms.
    try:
        if i == None and j == None:
            del swigarray[::step]
        elif j == None and step == None:
            # Single-element deletion: del swigarray[i]
            del swigarray[i]
        elif i == None:
            del swigarray[:j:step]
        elif j == None:
            del swigarray[i::step]
        else:
            del swigarray[i:j:step]
        # No exception was raised: fail loudly. RuntimeError is not caught
        # by the ValueError handler below, so it propagates to the caller.
        raise RuntimeError("swigarray[" + str(i) + ":" + str(j) + ":" + str(step) + "] missed del exception for " + str(list(swigarray)))
    except ValueError as e:
        # print("exception: {}".format(e))
        pass
def setslice_exception(swigarray, newval):
    """Assert that assigning newval to swigarray[::] raises TypeError."""
    try:
        swigarray[::] = newval
    except TypeError:
        # print("exception: {}".format(e))
        pass
    else:
        # Assignment unexpectedly succeeded; report it. RuntimeError is not
        # swallowed by the TypeError handler, so it reaches the caller.
        raise RuntimeError("swigarray[::] = " + str(newval) + " missed set exception for swigarray:" + str(list(swigarray)))
# Check std::array has similar behaviour to a Python list
# except it is not resizable
ps = [0, 1, 2, 3, 4, 5]
ai = ArrayInt6(ps)
compare_containers(ps, ai)
# slices
# Out-of-range bounds must be clamped exactly like Python list slicing.
compare_containers(ps[0:6], ai[0:6])
compare_containers(ps[0:10], ai[0:10])
compare_containers(ps[-10:6], ai[-10:6])
compare_containers(ps[-10:10], ai[-10:10])
compare_containers(ps[0:6:1], ai[0:6:1])
compare_containers(ps[::], ai[::])
compare_containers(ps[::1], ai[::1])
# Iteration protocol must also match.
compare_containers([x for x in ps], [x for x in ai])
# Reverse
compare_containers(ps[::-1], ai[::-1])
compare_containers(ps[5::-1], ai[5::-1])
compare_containers(ps[10::-1], ai[10::-1])
# Steps other than +1 and -1 not supported
# (nor are slices selecting fewer than all 6 elements — see the
# (1, 3, 1) and (3, 1, -1) cases).
steps_exception(ai, 0, 6, 3)
steps_exception(ai, None, None, 0)
steps_exception(ai, None, None, 2)
steps_exception(ai, None, None, -2)
steps_exception(ai, 1, 3, 1)
steps_exception(ai, 3, 1, -1)
# Modify content
# In-place element assignment must be visible on both sides.
for i in range(len(ps)):
    ps[i] = (ps[i] + 1) * 10
    ai[i] = (ai[i] + 1) * 10
compare_containers(ps, ai)
# Delete
# std::array is fixed-size, so every deletion form must raise.
del_exception(ai, 0, 6, 3)
del_exception(ai, None, None, 0)
del_exception(ai, None, None, 2)
del_exception(ai, None, None, -2)
del_exception(ai, 1, 3, 1)
del_exception(ai, 3, 1, -1)
del_exception(ai, 0, None, None)
del_exception(ai, 5, None, None)
# Empty
# Default construction value-initializes all six elements to zero.
ai = ArrayInt6()
compare_containers([0, 0, 0, 0, 0, 0], ai)
# Set slice
newvals = [10, 20, 30, 40, 50, 60]
ai[::] = newvals
compare_containers(ai, newvals)
newvals = [100, 200, 300, 400, 500, 600]
ai[0:6:1] = newvals
compare_containers(ai, newvals)
# Reversed slice assignment stores the values back-to-front.
newvals = [1000, 2000, 3000, 4000, 5000, 6000]
ai[::-1] = newvals
compare_containers(ai, newvals[::-1])
# Oversized bounds are clamped before assignment.
newvals = [10000, 20000, 30000, 40000, 50000, 60000]
ai[-10:100:1] = newvals
compare_containers(ai, newvals[-10:100:1])
# Assigning a sequence whose length differs from 6 must raise TypeError.
setslice_exception(ai, [1, 2, 3, 4, 5, 6, 7])
setslice_exception(ai, [1, 2, 3, 4, 5])
setslice_exception(ai, [1, 2, 3, 4])
setslice_exception(ai, [1, 2, 3])
setslice_exception(ai, [1, 2])
setslice_exception(ai, [1])
setslice_exception(ai, [])
# Check return
# All four C++ return styles (value, const ref, ref, pointer) must
# produce the same Python-visible contents.
compare_containers(arrayOutVal(), [-2, -1, 0, 0, 1, 2])
compare_containers(arrayOutConstRef(), [-2, -1, 0, 0, 1, 2])
compare_containers(arrayOutRef(), [-2, -1, 0, 0, 1, 2])
compare_containers(arrayOutPtr(), [-2, -1, 0, 0, 1, 2])
# Check passing arguments
# By-value / const-ref calls return a modified copy...
ai = arrayInVal([9, 8, 7, 6, 5, 4])
compare_containers(ai, [90, 80, 70, 60, 50, 40])
ai = arrayInConstRef([9, 8, 7, 6, 5, 4])
compare_containers(ai, [90, 80, 70, 60, 50, 40])
# ...while ref / pointer calls mutate the argument in place.
ai = ArrayInt6([9, 8, 7, 6, 5, 4])
arrayInRef(ai)
compare_containers(ai, [90, 80, 70, 60, 50, 40])
ai = ArrayInt6([9, 8, 7, 6, 5, 4])
arrayInPtr(ai)
compare_containers(ai, [90, 80, 70, 60, 50, 40])
# fill
# std::array::fill sets every element to the given value.
ai.fill(111)
compare_containers(ai, [111, 111, 111, 111, 111, 111])
| {
"pile_set_name": "Github"
} |
package com.annimon.stream.function;
import com.annimon.stream.Objects;
import org.jetbrains.annotations.NotNull;
/**
 * Represents a predicate (function with boolean type result) with additional index argument.
 *
 * <p>This is the index-aware counterpart of {@link IntPredicate}: in addition
 * to the {@code int} value itself, {@link #test(int, int)} also receives the
 * element's position.
 *
 * @since 1.2.1
 */
public interface IndexedIntPredicate {

    /**
     * Tests the value for satisfying predicate.
     *
     * @param index the index
     * @param value the value to be tested
     * @return {@code true} if the value matches the predicate, otherwise {@code false}
     */
    boolean test(int index, int value);

    /**
     * Non-instantiable holder for helper methods related to
     * {@code IndexedIntPredicate}.
     */
    class Util {

        private Util() { }

        /**
         * Wraps {@link IntPredicate} and returns {@code IndexedIntPredicate}.
         *
         * <p>The returned predicate ignores the index and delegates every call
         * to the wrapped {@code predicate}.
         *
         * @param predicate  the predicate to wrap
         * @return a wrapped {@code IndexedIntPredicate}
         * @throws NullPointerException if {@code predicate} is null
         */
        public static IndexedIntPredicate wrap(@NotNull final IntPredicate predicate) {
            Objects.requireNonNull(predicate);
            return new IndexedIntPredicate() {

                @Override
                public boolean test(int index, int value) {
                    // Index intentionally unused: wrap() adapts an
                    // index-free IntPredicate to this interface.
                    return predicate.test(value);
                }
            };
        }
    }
}
| {
"pile_set_name": "Github"
} |
## PostgreSQL quorum based同步复制模式在极端情况下的0丢失破坏问题
### 作者
digoal
### 日期
2018-07-04
### 标签
PostgreSQL , quorum based sync replication , 2PC , 0丢失
----
## 背景
PostgreSQL 9.6开始支持了quorum based 同步复制机制,当客户端发起事务结束请求时,必须要等这笔事务对应的redo日志复制到了指定副本,主库才响应客户端。
从而保证客户端正常收到主库反馈后,WAL日志一定已经有了多个副本,保证数据的0丢失。
但是在极端情况下,可能无法保障0丢失,为什么呢?
因为WAL日志先落主库,然后等备库复制,最后反馈客户端。
比如用户提交事务前,备库挂了,实际上主库本地的WAL已经写了,数据在本地已经持久化。只是主库要等符合quorum based个数备库那边接收到WAL的位点反馈,才反馈给客户端提交成功。
因此,当客户端在等待过程中,如果连接中断,就会造成本地已提交,但是有可能某些备库没有收到WAL的情况。
客户端如果重新与主库建立连接,它可以看到已提交的数据。
在pg_shardman的介绍中,也提到了类似的问题
https://github.com/postgrespro/pg_shardman
The trade-off is well-known: asynchronous replication is faster, but allows replica to lag arbitrary behind the primary, which might lead to loss of a bunch of recently committed transactions (if primary holder fails), or WAL puffing up in case of replica failure. Synchronous replication is slower, but committed transaction are typically not dropped. Typically, because it is actually still possible to lose them without kind of 2PC commit. Imagine the following scenario:
- Primary's connection with replica is teared down.
- Primary executes a transaction, e.g. adds some row with id 42, commits it locally and blocks because there is no connection with replica.
- Client suddenly loses connection with primary for a moment and reconnects to learn the status of the transaction, sees the row with id 42 and thinks that it has been committed.
- Now primary fails permanently and we switch to the replica. Replica has no idea of that transaction, but client is sure it is committed.
### 2PC可以保证一次客户端连接丢失带来的问题
```
预提交成功
2PC提交
备库挂
主库2PC提交成功
客户端丢失连接
客户端重新发起连接,发现主库已经2PC提交成功,可以看到数据
主库挂掉
切换到备库,客户端重新发起连接,发现备库还没有数据,因为2PC还没有结束,可以人为介入处理,不会丢数据。
```
### 2PC不可以保证超过一次客户端连接丢失带来的问题
```
预提交
备库挂
主库预提交成功
客户端丢失连接
客户端重新发起连接,发现主库已经预提交成功,发起2PC提交
客户端丢失连接
客户端重新发起连接,发现主库已经2PC提交成功,可以看到数据
主库挂掉
切换到备库,客户端重新发起连接,发现备库还没有数据,丢数据。
```
## 小结
当备库挂掉,无法满足quorum时,客户端提交事务后,会处于等待状态,但是如果客户端丢失连接,再次发起请求,可以看到仅仅在主库提交的数据。对于2PC事务,只要多次丢失连接,同样会造成问题。
## 参考
[《PG多节点(quorum based), 0丢失 HA(failover,switchover)方案》](../201706/20170612_02.md)
[《PostgreSQL 一主多从(多副本,强同步)简明手册 - 配置、压测、监控、切换、防脑裂、修复、0丢失 - 珍藏级》](../201803/20180326_01.md)
#### [PostgreSQL 许愿链接](https://github.com/digoal/blog/issues/76 "269ac3d1c492e938c0191101c7238216")
您的愿望将传达给PG kernel hacker、数据库厂商等, 帮助提高数据库产品质量和功能, 说不定下一个PG版本就有您提出的功能点. 针对非常好的提议,奖励限量版PG文化衫、纪念品、贴纸、PG热门书籍等,奖品丰富,快来许愿。[开不开森](https://github.com/digoal/blog/issues/76 "269ac3d1c492e938c0191101c7238216").
#### [9.9元购买3个月阿里云RDS PostgreSQL实例](https://www.aliyun.com/database/postgresqlactivity "57258f76c37864c6e6d23383d05714ea")
#### [PostgreSQL 解决方案集合](https://yq.aliyun.com/topic/118 "40cff096e9ed7122c512b35d8561d9c8")
#### [德哥 / digoal's github - 公益是一辈子的事.](https://github.com/digoal/blog/blob/master/README.md "22709685feb7cab07d30f30387f0a9ae")

| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.