| text (string, lengths 2–100k) | meta (dict) |
|---|---|
{
"created_at": "2015-02-27T22:28:18.612218",
"description": "Mind mapping App for OS X compatible with FreeMind",
"fork": false,
"full_name": "qvacua/qmind",
"language": "Objective-C",
"updated_at": "2015-02-27T23:42:26.878605"
} | {
"pile_set_name": "Github"
} |
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
false
true
true
true
true
true
true
false
false
false
false
true
true
true
true
false
false
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
true
true
true
true
true
true
false
true
true
true
true
true
true
true
false
false
false
true
true
true
true
false
true
false
false
true
false
true
true
true
true
true
true
true
false
false
false
true
true
true
true
false
true
false
false
true
true
false
false
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
true
true
true
true
true
true
false
true
true
true
true
true
true
true
true
true
false
true
true
true
true
false
true
false
false
true
false
true
true
true
true
true
true
true
true
true
false
true
true
true
true
false
true
false
false
true
true
true
true
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
true
true
true
true
true
true
false
true
true
true
true
true
true
true
true
true
false
true
true
true
true
false
true
false
false
true
false
true
true
true
true
true
true
true
true
true
false
true
true
true
true
false
true
false
false
true
true
true
true
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
true
true
true
true
true
true
false
true
true
true
true
true
true
true
true
true
true
true
true
true
true
false
true
false
false
true
false
true
true
true
true
true
true
true
true
true
true
true
true
true
true
false
true
false
false
true
true
true
true
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
true
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
false
false
false
false
true
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
true
true
true
true
false
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
false
false
true
false
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
false
false
true
true
true
true
false
false
true
false
false
true
false
false
false
false
false
false
false
false
false
true
false
false
true
false
true
false
true
false
true
false
true
| {
"pile_set_name": "Github"
} |
commandlinefu_id: 13582
translator:
weibo: ''
hide: true
command: |-
bkname="test"; tobk="*" ; totalsize=$(du -csb $tobk | tail -1 | cut -f1) ; tar cvf - $tobk | tee >(sha512sum > $bkname.sha512) >(tar -tv > $bkname.lst) | mbuffer -m 4G -P 100% | pv -s $totalsize -w 100 | dd of=/dev/nst0 bs=256k
summary: |-
Backup to LTO Tape with progress, checksums and buffering
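# A hedged verification sketch (assumption: GNU coreutils and the same tape
# device; the stored digest covers the tar stream read from stdin, so it is
# recorded against the filename "-", which sha512sum -c reads back from stdin):
#   mt -f /dev/nst0 rewind
#   dd if=/dev/nst0 bs=256k | sha512sum -c test.sha512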
| {
"pile_set_name": "Github"
} |
//
// CSUndoObjectController.m
// CocoaSplit
//
// Created by Zakk on 2/19/18.
//
#import "CSUndoObjectController.h"
@implementation CSUndoObjectController
-(void)awakeFromNib
{
[super awakeFromNib];
if (!_undoNotificationCenter)
{
_undoNotificationCenter = [[NSNotificationCenter alloc] init];
_undoNotificationQueue = [[NSNotificationQueue alloc] initWithNotificationCenter:_undoNotificationCenter];
[_undoNotificationCenter addObserver:self selector:@selector(undoNotification:) name:nil object:nil];
}
_pausedUndoKeys = [NSMutableDictionary dictionary];
}
-(void)undoNotification:(NSNotification *)notification
{
NSString *keyPath = notification.name;
id propValue = notification.object;
if (self.undoDelegate)
{
[self.undoDelegate performUndoForKeyPath:keyPath usingValue:propValue];
}
}
-(void)dealloc
{
if (_undoNotificationCenter)
{
[_undoNotificationCenter removeObserver:self];
}
}
-(void)registerUndoForProperty:(NSString *)propName
{
if ([_pausedUndoKeys valueForKey:propName])
{
return;
}
id propertyValue = [self valueForKeyPath:propName];
NSNotification *undoNotification = [NSNotification notificationWithName:propName object:propertyValue];
[_undoNotificationQueue enqueueNotification:undoNotification postingStyle:NSPostWhenIdle coalesceMask:NSNotificationCoalescingOnName forModes:nil];
}
-(void)pauseUndoForKeyPath:(NSString *)keyPath
{
[_pausedUndoKeys setObject:@(YES) forKey:keyPath];
}
-(void)resumeUndoForKeyPath:(NSString *)keyPath
{
[_pausedUndoKeys removeObjectForKey:keyPath];
}
-(void)setValue:(id)value forKeyPath:(NSString *)keyPath
{
[self registerUndoForProperty:keyPath];
[super setValue:value forKeyPath:keyPath];
}
@end
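// A minimal usage sketch (illustrative, not part of the original file; assumes
// the controller is instantiated in a nib and its undoDelegate implements
// performUndoForKeyPath:usingValue:):
//
//   [controller setValue:@(0.5) forKeyPath:@"selection.opacity"];
//   // registerUndoForProperty: snapshots the old value, then posts one
//   // coalesced notification per key path when the run loop goes idle
//   // (NSPostWhenIdle + NSNotificationCoalescingOnName), so a rapid series
//   // of changes to the same key path yields a single undo step.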
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.5.xsd
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-2.5.xsd"
default-autowire="byName" default-lazy-init="false">
<bean id="jdbcTemplate" class="org.springframework.jdbc.core.JdbcTemplate">
<property name="dataSource" ref="dataSource"/>
</bean>
<!-- Transaction manager for a single JDBC DataSource -->
<bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
<property name="dataSource" ref="dataSource"/>
</bean>
<bean id="sqlSessionFactory" class="javacommon.SqlSessionFactoryFactoryBean">
<property name="configLocation" value="classpath:configuration.xml"/>
<property name="mapperLocations" value="classpath*:/com/**/model/**/*Mapper.xml"/>
<property name="dataSource" ref="dataSource"/>
</bean>
<!-- component-scan automatically detects classes annotated with @Component, @Controller, @Service, @Repository, etc. -->
<context:component-scan base-package="com.**.dao"/>
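<!-- The "dataSource" bean referenced above is defined elsewhere; a minimal
     illustrative definition (driver/url/credentials are placeholder assumptions):
<bean id="dataSource" class="org.apache.commons.dbcp.BasicDataSource">
    <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
    <property name="url" value="jdbc:mysql://localhost:3306/mydb"/>
    <property name="username" value="root"/>
    <property name="password" value=""/>
</bean>
-->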
</beans>
| {
"pile_set_name": "Github"
} |
/* set page layout */
@page {
size: A4 landscape;
}
body {
display: block !important;
}
.slides {
left: 0;
top: 0;
}
.slides > article {
position: relative;
left: 0;
top: 0;
margin: 0 !important;
page-break-inside: avoid;
text-shadow: none; /* disable shadow */
display: block !important;
transform: translate(0) !important;
-o-transform: translate(0) !important;
-moz-transform: translate(0) !important;
-webkit-transform: translate3d(0, 0, 0) !important;
}
div.code {
background: rgb(240, 240, 240);
}
/* hide click areas */
.slide-area, #prev-slide-area, #next-slide-area {
display: none;
}
/* add explicit links */
a:link:after, a:visited:after {
content: " (" attr(href) ") ";
font-size: 50%;
}
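/* Illustrative effect (assuming a typical print engine):
   <a href="https://example.com/docs">docs</a> prints as
   docs (https://example.com/docs), with the URL rendered at half size. */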
/* white background */
body {
background: rgb(255,255,255) !important;
}
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2012 Scott Lembcke and Howling Moon Software
* Copyright (c) 2012 cocos2d-x.org
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __PHYSICSNODES_DEBUGNODE_H__
#define __PHYSICSNODES_DEBUGNODE_H__
#include "cocos2d.h"
#include "ExtensionMacros.h"
#if CC_ENABLE_CHIPMUNK_INTEGRATION
#include "chipmunk.h"
NS_CC_EXT_BEGIN
/**
A CCBaseData that draws the components of a physics engine.
Supported physics engines:
- Chipmunk
- Objective-Chipmunk
@since v2.1
*/
class CCPhysicsDebugNode : public CCDrawNode
{
protected:
cpSpace *m_pSpacePtr;
public:
/** Create a debug node for a regular Chipmunk space. */
static CCPhysicsDebugNode* create(cpSpace *space);
virtual ~CCPhysicsDebugNode();
virtual void draw();
cpSpace* getSpace() const;
void setSpace(cpSpace *space);
CCPhysicsDebugNode();
};
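// A minimal usage sketch (illustrative; assumes a CCLayer subclass that owns
// an existing cpSpace* m_pSpace):
//
//   CCPhysicsDebugNode *debugNode = CCPhysicsDebugNode::create(m_pSpace);
//   this->addChild(debugNode, 100); // high z-order so shapes draw on top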
NS_CC_EXT_END
#endif // CC_ENABLE_CHIPMUNK_INTEGRATION
#endif // __PHYSICSNODES_DEBUGNODE_H__
| {
"pile_set_name": "Github"
} |
package com.fasterxml.jackson.dataformat.avro.interop;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
public class DummyRecord {
@JsonProperty(required = true)
public String firstValue;
@JsonProperty(required = true)
public int secondValue;
protected DummyRecord() { }
public DummyRecord(String fv, int sv) {
firstValue = fv;
secondValue = sv;
}
@Override
public String toString() {
return String.format("[first=%s,second=%s]", firstValue, secondValue);
}
@Override
public int hashCode() {
return Objects.hash(firstValue, secondValue);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (!(o instanceof DummyRecord)) return false;
return _equals((DummyRecord) o);
}
protected boolean _equals(DummyRecord other) {
return Objects.equals(firstValue, other.firstValue)
&& Objects.equals(secondValue, other.secondValue);
}
}
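// A round-trip sketch (hedged: assumes jackson-dataformat-avro's AvroMapper;
// verify the exact API against the version on the classpath):
//
//   AvroMapper mapper = new AvroMapper();
//   AvroSchema schema = mapper.schemaFor(DummyRecord.class);
//   byte[] bytes = mapper.writer(schema).writeValueAsBytes(new DummyRecord("a", 1));
//   DummyRecord back = mapper.readerFor(DummyRecord.class).with(schema).readValue(bytes);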
| {
"pile_set_name": "Github"
} |
#ifndef BOOST_MPL_FIND_HPP_INCLUDED
#define BOOST_MPL_FIND_HPP_INCLUDED
// Copyright Aleksey Gurtovoy 2000-2002
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id$
// $Date$
// $Revision$
#include <boost/mpl/find_if.hpp>
#include <boost/mpl/same_as.hpp>
#include <boost/mpl/aux_/na_spec.hpp>
#include <boost/mpl/aux_/lambda_support.hpp>
namespace boost { namespace mpl {
template<
typename BOOST_MPL_AUX_NA_PARAM(Sequence)
, typename BOOST_MPL_AUX_NA_PARAM(T)
>
struct find
: find_if< Sequence,same_as<T> >
{
BOOST_MPL_AUX_LAMBDA_SUPPORT(2,find,(Sequence,T))
};
BOOST_MPL_AUX_NA_SPEC(2, find)
}}
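// A minimal usage sketch (illustrative):
//
//   typedef boost::mpl::vector<int, char, long> types;
//   typedef boost::mpl::find<types, char>::type pos; // iterator to 'char'
//   // boost::mpl::distance from begin<types>::type to pos is 1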
#endif // BOOST_MPL_FIND_HPP_INCLUDED
| {
"pile_set_name": "Github"
} |
ONE_DAY=\ 1 day
ONE_HOUR=\ 1 hour
ONE_MIN=\ 1 min
ONE_MONTH=\ 1 month
ONE_YEAR=\ 1 year
TWO_HOURS=\ 2 hours
THREE_HOURS=\ 3 hours
THREE_MONTHS=\ 3 months
FIVE_MIN=\ 5 min
SIX_HOURS=\ 6 hours
SIX_MONTHS=\ 6 months
SEVEN_DAYS=\ 7 days
TEN_MIN=10 min
TWELVE_HOURS=12 hours
THIRTY_MIN=30 min
LESS_THAN=<
A_LOT_LESS_THAN=<<
GREATER_THAN=>
ACTION_CAPITALIZED=ACTION
ACTION_INFO_CAPITALIZED=ACTION_INFO
ALL=All
ARCHITECTURE=Architecture
ATTRIBUTE=Attribute
ATTRIBUTE_VALUE=Attribute value
ATTRIBUTE_VALUES=Attribute values
ATTRIBUTES=Attributes
BLANK=Blank
BLOCKED_COUNT_WAITED_COUNT=Total blocked: {0} Total waited: {1}\n
BOOT_CLASS_PATH=Boot class path
BORDERED_COMPONENT_MORE_OR_LESS_BUTTON_TOOLTIP=Toggle to show more or less information
CPU_USAGE=CPU Usage
CPU_USAGE_FORMAT=CPU Usage: {0}%
CANCEL=Cancel
CASCADE=&Cascade
CHART_COLON=&Chart:
CLASS_PATH=Class path
CLASS_NAME=ClassName
CLASS_TAB_INFO_LABEL_FORMAT=<html>Loaded: {0} Unloaded: {1} Total: {2}</html>
CLASS_TAB_LOADED_CLASSES_PLOTTER_ACCESSIBLE_NAME=Chart for Loaded Classes.
CLASSES=Classes
CLOSE=Close
COLUMN_NAME=Name
COLUMN_PID=PID
COMMITTED_MEMORY=Committed memory
COMMITTED_VIRTUAL_MEMORY=Committed virtual memory
COMMITTED=Committed
CONNECT=&Connect
CONNECT_DIALOG_CONNECT_BUTTON_TOOLTIP=Connect to Java Virtual Machine
CONNECT_DIALOG_ACCESSIBLE_DESCRIPTION=Dialog for making a new connection to a local or remote Java Virtual Machine
CONNECT_DIALOG_MASTHEAD_ACCESSIBLE_NAME=Masthead Graphic
CONNECT_DIALOG_MASTHEAD_TITLE=New Connection
CONNECT_DIALOG_STATUS_BAR_ACCESSIBLE_NAME=Status Bar
CONNECT_DIALOG_TITLE=JConsole: New Connection
CONNECTED_PUNCTUATION_CLICK_TO_DISCONNECT_=Connected. Click to disconnect.
CONNECTION_FAILED=Connection failed
CONNECTION=&Connection
CONNECTION_NAME=Connection name
CONNECTION_NAME__DISCONNECTED_={0} (disconnected)
CONSTRUCTOR=Constructor
CURRENT_CLASSES_LOADED=Current classes loaded
CURRENT_HEAP_SIZE=Current heap size
CURRENT_VALUE=Current value: {0}
CREATE=Create
DAEMON_THREADS=Daemon threads
DISCONNECTED_PUNCTUATION_CLICK_TO_CONNECT_=Disconnected. Click to connect.
DOUBLE_CLICK_TO_EXPAND_FORWARD_SLASH_COLLAPSE=Double click to expand/collapse
DOUBLE_CLICK_TO_VISUALIZE=Double click to visualize
DESCRIPTION=Description
DESCRIPTOR=Descriptor
DETAILS=Details
DETECT_DEADLOCK=&Detect Deadlock
DETECT_DEADLOCK_TOOLTIP=Detect deadlocked threads
DIMENSION_IS_NOT_SUPPORTED_COLON=Dimension is not supported:
DISCARD_CHART=Discard chart
DURATION_DAYS_HOURS_MINUTES={0,choice,1#{0,number,integer} day |1.0<{0,number,integer} days }{1,choice,0<{1,number,integer} hours |1#{1,number,integer} hour |1<{1,number,integer} hours }{2,choice,0<{2,number,integer} minutes|1#{2,number,integer} minute|1.0<{2,number,integer} minutes}
DURATION_HOURS_MINUTES={0,choice,1#{0,number,integer} hour |1<{0,number,integer} hours }{1,choice,0<{1,number,integer} minutes|1#{1,number,integer} minute|1.0<{1,number,integer} minutes}
DURATION_MINUTES={0,choice,1#{0,number,integer} minute|1.0<{0,number,integer} minutes}
DURATION_SECONDS={0} seconds
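# Illustrative rendering of the java.text.MessageFormat choice patterns above:
# formatting DURATION_HOURS_MINUTES with {0}=1, {1}=30 yields "1 hour 30 minutes".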
EMPTY_ARRAY=Empty array
ERROR=Error
ERROR_COLON_MBEANS_ALREADY_EXIST=Error: MBeans already exist
ERROR_COLON_MBEANS_DO_NOT_EXIST=Error: MBeans do not exist
EVENT=Event
EXIT=E&xit
FAIL_TO_LOAD_PLUGIN=Warning: Failed to load plugin: {0}
FILE_CHOOSER_FILE_EXISTS_CANCEL_OPTION=Cancel
FILE_CHOOSER_FILE_EXISTS_MESSAGE=<html><center>File already exists:<br>{0}<br>Do you want to replace it?
FILE_CHOOSER_FILE_EXISTS_OK_OPTION=Replace
FILE_CHOOSER_FILE_EXISTS_TITLE=File Exists
FILE_CHOOSER_SAVED_FILE=<html>Saved to file:<br>{0}<br>({1} bytes)
FILE_CHOOSER_SAVE_FAILED_MESSAGE=<html><center>Save to file failed:<br>{0}<br>{1}
FILE_CHOOSER_SAVE_FAILED_TITLE=Save Failed
FREE_PHYSICAL_MEMORY=Free physical memory
FREE_SWAP_SPACE=Free swap space
GARBAGE_COLLECTOR=Garbage collector
GC_INFO=Name = ''{0}'', Collections = {1,choice,-1#Unavailable|0#{1,number,integer}}, Total time spent = {2}
GC_TIME=GC time
GC_TIME_DETAILS={0} on {1} ({2} collections)
HEAP_MEMORY_USAGE=Heap Memory Usage
HEAP=Heap
HELP_ABOUT_DIALOG_ACCESSIBLE_DESCRIPTION=Dialog containing information about JConsole and JDK versions
HELP_ABOUT_DIALOG_JCONSOLE_VERSION=JConsole version:<br>{0}
HELP_ABOUT_DIALOG_JAVA_VERSION=Java VM version:<br>{0}
HELP_ABOUT_DIALOG_MASTHEAD_ACCESSIBLE_NAME=Masthead Graphic
HELP_ABOUT_DIALOG_MASTHEAD_TITLE=About JConsole
HELP_ABOUT_DIALOG_TITLE=JConsole: About
HELP_ABOUT_DIALOG_USER_GUIDE_LINK_URL=https://docs.oracle.com/javase/{0}/docs/technotes/guides/management/jconsole.html
HELP_MENU_ABOUT_TITLE=&About JConsole
HELP_MENU_USER_GUIDE_TITLE=Online &User Guide
HELP_MENU_TITLE=&Help
HOTSPOT_MBEANS_ELLIPSIS=&Hotspot MBeans...
HOTSPOT_MBEANS_DIALOG_ACCESSIBLE_DESCRIPTION=Dialog for managing Hotspot MBeans
IMPACT=Impact
INFO=Info
INFO_CAPITALIZED=INFO
INSECURE=Insecure connection
INVALID_PLUGIN_PATH=Warning: Invalid plugin path: {0}
INVALID_URL=Invalid URL: {0}
IS=Is
JAVA_MONITORING___MANAGEMENT_CONSOLE=Java Monitoring && Management Console
JCONSOLE_COLON_=JConsole: {0}
JCONSOLE_VERSION=JConsole version "{0}"
JCONSOLE_ACCESSIBLE_DESCRIPTION=Java Monitoring && Management Console
JIT_COMPILER=JIT compiler
LIBRARY_PATH=Library path
LIVE_THREADS=Live threads
LOADED=Loaded
LOCAL_PROCESS_COLON=&Local Process:
MASTHEAD_FONT=Dialog-PLAIN-25
MANAGEMENT_NOT_ENABLED=<b>Note</b>: The management agent is not enabled on this process.
MANAGEMENT_WILL_BE_ENABLED=<b>Note</b>: The management agent will be enabled on this process.
MBEAN_ATTRIBUTE_INFO=MBeanAttributeInfo
MBEAN_INFO=MBeanInfo
MBEAN_NOTIFICATION_INFO=MBeanNotificationInfo
MBEAN_OPERATION_INFO=MBeanOperationInfo
MBEANS=MBeans
MBEANS_TAB_CLEAR_NOTIFICATIONS_BUTTON=&Clear
MBEANS_TAB_CLEAR_NOTIFICATIONS_BUTTON_TOOLTIP=Clear notifications
MBEANS_TAB_COMPOSITE_NAVIGATION_MULTIPLE=Composite Data Navigation {0}/{1}
MBEANS_TAB_COMPOSITE_NAVIGATION_SINGLE=Composite Data Navigation
MBEANS_TAB_REFRESH_ATTRIBUTES_BUTTON=&Refresh
MBEANS_TAB_REFRESH_ATTRIBUTES_BUTTON_TOOLTIP=Refresh attributes
MBEANS_TAB_SUBSCRIBE_NOTIFICATIONS_BUTTON=&Subscribe
MBEANS_TAB_SUBSCRIBE_NOTIFICATIONS_BUTTON_TOOLTIP=Start listening for notifications
MBEANS_TAB_TABULAR_NAVIGATION_MULTIPLE=Tabular Data Navigation {0}/{1}
MBEANS_TAB_TABULAR_NAVIGATION_SINGLE=Tabular Data Navigation
MBEANS_TAB_UNSUBSCRIBE_NOTIFICATIONS_BUTTON=&Unsubscribe
MBEANS_TAB_UNSUBSCRIBE_NOTIFICATIONS_BUTTON_TOOLTIP=Stop listening for notifications
MANAGE_HOTSPOT_MBEANS_IN_COLON_=Manage Hotspot MBeans in:
MAX=Max
MAXIMUM_HEAP_SIZE=Maximum heap size
MEMORY=Memory
MEMORY_POOL_LABEL=Memory Pool "{0}"
MEMORY_TAB_HEAP_PLOTTER_ACCESSIBLE_NAME=Memory usage chart for heap.
MEMORY_TAB_INFO_LABEL_FORMAT=<html>Used: {0} Committed: {1} Max: {2}</html>
MEMORY_TAB_NON_HEAP_PLOTTER_ACCESSIBLE_NAME=Memory usage chart for non heap.
MEMORY_TAB_POOL_CHART_ABOVE_THRESHOLD=which is above the threshold of {0}.\n
MEMORY_TAB_POOL_CHART_ACCESSIBLE_NAME=Memory Pool Usage Chart.
MEMORY_TAB_POOL_CHART_BELOW_THRESHOLD=which is below the threshold of {0}.\n
MEMORY_TAB_POOL_PLOTTER_ACCESSIBLE_NAME=Memory usage chart for {0}.
MESSAGE=Message
METHOD_SUCCESSFULLY_INVOKED=Method successfully invoked
MINIMIZE_ALL=&Minimize All
MONITOR_LOCKED=\ \ \ - locked {0}\n
NAME=Name
NAME_AND_BUILD={0} (build {1})
NAME_STATE=Name: {0}\nState: {1}\n
NAME_STATE_LOCK_NAME=Name: {0}\nState: {1} on {2}\n
NAME_STATE_LOCK_NAME_LOCK_OWNER=Name: {0}\nState: {1} on {2} owned by: {3}\n
NEW_CONNECTION_ELLIPSIS=&New Connection...
NO_DEADLOCK_DETECTED=No deadlock detected
NON_HEAP_MEMORY_USAGE=Non-Heap Memory Usage
NON_HEAP=Non-Heap
NOTIFICATION=Notification
NOTIFICATION_BUFFER=Notification buffer
NOTIFICATIONS=Notifications
NOTIF_TYPES=NotifTypes
NUMBER_OF_THREADS=Number of Threads
NUMBER_OF_LOADED_CLASSES=Number of Loaded Classes
NUMBER_OF_PROCESSORS=Number of processors
OBJECT_NAME=ObjectName
OPERATING_SYSTEM=Operating System
OPERATION=Operation
OPERATION_INVOCATION=Operation invocation
OPERATION_RETURN_VALUE=Operation return value
OPERATIONS=Operations
OVERVIEW=Overview
OVERVIEW_PANEL_PLOTTER_ACCESSIBLE_NAME=Chart for {0}.
PARAMETER=Parameter
PASSWORD_ACCESSIBLE_NAME=Password
PASSWORD_COLON_=&Password:
PEAK=Peak
PERFORM_GC=Perform &GC
PERFORM_GC_TOOLTIP=Request Garbage Collection
PLOTTER_ACCESSIBLE_NAME=Chart
PLOTTER_ACCESSIBLE_NAME_KEY_AND_VALUE={0}={1}\n
PLOTTER_ACCESSIBLE_NAME_NO_DATA=No data plotted.
PLOTTER_SAVE_AS_MENU_ITEM=Save data &as...
PLOTTER_TIME_RANGE_MENU=&Time Range
PLUGIN_EXCEPTION_DIALOG_BUTTON_EXIT=Exit
PLUGIN_EXCEPTION_DIALOG_BUTTON_IGNORE=Ignore
PLUGIN_EXCEPTION_DIALOG_BUTTON_OK=OK
PLUGIN_EXCEPTION_DIALOG_MESSAGE=An unexpected exception has occurred in %s:\n\n%s\n\nStart with -debug for details. Ignore will suppress further exceptions.
PLUGIN_EXCEPTION_DIALOG_TITLE=Plug-in exception
PROBLEM_ADDING_LISTENER=Problem adding listener
PROBLEM_DISPLAYING_MBEAN=Problem displaying MBean
PROBLEM_INVOKING=Problem invoking
PROBLEM_REMOVING_LISTENER=Problem removing listener
PROBLEM_SETTING_ATTRIBUTE=Problem setting attribute
PROCESS_CPU_TIME=Process CPU time
READABLE=Readable
RECONNECT=Reconnect
REMOTE_PROCESS_COLON=&Remote Process:
REMOTE_PROCESS_TEXT_FIELD_ACCESSIBLE_NAME=Remote Process
RESTORE_ALL=&Restore All
RETURN_TYPE=ReturnType
SEQ_NUM=SeqNum
SIZE_BYTES={0,number,integer} bytes
SIZE_GB={0} Gb
SIZE_KB={0} Kb
SIZE_MB={0} Mb
SOURCE=Source
STACK_TRACE=\nStack trace: \n
SUMMARY_TAB_HEADER_DATE_TIME_FORMAT=FULL,FULL
SUMMARY_TAB_PENDING_FINALIZATION_LABEL=Pending finalization
SUMMARY_TAB_PENDING_FINALIZATION_VALUE={0} objects
SUMMARY_TAB_TAB_NAME=VM Summary
SUMMARY_TAB_VM_VERSION={0} version {1}
THREADS=Threads
THREAD_TAB_INFO_LABEL_FORMAT=<html>Live: {0} Peak: {1} Total: {2}</html>
THREAD_TAB_THREAD_INFO_ACCESSIBLE_NAME=Thread Information
THREAD_TAB_THREAD_PLOTTER_ACCESSIBLE_NAME=Chart for number of threads.
THREAD_TAB_INITIAL_STACK_TRACE_MESSAGE=[No thread selected]
THRESHOLD=Threshold
TILE=&Tile
TIME_RANGE_COLON=&Time Range:
TIME=Time
TIME_STAMP=TimeStamp
TOTAL_LOADED=Total Loaded
TOTAL_CLASSES_LOADED=Total classes loaded
TOTAL_CLASSES_UNLOADED=Total classes unloaded
TOTAL_COMPILE_TIME=Total compile time
TOTAL_PHYSICAL_MEMORY=Total physical memory
TOTAL_THREADS_STARTED=Total threads started
TOTAL_SWAP_SPACE=Total swap space
TYPE=Type
UNAVAILABLE=Unavailable
UNKNOWN_CAPITALIZED=UNKNOWN
UNKNOWN_HOST=Unknown Host: {0}
UNREGISTER=Unregister
UPTIME=Uptime
USAGE_THRESHOLD=Usage Threshold
REMOTE_TF_USAGE=<b>Usage</b>: &<hostname&>:&<port&> OR service:jmx:&<protocol&>:&<sap&>
USED=Used
USERNAME_COLON_=&Username:
USERNAME_ACCESSIBLE_NAME=User Name
USER_DATA=UserData
VIRTUAL_MACHINE=Virtual Machine
VM_ARGUMENTS=VM arguments
VMINTERNAL_FRAME_ACCESSIBLE_DESCRIPTION=Internal frame for monitoring a Java Virtual Machine
VALUE=Value
VENDOR=Vendor
VERBOSE_OUTPUT=Verbose Output
VERBOSE_OUTPUT_TOOLTIP=Enable verbose output for class loading system
VIEW=View
WINDOW=&Window
WINDOWS=Windows
WRITABLE=Writable
CONNECTION_FAILED1=Connection Failed: Retry?
CONNECTION_FAILED2=The connection to {0} did not succeed.<br>Would you like to try again?
CONNECTION_FAILED_SSL1=Secure connection failed. Retry insecurely?
CONNECTION_FAILED_SSL2=The connection to {0} could not be made using SSL.<br>Would you like to try without SSL?<br>(Username and password will be sent in plain text.)
CONNECTION_LOST1=Connection Lost: Reconnect?
CONNECTING_TO1=Connecting to {0}
CONNECTING_TO2=You are currently being connected to {0}.<br>This will take a few moments.
DEADLOCK_TAB=Deadlock
DEADLOCK_TAB_N=Deadlock {0}
EXPAND=expand
KBYTES={0} kbytes
PLOT=plot
VISUALIZE=visualize
ZZ_USAGE_TEXT=Usage: {0} [ -interval=n ] [ -notile ] [ -pluginpath <path> ] [ -version ] [ connection ... ]\n\n -interval Set the update interval to n seconds (default is 4 seconds)\n -notile Do not tile windows initially (for two or more connections)\n -pluginpath Specify the path that jconsole uses to look up the plugins\n -version Print program version\n\n connection = pid || host:port || JMX URL (service:jmx:<protocol>://...)\n pid The process id of a target process\n host A remote host name or IP address\n port The port number for the remote connection\n\n -J Specify the input arguments to the Java virtual machine\n on which jconsole is running
| {
"pile_set_name": "Github"
} |
# Colorful `console.log` with `util-inspect`
In previous versions of NativeScript-Vue, `console.log` was overridden to bring better logging and colors to your console.
It was [removed in this commit](https://github.com/nativescript-vue/nativescript-vue/commit/226e108b92273b7a2f3e133e71f9f4fe3f5935b0) to improve performance and reduce the size of our bundle.
If you do need this in your app, however, you can bring it back at the application level.
Keep in mind that this override should not be present in production.
```bash
$ npm install --save-dev util-inspect
```
In your main app file:
```js
import Vue from 'nativescript-vue'
if (TNS_ENV !== 'production') {
const inspect = require('util-inspect');
const newLineRegExp = /\\n/g
console.log = (function(log, inspect, Vue) {
return function(...args) {
return log.call(
this,
...Array.prototype.map.call(args, function(arg) {
return inspect(arg, {
depth: 2,
colors: Vue.config.debug,
showHidden: true
}).replace(newLineRegExp, '\n')
})
)
}
})(console.log, inspect, Vue);
}
```
With this change, everything should work the way it worked before we removed our override.
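For example, logging a nested object (output shown is illustrative; colors depend on `Vue.config.debug`):
```js
console.log({ user: { name: 'Jane', tags: ['a', 'b'] } })
// prints util-inspect's rendering, roughly:
// { user: { name: 'Jane', tags: [ 'a', 'b' ] } }
```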
| {
"pile_set_name": "Github"
} |
// Scintilla source code edit control
// Nimrod lexer
// (c) 2009 Andreas Rumpf
/** @file LexNimrod.cxx
** Lexer for Nimrod.
**/
// Copyright 1998-2002 by Neil Hodgson <[email protected]>
// The License.txt file describes the conditions under which this software may be distributed.
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdarg.h>
#include <assert.h>
#include <ctype.h>
#include "ILexer.h"
#include "Scintilla.h"
#include "SciLexer.h"
#include "WordList.h"
#include "LexAccessor.h"
#include "Accessor.h"
#include "StyleContext.h"
#include "CharacterSet.h"
#include "LexerModule.h"
using namespace Scintilla;
static inline bool IsAWordChar(int ch) {
return (ch >= 0x80) || isalnum(ch) || ch == '_';
}
static Sci_Position tillEndOfTripleQuote(Accessor &styler, Sci_Position pos, Sci_Position max) {
/* search for """ */
for (;;) {
if (styler.SafeGetCharAt(pos, '\0') == '\0') return pos;
if (pos >= max) return pos;
if (styler.Match(pos, "\"\"\"")) {
return pos + 2;
}
pos++;
}
}
#define CR 13 /* use both because Scite allows changing the line ending */
#define LF 10
static bool inline isNewLine(int ch) {
return ch == CR || ch == LF;
}
static Sci_Position scanString(Accessor &styler, Sci_Position pos, Sci_Position max, bool rawMode) {
for (;;) {
if (pos >= max) return pos;
char ch = styler.SafeGetCharAt(pos, '\0');
if (ch == CR || ch == LF || ch == '\0') return pos;
if (ch == '"') return pos;
if (ch == '\\' && !rawMode) {
pos += 2;
} else {
pos++;
}
}
}
static Sci_Position scanChar(Accessor &styler, Sci_Position pos, Sci_Position max) {
for (;;) {
if (pos >= max) return pos;
char ch = styler.SafeGetCharAt(pos, '\0');
if (ch == CR || ch == LF || ch == '\0') return pos;
if (ch == '\'' && !isalnum(styler.SafeGetCharAt(pos+1, '\0')) )
return pos;
if (ch == '\\') {
pos += 2;
} else {
pos++;
}
}
}
static Sci_Position scanIdent(Accessor &styler, Sci_Position pos, WordList &keywords) {
char buf[100]; /* copy to lowercase and ignore underscores */
Sci_Position i = 0;
for (;;) {
char ch = styler.SafeGetCharAt(pos, '\0');
if (!IsAWordChar(ch)) break;
if (ch != '_' && i < ((int)sizeof(buf))-1) {
buf[i] = static_cast<char>(tolower(ch));
i++;
}
pos++;
}
buf[i] = '\0';
/* look for keyword */
if (keywords.InList(buf)) {
styler.ColourTo(pos-1, SCE_P_WORD);
} else {
styler.ColourTo(pos-1, SCE_P_IDENTIFIER);
}
return pos;
}
static Sci_Position scanNumber(Accessor &styler, Sci_Position pos) {
char ch, ch2;
ch = styler.SafeGetCharAt(pos, '\0');
ch2 = styler.SafeGetCharAt(pos+1, '\0');
if (ch == '0' && (ch2 == 'b' || ch2 == 'B')) {
/* binary number: */
pos += 2;
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '1')) ++pos;
else break;
}
} else if (ch == '0' &&
(ch2 == 'o' || ch2 == 'O' || ch2 == 'c' || ch2 == 'C')) {
/* octal number: */
pos += 2;
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '7')) ++pos;
else break;
}
} else if (ch == '0' && (ch2 == 'x' || ch2 == 'X')) {
/* hexadecimal number: */
pos += 2;
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '9')
|| (ch >= 'a' && ch <= 'f')
|| (ch >= 'A' && ch <= 'F')) ++pos;
else break;
}
} else {
// skip decimal part:
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '9')) ++pos;
else break;
}
ch2 = styler.SafeGetCharAt(pos+1, '\0');
if (ch == '.' && ch2 >= '0' && ch2 <= '9') {
++pos; // skip '.'
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '9')) ++pos;
else break;
}
}
if (ch == 'e' || ch == 'E') {
++pos;
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '-' || ch == '+') ++pos;
for (;;) {
ch = styler.SafeGetCharAt(pos, '\0');
if (ch == '_' || (ch >= '0' && ch <= '9')) ++pos;
else break;
}
}
}
if (ch == '\'') {
/* a type suffix: */
pos++;
for (;;) {
ch = styler.SafeGetCharAt(pos);
if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'Z')
|| (ch >= 'a' && ch <= 'z') || ch == '_') ++pos;
else break;
}
}
styler.ColourTo(pos-1, SCE_P_NUMBER);
return pos;
}
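/* Examples of literals the scanner above accepts (illustrative):
   0b1010_1111, 0o777, 0xDEAD_beef, 1_000_000, 3.14e-2, 42'i8 (type suffix) */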
/* rewritten from scratch, because I couldn't get rid of the bugs...
(A character based approach sucks!)
*/
static void ColouriseNimrodDoc(Sci_PositionU startPos, Sci_Position length, int initStyle,
WordList *keywordlists[], Accessor &styler) {
Sci_Position pos = startPos;
Sci_Position max = startPos + length;
char ch;
WordList &keywords = *keywordlists[0];
styler.StartAt(startPos);
styler.StartSegment(startPos);
switch (initStyle) {
/* check where we are: */
case SCE_P_TRIPLEDOUBLE:
pos = tillEndOfTripleQuote(styler, pos, max);
styler.ColourTo(pos, SCE_P_TRIPLEDOUBLE);
pos++;
break;
default: /* nothing to do: */
break;
}
while (pos < max) {
ch = styler.SafeGetCharAt(pos, '\0');
switch (ch) {
case '\0': return;
case '#': {
bool doccomment = (styler.SafeGetCharAt(pos+1) == '#');
while (pos < max && !isNewLine(styler.SafeGetCharAt(pos, LF))) pos++;
if (doccomment)
styler.ColourTo(pos, SCE_C_COMMENTLINEDOC);
else
styler.ColourTo(pos, SCE_P_COMMENTLINE);
} break;
case 'r': case 'R': {
if (styler.SafeGetCharAt(pos+1) == '"') {
pos = scanString(styler, pos+2, max, true);
styler.ColourTo(pos, SCE_P_STRING);
pos++;
} else {
pos = scanIdent(styler, pos, keywords);
}
} break;
case '"':
if (styler.Match(pos+1, "\"\"")) {
pos = tillEndOfTripleQuote(styler, pos+3, max);
styler.ColourTo(pos, SCE_P_TRIPLEDOUBLE);
} else {
pos = scanString(styler, pos+1, max, false);
styler.ColourTo(pos, SCE_P_STRING);
}
pos++;
break;
case '\'':
pos = scanChar(styler, pos+1, max);
styler.ColourTo(pos, SCE_P_CHARACTER);
pos++;
break;
default: // identifiers, numbers, operators, whitespace
if (ch >= '0' && ch <= '9') {
pos = scanNumber(styler, pos);
} else if (IsAWordChar(ch)) {
pos = scanIdent(styler, pos, keywords);
} else if (ch == '`') {
pos++;
while (pos < max) {
ch = styler.SafeGetCharAt(pos, LF);
if (ch == '`') {
++pos;
break;
}
if (ch == CR || ch == LF) break;
++pos;
}
styler.ColourTo(pos, SCE_P_IDENTIFIER);
} else if (strchr("()[]{}:=;-\\/&%$!+<>|^?,.*~@", ch)) {
styler.ColourTo(pos, SCE_P_OPERATOR);
pos++;
} else {
styler.ColourTo(pos, SCE_P_DEFAULT);
pos++;
}
break;
}
}
}
static bool IsCommentLine(Sci_Position line, Accessor &styler) {
Sci_Position pos = styler.LineStart(line);
Sci_Position eol_pos = styler.LineStart(line + 1) - 1;
for (Sci_Position i = pos; i < eol_pos; i++) {
char ch = styler[i];
if (ch == '#')
return true;
else if (ch != ' ' && ch != '\t')
return false;
}
return false;
}
static bool IsQuoteLine(Sci_Position line, Accessor &styler) {
int style = styler.StyleAt(styler.LineStart(line)) & 31;
return ((style == SCE_P_TRIPLE) || (style == SCE_P_TRIPLEDOUBLE));
}
static void FoldNimrodDoc(Sci_PositionU startPos, Sci_Position length,
int /*initStyle - unused*/,
WordList *[], Accessor &styler) {
const Sci_Position maxPos = startPos + length;
const Sci_Position maxLines = styler.GetLine(maxPos - 1); // Requested last line
const Sci_Position docLines = styler.GetLine(styler.Length() - 1); // Available last line
const bool foldComment = styler.GetPropertyInt("fold.comment.nimrod") != 0;
const bool foldQuotes = styler.GetPropertyInt("fold.quotes.nimrod") != 0;
// Backtrack to previous non-blank line so we can determine indent level
// for any white space lines (needed esp. within triple quoted strings)
// and so we can fix any preceding fold level (which is why we go back
// at least one line in all cases)
int spaceFlags = 0;
Sci_Position lineCurrent = styler.GetLine(startPos);
int indentCurrent = styler.IndentAmount(lineCurrent, &spaceFlags, NULL);
while (lineCurrent > 0) {
lineCurrent--;
indentCurrent = styler.IndentAmount(lineCurrent, &spaceFlags, NULL);
if (!(indentCurrent & SC_FOLDLEVELWHITEFLAG) &&
(!IsCommentLine(lineCurrent, styler)) &&
(!IsQuoteLine(lineCurrent, styler)))
break;
}
int indentCurrentLevel = indentCurrent & SC_FOLDLEVELNUMBERMASK;
// Set up initial loop state
startPos = styler.LineStart(lineCurrent);
int prev_state = SCE_P_DEFAULT & 31;
if (lineCurrent >= 1)
prev_state = styler.StyleAt(startPos - 1) & 31;
int prevQuote = foldQuotes && ((prev_state == SCE_P_TRIPLE) ||
(prev_state == SCE_P_TRIPLEDOUBLE));
int prevComment = 0;
if (lineCurrent >= 1)
prevComment = foldComment && IsCommentLine(lineCurrent - 1, styler);
// Process all characters to end of requested range or end of any triple quote
// or comment that hangs over the end of the range. Cap processing in all cases
// to end of document (in case of unclosed quote or comment at end).
while ((lineCurrent <= docLines) && ((lineCurrent <= maxLines) ||
prevQuote || prevComment)) {
// Gather info
int lev = indentCurrent;
Sci_Position lineNext = lineCurrent + 1;
int indentNext = indentCurrent;
int quote = false;
if (lineNext <= docLines) {
// Information about next line is only available if not at end of document
indentNext = styler.IndentAmount(lineNext, &spaceFlags, NULL);
int style = styler.StyleAt(styler.LineStart(lineNext)) & 31;
quote = foldQuotes && ((style == SCE_P_TRIPLE) || (style == SCE_P_TRIPLEDOUBLE));
}
const int quote_start = (quote && !prevQuote);
const int quote_continue = (quote && prevQuote);
const int comment = foldComment && IsCommentLine(lineCurrent, styler);
const int comment_start = (comment && !prevComment && (lineNext <= docLines) &&
IsCommentLine(lineNext, styler) &&
(lev > SC_FOLDLEVELBASE));
const int comment_continue = (comment && prevComment);
if ((!quote || !prevQuote) && !comment)
indentCurrentLevel = indentCurrent & SC_FOLDLEVELNUMBERMASK;
if (quote)
indentNext = indentCurrentLevel;
if (indentNext & SC_FOLDLEVELWHITEFLAG)
indentNext = SC_FOLDLEVELWHITEFLAG | indentCurrentLevel;
if (quote_start) {
// Place fold point at start of triple quoted string
lev |= SC_FOLDLEVELHEADERFLAG;
} else if (quote_continue || prevQuote) {
// Add level to rest of lines in the string
lev = lev + 1;
} else if (comment_start) {
// Place fold point at start of a block of comments
lev |= SC_FOLDLEVELHEADERFLAG;
} else if (comment_continue) {
// Add level to rest of lines in the block
lev = lev + 1;
}
// Skip past any blank lines for next indent level info; we skip also
// comments (all comments, not just those starting in column 0)
// which effectively folds them into surrounding code rather
// than screwing up folding.
while (!quote &&
(lineNext < docLines) &&
((indentNext & SC_FOLDLEVELWHITEFLAG) ||
(lineNext <= docLines && IsCommentLine(lineNext, styler)))) {
lineNext++;
indentNext = styler.IndentAmount(lineNext, &spaceFlags, NULL);
}
const int levelAfterComments = indentNext & SC_FOLDLEVELNUMBERMASK;
const int levelBeforeComments =
Maximum(indentCurrentLevel,levelAfterComments);
// Now set all the indent levels on the lines we skipped
// Do this from end to start. Once we encounter one line
// which is indented more than the line after the end of
// the comment-block, use the level of the block before
Sci_Position skipLine = lineNext;
int skipLevel = levelAfterComments;
while (--skipLine > lineCurrent) {
int skipLineIndent = styler.IndentAmount(skipLine, &spaceFlags, NULL);
if ((skipLineIndent & SC_FOLDLEVELNUMBERMASK) > levelAfterComments)
skipLevel = levelBeforeComments;
int whiteFlag = skipLineIndent & SC_FOLDLEVELWHITEFLAG;
styler.SetLevel(skipLine, skipLevel | whiteFlag);
}
// Set fold header on non-quote/non-comment line
if (!quote && !comment && !(indentCurrent & SC_FOLDLEVELWHITEFLAG) ) {
if ((indentCurrent & SC_FOLDLEVELNUMBERMASK) <
(indentNext & SC_FOLDLEVELNUMBERMASK))
lev |= SC_FOLDLEVELHEADERFLAG;
}
// Keep track of triple quote and block comment state of previous line
prevQuote = quote;
prevComment = comment_start || comment_continue;
// Set fold level for this line and move to next line
styler.SetLevel(lineCurrent, lev);
indentCurrent = indentNext;
lineCurrent = lineNext;
}
// NOTE: Cannot set level of last line here because indentCurrent doesn't have
// header flag set; the loop above is crafted to take care of this case!
//styler.SetLevel(lineCurrent, indentCurrent);
}
static const char * const nimrodWordListDesc[] = {
"Keywords",
0
};
LexerModule lmNimrod(SCLEX_NIMROD, ColouriseNimrodDoc, "nimrod", FoldNimrodDoc,
nimrodWordListDesc);
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">Hello Maven</string>
<string name="hello">Hello ${artifactId}!</string>
<string name="action_settings">Settings</string>
<string name="hello_world">Hello world!</string>
</resources> | {
"pile_set_name": "Github"
} |
/**
* Copyright 2007-2016, Kaazing Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaazing.gateway.server.topic;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.kaazing.gateway.server.context.resolve.StandaloneClusterContext;
import org.kaazing.gateway.service.collections.CollectionsFactory;
import org.kaazing.test.util.ITUtil;
import com.hazelcast.core.ITopic;
import com.hazelcast.core.MessageListener;
// TODO Add a parent abstract class that defines test cases for both cluster and single node.
public class StandaloneClusterTopicTest extends AbstractClusterTopicTest {
private static final StandaloneClusterContext STANDALONE_CLUSTER_CONTEXT = new StandaloneClusterContext();
private CollectionsFactory factory;
@Rule
public RuleChain chain = ITUtil.createRuleChain(10, TimeUnit.SECONDS);
@Before
public void setUp() throws Exception {
factory = STANDALONE_CLUSTER_CONTEXT.getCollectionsFactory();
}
@Test
public void shouldRejectAddAndRemoveFromMessageListenerSameThread() throws InterruptedException {
ITopic<String> topic = factory.getTopic("topic_reject_add_remove_from_listener_same_thread");
CountDownLatch listenersCalled = new CountDownLatch(1);
MessageListener<String> m1 = message -> listenersCalled.countDown();
String name = topic.addMessageListener(m1);
MessageListener<String> m2 = message -> {
try {
topic.removeMessageListener(name); // will throw UnsupportedOperationException
} catch (UnsupportedOperationException e) {
topic.addMessageListener(m -> {}); // will also throw UnsupportedOperationException
}
};
topic.addMessageListener(m2);
topic.publish("msg1");
listenersCalled.await();
assertEquals(1, topic.getLocalTopicStats().getPublishOperationCount());
assertEquals(2, topic.getLocalTopicStats().getReceiveOperationCount());
assertTrue(topic.removeMessageListener(name));
topic.destroy();
}
@Test
public void shouldNotAllowNestedPublish() throws InterruptedException {
ITopic<String> topic = factory.getTopic("topic_nested_publish_same_thread");
CountDownLatch listenerCalled = new CountDownLatch(1);
topic.addMessageListener(message -> {
try {
topic.publish("Resend: " + message.getMessageObject());
} catch (UnsupportedOperationException e) {
listenerCalled.countDown();
}
});
topic.publish("KickOff");
listenerCalled.await();
assertEquals(1, topic.getLocalTopicStats().getPublishOperationCount());
assertEquals(1, topic.getLocalTopicStats().getReceiveOperationCount());
topic.destroy();
}
@Override
protected ITopic<String> getTopicForShouldCallMessageListenersOnTwoThreads() {
return factory.getTopic("topic_two_threads");
}
@Override
protected ITopic<String> getTopicMember1ForShouldNotifyListenersIfOneThrowsException() {
return factory.getTopic("topic_message_listener_null_pointer");
}
@Override
protected ITopic<String> getTopicMember2ForShouldNotifyListenersIfOneThrowsException() {
return factory.getTopic("topic_message_listener_null_pointer");
}
@Override
protected ITopic<String> getTopicMember1ForShouldCallMultipleTimesMessageListener() {
return factory.getTopic("topic_multiple_times_same_listener");
}
@Override
protected ITopic<String> getTopicMember2ForShouldCallMultipleTimesMessageListener() {
return factory.getTopic("topic_multiple_times_same_listener");
}
@Override
protected ITopic<String> getTopicMember1ForShouldAddAndRemoveMessageListener() {
return factory.getTopic("topic_add_remove_listener");
}
@Override
protected ITopic<String> getTopicMember2ForShouldAddAndRemoveMessageListener() {
return factory.getTopic("topic_add_remove_listener");
}
@Override
protected ITopic<String> getTopicMember1ForShouldAllowAddAndRemoveFromMessageListenerDifferentThread() {
return factory.getTopic("topic_allow_add_remove_from_listener_different_thread");
}
@Override
protected ITopic<String> getTopicMember2ForShouldAllowAddAndRemoveFromMessageListenerDifferentThread() {
return factory.getTopic("topic_allow_add_remove_from_listener_different_thread");
}
@Override
protected ITopic<String> getTopicMember1ForShouldPubSubFromMessageListeners() {
return factory.getTopic("topic_pub_sub_msg_listeners_1");
}
@Override
protected ITopic<String> getTopicMember2ForShouldPubSubFromMessageListeners() {
return factory.getTopic("topic_pub_sub_msg_listeners_2");
}
@Override
protected ITopic<String> getTopicMember1ForShouldNotDeadlockNestedPublishOnDifferentThread() {
return factory.getTopic("topic_nested_publish_different_thread_one_member");
}
@Override
protected ITopic<String> getTopicMember1ForShouldNotAddNullMessageListener() {
return factory.getTopic("topic_null_message_listener");
}
@Override
protected ITopic<String> getTopicMember1ForShouldDetectClassIncompatibility() {
return factory.getTopic("topic_class_cast");
}
@Override
protected ITopic<Integer> getTopicMember2ForShouldDetectClassIncompatibility() {
return factory.getTopic("topic_class_cast");
}
}
| {
"pile_set_name": "Github"
} |
package credentials
import (
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// EnvProviderName provides a name of Env provider
const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
//
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
retrieved bool
}
// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
return NewCredentials(&EnvProvider{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
if id == "" {
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
}
if secret == "" {
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
ProviderName: EnvProviderName,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvProvider) IsExpired() bool {
return !e.retrieved
}
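// A minimal usage sketch (illustrative; Get is provided by the Credentials
// wrapper in this package):
//
//	creds := credentials.NewEnvCredentials()
//	v, err := creds.Get() // ErrAccessKeyIDNotFound / ErrSecretAccessKeyNotFound if unset
//	if err == nil {
//		_ = v.AccessKeyID
//	}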
| {
"pile_set_name": "Github"
} |
////////////////////////////////////////////////////////////////////////////
// Copyright 2017-2018 Computer Vision Group of State Key Lab at CAD&CG,
// Zhejiang University. All Rights Reserved.
//
// For more information see <https://github.com/ZJUCVG/ENFT-SfM>
// If you use this code, please cite the corresponding publications as
// listed on the above website.
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes only.
// Any modification based on this work must be open source and prohibited
// for commercial use.
// You must retain, in the source form of any derivative works that you
// distribute, all copyright, patent, trademark, and attribution notices
// from the source form of this work.
//
//
////////////////////////////////////////////////////////////////////////////
#ifndef _HOMOGRAPHY_ESTIMATOR_H_
#define _HOMOGRAPHY_ESTIMATOR_H_
#include "Estimation/Estimator.h"
#include "HomographyEstimatorData.h"
#include "Optimization/Optimizer.h"
#include "HomographySolver.h"
typedef HomographyEstimatorData HEData;
typedef FourMatches2D HEMinimalSample;
typedef HomographyEstimatorData HENonMinimalSample;
typedef Homography HEModel;
typedef HomographySolver HESolver;
typedef OptimizerTemplate<Homography, LA::AlignedVector8f, LA::AlignedMatrix8f> HEOptimizer;
typedef ushort HEIndex;
class HomographyEstimator : public Estimator<HEData, HEMinimalSample, HENonMinimalSample, HEModel, HESolver, HEOptimizer, HEIndex>
{
public:
HomographyEstimator(const float &errTh = 0) : Estimator<HEData, HEMinimalSample, HENonMinimalSample, HEModel, HESolver, HEOptimizer, HEIndex>(errTh) {}
virtual const ushort MinimalSampleSize() const
{
return 4;
}
virtual float epsilon_exp_m(const float &epsilon) const
{
float tmp = epsilon * epsilon;
return tmp * tmp;
}
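// Rationale: a homography needs a 4-correspondence minimal sample, so with
// inlier ratio epsilon one random draw is all-inlier with probability
// epsilon^4; RANSAC's trial count therefore scales as
// log(1 - p) / log(1 - epsilon^4) for confidence p.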
virtual void DrawMinimalSample(const HEData &data, HEMinimalSample &sample) const
{
const ushort N = data.Size();
ushort i0 = Random::Generate(N);
sample.Set(data, i0, 0);
ushort i1 = Random::Generate(N);
while(i1 == i0)
i1 = Random::Generate(N);
sample.Set(data, i1, 1);
ushort i2 = Random::Generate(N);
while(i2 == i0 || i2 == i1)
i2 = Random::Generate(N);
sample.Set(data, i2, 2);
ushort i3 = Random::Generate(N);
while(i3 == i0 || i3 == i1 || i3 == i2)
i3 = Random::Generate(N);
sample.Set(data, i3, 3);
}
virtual void DrawMinimalSampleOrdered(const HEData &data, const std::vector<ushort> &orders, const ushort &n, const bool &sampleLastOne, HEMinimalSample &sample) const
{
ushort i0 = Random::Generate(n), i = orders[i0];
sample.Set(data, i, 0);
ushort i1 = Random::Generate(n);
while(i1 == i0)
i1 = Random::Generate(n);
i = orders[i1];
sample.Set(data, i, 1);
ushort i2 = Random::Generate(n);
while(i2 == i0 || i2 == i1)
i2 = Random::Generate(n);
i = orders[i2];
sample.Set(data, i, 2);
ushort i3 = n - 1;
if(!sampleLastOne || i3 == i0 || i3 == i1 || i3 == i2)
{
i3 = Random::Generate(n);
while(i3 == i0 || i3 == i1 || i3 == i2)
i3 = Random::Generate(n);
}
i = orders[i3];
sample.Set(data, i, 3);
}
//virtual void SampleOneDatum(const HEData &data, HEMinimalSample &sample, const ushort &iSrc, const ushort &iDst) const
//{
// sample.Set(data, iSrc, iDst);
//}
virtual void GenerateModels(HEMinimalSample &sample, AlignedVector<HEModel> &models)
{
models.Resize(1);
m_work.Resize(3);
sample.Normalize(m_work.Data());
if(m_solver.Run(sample, models[0], m_work))
models[0].Denormalize(sample.mean_u1v1u2v2(), sample.scale1(), sample.scale2(), m_work.Data());
else
models.Resize(0);
}
virtual void GenerateModels(HENonMinimalSample &sample, AlignedVector<HEModel> &models)
{
models.Resize(1);
m_work.Resize(6);
sample.Normalize(m_work.Data());
if(m_solver.Run(sample, models[0], m_work))
models[0].Denormalize(sample.mean_u1v1u2v2(), sample.scale1(), sample.scale2(), m_work.Data());
else
models.Resize(0);
}
virtual void VerifyModel(const HEData &data, const HEModel &model, const std::vector<bool> &inlierMarks, double &fitErr)
{
m_work.Resize(12);
ENFT_SSE::__m128 &errSq = m_work[0];
ENFT_SSE::__m128 *H = &m_work[1], *work = &m_work[10];
model.GetSSE(H);
fitErr = 0;
const ushort nPacks = data.GetPacksNumber();
for(ushort i = 0, ip1 = 1, ip2 = 2, ip3 = 3; i < nPacks; i += 4, ip1 += 4, ip2 += 4, ip3 += 4)
{
Homography::ComputeSquaredError4(H, data.GetPack(i), data.GetPack(ip1), data.GetPack(ip2), data.GetPack(ip3), errSq, work);
if(inlierMarks[i])
fitErr += errSq.m128_f32[0];
if(inlierMarks[ip1])
fitErr += errSq.m128_f32[1];
if(inlierMarks[ip2])
fitErr += errSq.m128_f32[2];
if(inlierMarks[ip3])
fitErr += errSq.m128_f32[3];
}
for(ushort iRem = 0; iRem < data.GetRemindersNumber(); ++iRem)
{
if(inlierMarks[nPacks + iRem])
fitErr += model.ComputeSquaredError(data.GetReminder1(iRem), data.GetReminder2(iRem), (float *) work);
}
}
virtual void VerifyModel(const HEData &data, const HEModel &model, std::vector<float> &errSqs)
{
m_work.Resize(12);
ENFT_SSE::__m128 &errSq = m_work[0];
ENFT_SSE::__m128 *H = &m_work[1], *work = &m_work[10];
model.GetSSE(H);
errSqs.resize(data.Size());
const ushort nPacks = data.GetPacksNumber();
for(ushort i = 0, ip1 = 1, ip2 = 2, ip3 = 3; i < nPacks; i += 4, ip1 += 4, ip2 += 4, ip3 += 4)
{
Homography::ComputeSquaredError4(H, data.GetPack(i), data.GetPack(ip1), data.GetPack(ip2), data.GetPack(ip3), errSq, work);
errSqs[i] = errSq.m128_f32[0];
errSqs[ip1] = errSq.m128_f32[1];
errSqs[ip2] = errSq.m128_f32[2];
errSqs[ip3] = errSq.m128_f32[3];
}
for(ushort iRem = 0; iRem < data.GetRemindersNumber(); ++iRem)
errSqs[nPacks + iRem] = model.ComputeSquaredError(data.GetReminder1(iRem), data.GetReminder2(iRem), (float *) work);
}
virtual void VerifyModel(const HEData &data, const HEModel &model, std::vector<ushort> &inliers, double &fitErr)
{
m_work.Resize(12);
ENFT_SSE::__m128 &errSq = m_work[0];
ENFT_SSE::__m128 *H = &m_work[1], *work = &m_work[10];
model.GetSSE(H);
inliers.resize(0);
fitErr = 0;
const ushort nPacks = data.GetPacksNumber();
for(ushort i = 0, ip1 = 1, ip2 = 2, ip3 = 3; i < nPacks; i += 4, ip1 += 4, ip2 += 4, ip3 += 4)
{
Homography::ComputeSquaredError4(H, data.GetPack(i), data.GetPack(ip1), data.GetPack(ip2), data.GetPack(ip3), errSq, work);
if(errSq.m128_f32[0] < m_ransacErrorThreshold)
{
inliers.push_back(i);
fitErr += errSq.m128_f32[0];
}
if(errSq.m128_f32[1] < m_ransacErrorThreshold)
{
inliers.push_back(ip1);
fitErr += errSq.m128_f32[1];
}
if(errSq.m128_f32[2] < m_ransacErrorThreshold)
{
inliers.push_back(ip2);
fitErr += errSq.m128_f32[2];
}
if(errSq.m128_f32[3] < m_ransacErrorThreshold)
{
inliers.push_back(ip3);
fitErr += errSq.m128_f32[3];
}
}
for(ushort iRem = 0; iRem < data.GetRemindersNumber(); ++iRem)
{
const float errSq = model.ComputeSquaredError(data.GetReminder1(iRem), data.GetReminder2(iRem), (float *) work);
if(errSq < m_ransacErrorThreshold)
{
inliers.push_back(nPacks + iRem);
fitErr += errSq;
}
}
}
virtual void VerifyModel(const HEData &data, const HEModel &model, const std::vector<bool> &inlierMarksFix, std::vector<ushort> &inliers, double &fitErr)
{
m_work.Resize(12);
ENFT_SSE::__m128 &errSq = m_work[0];
ENFT_SSE::__m128 *H = &m_work[1], *work = &m_work[10];
model.GetSSE(H);
inliers.resize(0);
fitErr = 0;
const ushort nPacks = data.GetPacksNumber();
for(ushort i = 0, ip1 = 1, ip2 = 2, ip3 = 3; i < nPacks; i += 4, ip1 += 4, ip2 += 4, ip3 += 4)
{
Homography::ComputeSquaredError4(H, data.GetPack(i), data.GetPack(ip1), data.GetPack(ip2), data.GetPack(ip3), errSq, work);
if(errSq.m128_f32[0] < m_ransacErrorThreshold)
{
inliers.push_back(i);
fitErr += errSq.m128_f32[0];
}
else if(inlierMarksFix[i])
{
inliers.resize(0);
fitErr = 0;
return;
}
if(errSq.m128_f32[1] < m_ransacErrorThreshold)
{
inliers.push_back(ip1);
fitErr += errSq.m128_f32[1];
}
else if(inlierMarksFix[ip1])
{
inliers.resize(0);
fitErr = 0;
return;
}
if(errSq.m128_f32[2] < m_ransacErrorThreshold)
{
inliers.push_back(ip2);
fitErr += errSq.m128_f32[2];
}
else if(inlierMarksFix[ip2])
{
inliers.resize(0);
fitErr = 0;
return;
}
if(errSq.m128_f32[3] < m_ransacErrorThreshold)
{
inliers.push_back(ip3);
fitErr += errSq.m128_f32[3];
}
else if(inlierMarksFix[ip3])
{
inliers.resize(0);
fitErr = 0;
return;
}
}
for(ushort iRem = 0; iRem < data.GetRemindersNumber(); ++iRem)
{
const float errSq = model.ComputeSquaredError(data.GetReminder1(iRem), data.GetReminder2(iRem), (float *) work);
if(errSq < m_ransacErrorThreshold)
{
inliers.push_back(nPacks + iRem);
fitErr += errSq;
}
else if(inlierMarksFix[nPacks + iRem])
{
inliers.resize(0);
fitErr = 0;
return;
}
}
}
virtual void OptimizeModel(HEData &data, HEModel &model, const ubyte verbose = 0)
{
m_optimizer.m_lmMaxNumIters = m_optimizeMaxNumIters;
m_optimizer.Run(data, model, verbose >= 2 ? verbose - 2 : 0);
}
};
#endif | {
"pile_set_name": "Github"
} |
{
"word": "Unintelligently",
"definitions": [
"In an unintelligent manner."
],
"parts-of-speech": "Adverb"
}
| {
"pile_set_name": "Github"
} |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/memory_space_assignment.h"
namespace xla {
namespace {
// Define a dummy chunk for chunks that will be allocated in the default memory
// space and for keeping track of the number of asynchronous copies.
const HeapSimulator::Chunk kDummyChunk{-1, -1};
} // namespace
std::vector<const GlobalDecreasingSizeBestFitHeap::BufferInterval*>
AlternateMemoryBestFitHeap::GetSortedColocatedIntervals(
const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval) const {
std::vector<const BufferInterval*> colocated_intervals;
std::vector<const BufferInterval*> worklist = {&interval};
while (!worklist.empty()) {
const BufferInterval* item = worklist.back();
worklist.pop_back();
colocated_intervals.push_back(item);
for (const HloValue* buffer_colocated : item->colocations) {
worklist.push_back(&buffer_intervals_.at(buffer_colocated));
}
}
absl::c_sort(colocated_intervals, [&](const BufferInterval* x,
const BufferInterval* y) {
return std::make_pair(x->start, x->end) < std::make_pair(y->start, y->end);
});
return colocated_intervals;
}
HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() {
std::vector<BufferInterval> sorted_buffer_intervals =
GetSortedBufferIntervals();
VLOG(1) << "Assigning buffers to alternate memory. Max heap size = "
<< max_size_in_bytes_
<< ", min prefetch interval = " << min_prefetch_interval_
<< ", max prefetch interval = " << max_prefetch_interval_;
for (auto& interval : sorted_buffer_intervals) {
if (!interval.need_allocation) {
continue;
}
// Skip if we have already allocated for this buffer.
const HloBuffer& buffer =
alias_analysis_.GetBufferContainingValue(*interval.buffer);
if (allocation_map_->contains(&buffer)) {
continue;
}
// If the buffer is a tuple, don't use this algorithm for now. The buffers
// that are pointed to by the tuple will still use this algorithm.
// TODO(berkin): Because tuples are cheap to place in the alternate memory
// (they are just pointers) we don't need to use prefetch/evict logic.
if (buffer.values()[0]->shape().IsTuple()) {
VLOG(4) << "Keeping buffer " << buffer.ToString()
<< " in default mem because it is a tuple.";
continue;
}
auto colocated_intervals = GetSortedColocatedIntervals(interval);
bool keep_in_default_memory = false;
for (const BufferInterval* colocated_interval : colocated_intervals) {
const HloValue* value = colocated_interval->buffer;
// If any of the colocated values are phi buffers, we keep them in the
// default memory for now.
if (value->is_phi()) {
keep_in_default_memory = true;
VLOG(4) << "Keeping value " << value->ToShortString()
<< " because it contains a phi node.";
break;
}
}
MemorySpaceAssignment::AllocationSequence* allocation_sequence =
&(*allocation_map_)[&buffer];
// At this point, none of the colocated buffers contain any phi buffers.
for (const BufferInterval* colocated_interval : colocated_intervals) {
if (keep_in_default_memory) {
break;
}
const HloValue* value = colocated_interval->buffer;
int64 definition_time =
instruction_schedule_->at(value->defining_instruction());
// Sort the uses by the use time.
std::vector<HloUse> uses = value->uses();
absl::c_sort(uses, [&](HloUse use1, HloUse use2) {
return instruction_schedule_->at(use1.instruction) <
instruction_schedule_->at(use2.instruction);
});
// Iterate over the uses.
for (HloUse use : uses) {
int64 use_time = instruction_schedule_->at(use.instruction);
// Bitcasts don't define buffers and don't directly consume buffers.
// Skip allocating buffers for bitcast uses. The uses that feed from
// bitcasts will be handled specially.
if (use.instruction->opcode() != HloOpcode::kBitcast) {
if (!FindAllocation(definition_time, use_time,
value->defining_position(), use, value,
colocated_interval->size, allocation_sequence)) {
// If the allocation finding failed (e.g., due to running out of
// asynchronous copies), then fall back to allocating the buffer
// entirely in the default memory.
pending_chunks_.clear();
pending_async_copies_.clear();
allocation_sequence->clear();
keep_in_default_memory = true;
break;
}
// If there are multiple uses, they can try using the memory
// allocation already in the alternate memory.
definition_time = use_time;
}
}
}
CommitPendingChunks();
}
if (VLOG_IS_ON(3)) {
for (const auto& alloc_pair : *allocation_map_) {
VLOG(3) << "Allocation for " << alloc_pair.first->ToString();
for (const auto& alloc : alloc_pair.second) {
std::string addr_str = ": default";
if (alloc->memory_space() == MemorySpace::kAlternate) {
addr_str = absl::StrCat(": alt ", alloc->chunk().offset);
}
VLOG(3) << " " << alloc->start_time() << "-" << alloc->end_time()
<< addr_str << ", " << alloc->uses().size() << " uses";
}
}
}
return result_;
}
HloInstruction* AlternateMemoryBestFitHeap::GetInstructionAt(int64 time) const {
return flattened_instruction_sequence_->instructions()[time];
}
void AlternateMemoryBestFitHeap::CommitPendingChunks() {
for (auto interval_and_chunk : pending_chunks_) {
VLOG(3) << "Committing chunk: " << interval_and_chunk.first.start << "-"
<< interval_and_chunk.first.end << " : ["
<< interval_and_chunk.second.chunk.offset << ", "
<< interval_and_chunk.second.chunk.size << "]";
CommitChunk(interval_and_chunk.first, interval_and_chunk.second);
}
pending_chunks_.clear();
// Also add the pending async copies to the interval tree.
if (max_outstanding_async_copies_ >= 0) {
for (auto interval : pending_async_copies_) {
async_copy_interval_tree_.Add(interval.first, interval.second,
kDummyChunk);
}
}
pending_async_copies_.clear();
}
void AlternateMemoryBestFitHeap::AddToPendingChunks(
const BufferInterval& buffer_interval,
const ChunkCandidate& chunk_candidate) {
pending_chunks_.emplace_back(buffer_interval, chunk_candidate);
}
bool AlternateMemoryBestFitHeap::FindAllocation(
int64 start_time, int64 end_time, HloPosition defining_position, HloUse use,
const HloValue* buffer, int64 size,
MemorySpaceAssignment::AllocationSequence* allocations) {
HloInstruction* operand =
use.instruction->mutable_operand(use.operand_number);
// If the operand is a bitcast, we look at bitcast's operand until we find a
// non-bitcast operand.
HloInstruction* non_bitcast_operand = operand;
while (non_bitcast_operand->opcode() == HloOpcode::kBitcast) {
non_bitcast_operand = non_bitcast_operand->mutable_operand(0);
}
// Create an alternate memory interval that starts at the earliest
// possible position, given by max_prefetch_interval.
BufferInterval alternate_mem_interval;
alternate_mem_interval.buffer = buffer;
alternate_mem_interval.size = size;
alternate_mem_interval.start =
std::max(start_time, end_time - max_prefetch_interval_);
alternate_mem_interval.end = end_time;
VLOG(2) << "Finding allocation for " << buffer->ToShortString() << " ("
<< start_time << ", " << end_time << "). Size = " << size
<< ", def pos = " << defining_position.ToString()
<< ", operand = " << operand->ToString()
<< (non_bitcast_operand != operand
? ", non_bitcast_operand = " + non_bitcast_operand->ToString()
: "");
CHECK_LT(start_time, end_time);
// First try keeping the allocation entirely in the alternate memory.
if (TryAllocatingInAlternateMemoryNoCopy(
start_time, end_time, defining_position, use, alternate_mem_interval,
non_bitcast_operand, allocations)) {
return true;
}
MemorySpaceAssignment::Allocation* prev_allocation = nullptr;
if (!allocations->empty()) {
prev_allocation = allocations->back().get();
}
// Since copies couldn't be removed, create an allocation in the default
// memory space.
if (prev_allocation != nullptr &&
prev_allocation->memory_space() == MemorySpace::kAlternate &&
prev_allocation->instruction() == non_bitcast_operand) {
// If there was an allocation for this HloValue that was in the alternate
// memory space, we also need to perform an eviction.
// TODO(berkin): For now evictions happen relative to the most recent
// allocation in the alternate memory. We can potentially start evictions
// earlier and end later.
VLOG(3) << "Evicting buffer at " << prev_allocation->chunk().offset << " ("
<< prev_allocation->start_time() << ", "
<< prev_allocation->end_time() << ")";
// See if this interval would violate the asynchronous copy limit.
if (!ViolatesMaximumOutstandingAsyncCopies(prev_allocation->start_time(),
prev_allocation->end_time())) {
AddAsyncCopy(*prev_allocation, MemorySpace::kDefault, kDummyChunk,
prev_allocation->start_time(), prev_allocation->end_time(),
allocations);
} else {
VLOG(3) << "This violates the maximum async copies.";
// If the original interval violated the limit, try sub-intervals within
// this interval.
bool eviction_scheduled = false;
for (int64 time = prev_allocation->start_time();
time <= prev_allocation->end_time(); ++time) {
VLOG(3) << "Try evicting (" << time << ", " << time << ")";
if (!ViolatesMaximumOutstandingAsyncCopies(time, time)) {
VLOG(3) << "Eviction successful.";
AddAsyncCopy(*prev_allocation, MemorySpace::kDefault, kDummyChunk,
time, time, allocations);
eviction_scheduled = true;
break;
}
}
if (!eviction_scheduled) {
// If the eviction couldn't be scheduled, then fail. This buffer will be
// kept in the default memory.
VLOG(3) << "Bailing: Could not evict " << use.ToString()
<< " because we hit the limit of maximum asynchronous copies "
<< "between "
<< GetInstructionAt(prev_allocation->start_time())->ToString()
<< " and "
<< GetInstructionAt(prev_allocation->end_time())->ToString();
return false;
}
}
} else if (prev_allocation != nullptr &&
prev_allocation->memory_space() == MemorySpace::kDefault &&
prev_allocation->instruction() == non_bitcast_operand) {
// If the previous allocation was in the default memory space and was
// defined by the same instruction, extend that. Otherwise, create a new
// allocation.
prev_allocation->Extend(end_time);
} else {
allocations->push_back(absl::make_unique<MemorySpaceAssignment::Allocation>(
non_bitcast_operand, defining_position, MemorySpace::kDefault,
kDummyChunk, start_time, end_time));
}
// Try partially placing the buffer in the alternate space. The time that is
// overlapped will be used to asynchronously copy the buffer from the
// default memory to the alternate memory.
//
// start end
// time time
// X---------------------X
// Alternate: +------+
// Default: +---------------------+
// ^ ^
// Copy Copy
// Start Done
for (alternate_mem_interval.start =
std::max(start_time, end_time - max_prefetch_interval_);
alternate_mem_interval.end - alternate_mem_interval.start >
min_prefetch_interval_;
++alternate_mem_interval.start) {
VLOG(4) << "Trying alternate memory allocation ("
<< alternate_mem_interval.start << ", "
<< alternate_mem_interval.end << ")";
// If this additional asynchronous copy would violate the limit, try a
// different interval.
if (ViolatesMaximumOutstandingAsyncCopies(alternate_mem_interval.start,
alternate_mem_interval.end)) {
VLOG(4) << "This would violate the outstanding async copy limit.";
continue;
}
ChunkCandidate chunk_candidate = FindChunkCandidate(alternate_mem_interval);
// Check if the new heap size fits within limits.
if (chunk_candidate.heap_size < max_size_in_bytes_) {
VLOG(3) << "Move the buffer to alternate memory at "
<< alternate_mem_interval.start
<< ". Offset = " << chunk_candidate.chunk.offset
<< ", size = " << chunk_candidate.chunk.size
<< ", heap_size = " << chunk_candidate.heap_size;
AddToPendingChunks(alternate_mem_interval, chunk_candidate);
AddAsyncCopy(*allocations->back().get(), MemorySpace::kAlternate,
chunk_candidate.chunk, alternate_mem_interval.start,
end_time, allocations);
allocations->back()->AddUse(use);
return true;
}
}
// If a copy wasn't inserted, then add this use to the latest allocation.
allocations->back()->AddUse(use);
return true;
}
void AlternateMemoryBestFitHeap::AddAsyncCopy(
const MemorySpaceAssignment::Allocation& prev_allocation,
MemorySpace memory_space, Chunk chunk, int64 start_time, int64 end_time,
MemorySpaceAssignment::AllocationSequence* allocations) {
HloInstruction* earliest_instruction = GetInstructionAt(start_time);
HloInstruction* latest_instruction = GetInstructionAt(end_time);
VLOG(3) << "Copy to "
<< (memory_space == MemorySpaceAssignment::MemorySpace::kDefault
? "default"
: "alternate")
<< " memory between instructions " << earliest_instruction->ToString()
<< " - " << latest_instruction->ToString();
allocations->push_back(
absl::make_unique<MemorySpaceAssignment::CopyAllocation>(
prev_allocation, memory_space, chunk, start_time, end_time,
earliest_instruction, latest_instruction));
// Register the additional async copy with the interval tree to keep track of
// the limit at any given time.
pending_async_copies_.emplace_back(start_time, end_time);
}
bool AlternateMemoryBestFitHeap::ViolatesMaximumOutstandingAsyncCopies(
int64 start_time, int64 end_time) const {
if (max_outstanding_async_copies_ < 0) {
return false;
}
// Count both the asynchronous copies in the interval tree and the
// pending asynchronous copies belonging to this buffer.
int64 num_async_copies =
async_copy_interval_tree_.ChunksOverlappingInTime(start_time, end_time)
.size();
for (auto interval : pending_async_copies_) {
if (interval.second > start_time && interval.first < end_time) {
num_async_copies++;
}
}
// Add one because we are checking if adding an additional asynchronous copy
// would violate the limit.
return num_async_copies + 1 > max_outstanding_async_copies_;
}
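// Illustrative example of the check above (times are instruction indices,
// values assumed): with max_outstanding_async_copies_ = 2, an interval
// tree holding copies (0, 5) and (3, 8), and a pending copy (4, 6), a
// query for (4, 7) overlaps all three, so num_async_copies = 3 and
// 3 + 1 > 2 rejects the additional copy.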
bool AlternateMemoryBestFitHeap::TryAllocatingInAlternateMemoryNoCopy(
int64 start_time, int64 end_time, HloPosition defining_position, HloUse use,
BufferInterval alternate_mem_interval, HloInstruction* non_bitcast_operand,
MemorySpaceAssignment::AllocationSequence* allocations) {
MemorySpaceAssignment::Allocation* prev_allocation = nullptr;
bool can_eliminate_copy = false;
if (allocations->empty()) {
// There haven't been any allocations for this interval so far. We can
// eliminate the copy if the value can be placed in the alternate memory.
can_eliminate_copy =
is_allowed_in_alternate_mem_(*alternate_mem_interval.buffer);
} else {
// If there has been a previous allocation, we can eliminate the copy if the
// previous allocation was also in the alternate memory.
prev_allocation = allocations->back().get();
can_eliminate_copy =
(prev_allocation->memory_space() == MemorySpace::kAlternate);
}
if (!can_eliminate_copy) {
return false;
}
if (alternate_mem_interval.start != start_time) {
return false;
}
// Prefer the offset that was previously used for the previous allocation.
int64 preferred_offset = -1;
if (prev_allocation != nullptr) {
preferred_offset = prev_allocation->chunk().offset;
// If there is a previous allocation, set the start time to one past the
// end of the previous allocation.
alternate_mem_interval.start = prev_allocation->end_time() + 1;
}
VLOG(4) << "We can eliminate copy to alternate memory. Preferred offset = "
<< preferred_offset;
ChunkCandidate chunk_candidate =
FindChunkCandidate(alternate_mem_interval, preferred_offset);
// Check if the new heap size fits within limits. Also ensure that, if a
// preferred offset was provided, that offset was used.
if (chunk_candidate.heap_size < max_size_in_bytes_ &&
(preferred_offset == -1 ||
preferred_offset == chunk_candidate.chunk.offset)) {
VLOG(3) << "Keep the buffer in alternate memory. Offset = "
<< chunk_candidate.chunk.offset
<< ", size = " << chunk_candidate.chunk.size
<< ", heap_size = " << chunk_candidate.heap_size;
AddToPendingChunks(alternate_mem_interval, chunk_candidate);
// If there was a previous allocation, the buffer location is the
// same as the previous one. Otherwise, it is the operand.
if (prev_allocation != nullptr &&
prev_allocation->instruction() == non_bitcast_operand) {
prev_allocation->Extend(end_time);
} else {
allocations->push_back(
absl::make_unique<MemorySpaceAssignment::Allocation>(
non_bitcast_operand, defining_position, MemorySpace::kAlternate,
chunk_candidate.chunk, start_time, end_time));
}
allocations->back()->AddUse(use);
return true;
}
return false;
}
/*static*/ int64 MemorySpaceAssignment::CountMaximumOutstandingAsyncCopies(
const HloModule& module) {
int64 max_copies = 0;
int64 current_copies = 0;
for (HloInstruction* instruction :
module.schedule().sequence(module.entry_computation()).instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart) {
current_copies++;
} else if (instruction->opcode() == HloOpcode::kCopyDone) {
current_copies--;
}
max_copies = std::max(max_copies, current_copies);
}
return max_copies;
}
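// Sketch of the counting above on a hypothetical schedule (illustrative
// only):
//   copy-start A  ->  current_copies = 1
//   copy-start B  ->  current_copies = 2   (peak)
//   copy-done  A  ->  current_copies = 1
//   copy-done  B  ->  current_copies = 0
// The function would return 2, the maximum number of copies in flight.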
/*static*/ StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::Run(
HloModule* module, int64 alternate_memory_space, int64 max_size_in_bytes,
int64 min_prefetch_interval, int64 max_prefetch_interval,
int64 alternate_memory_space_alignment_in_bytes,
BufferValue::SizeFunction size_fn,
AlternateMemoryBestFitHeap::IsAllowedInAlternateMemoryFunction
is_allowed_in_alternate_mem,
int64 max_outstanding_async_copies) {
CHECK(module->has_schedule());
VLOG(4) << "Module before memory space assignment: ";
XLA_VLOG_LINES(4, module->ToString());
VLOG(4) << "Schedule: " << module->schedule().ToString();
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module));
MemorySpaceAssignment memory_space_assignment(module, alternate_memory_space);
// TODO(berkin): Explore heap algorithms other than kSpatial.
auto algorithm = absl::make_unique<AlternateMemoryBestFitHeap>(
&memory_space_assignment.allocation_map_, max_size_in_bytes,
min_prefetch_interval, max_prefetch_interval, *alias_analysis,
alternate_memory_space_alignment_in_bytes,
GlobalDecreasingSizeBestFitHeap::Type::kSpatial,
is_allowed_in_alternate_mem, max_outstanding_async_copies);
TF_RETURN_IF_ERROR(HeapSimulator::Run(std::move(algorithm), *module,
module->schedule(),
*alias_analysis.get(), size_fn)
.status());
TF_RETURN_IF_ERROR(memory_space_assignment.Process());
TF_RETURN_IF_ERROR(memory_space_assignment.FixSchedule());
VLOG(4) << "Module after memory space assignment: ";
XLA_VLOG_LINES(4, module->ToString());
TF_CHECK_OK(module->schedule().Verify());
VLOG(1) << "Maximum number of outstanding async copies: "
<< CountMaximumOutstandingAsyncCopies(*module);
return std::move(memory_space_assignment.preset_assignments_);
}
void MemorySpaceAssignment::Allocation::AddUse(HloUse use) {
HloInstruction* operand =
use.instruction->mutable_operand(use.operand_number);
// When the operand of a use is a bitcast, we place the bitcast in a separate
// data structure.
if (operand->opcode() == HloOpcode::kBitcast) {
bitcasts_.push_back(operand);
} else {
uses_.push_back(use);
}
}
Status MemorySpaceAssignment::Allocation::PropagateMemorySpaceToBitcasts(
const MemorySpaceAssignment& memory_space_assignment) {
for (HloInstruction* bitcast : bitcasts_) {
if (memory_space_ == MemorySpace::kAlternate) {
Layout* bitcast_layout = bitcast->mutable_shape()->mutable_layout();
bitcast_layout->set_memory_space(
memory_space_assignment.alternate_memory_space_);
}
}
return Status::OK();
}
Status MemorySpaceAssignment::Allocation::Process(
MemorySpaceAssignment* memory_space_assignment) {
// For non-copy allocations, all we need to do is to update the output memory
// space if placed in the alternate memory.
if (memory_space_ == MemorySpace::kAlternate) {
Layout* layout = instruction_->mutable_shape()->mutable_layout();
layout->set_memory_space(memory_space_assignment->alternate_memory_space_);
}
TF_RETURN_IF_ERROR(PropagateMemorySpaceToBitcasts(*memory_space_assignment));
return Status::OK();
}
Status MemorySpaceAssignment::CopyAllocation::Process(
MemorySpaceAssignment* memory_space_assignment) {
// Copy allocations need to insert asynchronous copy nodes.
HloInstruction* producing_instruction = instruction();
CHECK_NE(producing_instruction, nullptr);
Shape shape = producing_instruction->shape();
HloComputation* computation = producing_instruction->parent();
// Set the layout to include the memory space.
Layout* layout = shape.mutable_layout();
if (memory_space_ == MemorySpace::kAlternate) {
layout->set_memory_space(memory_space_assignment->alternate_memory_space_);
} else {
layout->set_memory_space(0);
}
HloInstruction* copy_start =
computation->AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {})}),
HloOpcode::kCopyStart, producing_instruction));
HloInstruction* copy_done = computation->AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopyDone, copy_start));
// Update the allocation with the copy done instruction so that if there
// are further copies from it, it can find the correct instruction.
instruction_ = copy_done;
// Also update the defining position. Note that the output of CopyDone is
// actually defined in the item {0} of CopyStart.
defining_position_ = HloPosition{copy_start, {0}};
// Replace all the uses with the new copy instruction.
for (HloUse use : uses_) {
TF_RETURN_IF_ERROR(
use.instruction->ReplaceOperandWith(use.operand_number, copy_done));
}
// Replace all the bitcasts with the new copy instruction. Note that if there
// is a chain of bitcasts, their operands will be replaced with copy done.
// For example:
//
// a = Foo()
// b = Bitcast(a)
// c = Bitcast(b)
//
// If a is moved to the alternate memory asynchronously, the graph will be
// changed into:
//
// a = Foo()
// cs = CopyStart(a)
// cd = CopyDone(cs)
// b = Bitcast(cd)
// c = Bitcast(cd)
//
// Because of the potential shape change in the operand (b -> cd), we use
// ReplaceOperandWithDifferentShape.
for (HloInstruction* bitcast : bitcasts_) {
TF_RETURN_IF_ERROR(bitcast->ReplaceOperandWithDifferentShape(
/*operand_num=*/0, instruction_));
}
// Propagate the memory space to all bitcasts.
TF_RETURN_IF_ERROR(PropagateMemorySpaceToBitcasts(*memory_space_assignment));
// Insert the new instructions at the appropriate places in the schedule.
// FixSchedule will process the maps to actually insert them.
memory_space_assignment->ScheduleAsynchronousCopy(
copy_start, copy_start_schedule_after_, copy_done,
copy_done_schedule_before_);
return Status::OK();
}
Status MemorySpaceAssignment::Process() {
// Insert CopyStart/CopyDone pairs.
int64 alternate_memory_size = 0;
for (auto& buffer_and_sequence : allocation_map_) {
for (auto& allocation : buffer_and_sequence.second) {
TF_RETURN_IF_ERROR(allocation->Process(this));
// Add the offset and size of the allocation in the alternate memory to
// the output map. Special case for bitcast: since a bitcast doesn't define
// its own buffer, it shouldn't be exported as a preset chunk.
if (allocation->memory_space() == MemorySpace::kAlternate &&
allocation->instruction()->opcode() != HloOpcode::kBitcast) {
preset_assignments_->add_chunk(allocation->defining_position(),
allocation->chunk());
alternate_memory_size =
std::max(alternate_memory_size, allocation->chunk().chunk_end());
}
}
}
if (!preset_assignments_->chunks().empty()) {
preset_assignments_->add_size(alternate_memory_space_,
alternate_memory_size);
}
if (VLOG_IS_ON(3)) {
VLOG(3) << "Exported alternate memory allocations:";
for (auto& pair : preset_assignments_->chunks()) {
VLOG(3) << " [" << pair.second.offset << ", " << pair.second.size
<< "] : " << pair.first.ToString();
}
VLOG(3) << "Exported alternate memory sizes:";
for (auto& pair : preset_assignments_->sizes()) {
VLOG(3) << " space: " << pair.first << ", size: " << pair.second;
}
}
return Status::OK();
}
void MemorySpaceAssignment::ScheduleAsynchronousCopy(
HloInstruction* copy_start, HloInstruction* copy_start_schedule_after,
HloInstruction* copy_done, HloInstruction* copy_done_schedule_before) {
schedule_after_[copy_start_schedule_after].push_back(copy_start);
schedule_before_[copy_done_schedule_before].push_back(copy_done);
}
void MemorySpaceAssignment::EnsureInstructionAndOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions) const {
if (inserted_instructions->contains(new_instruction)) {
return;
}
for (HloInstruction* operand : new_instruction->operands()) {
EnsureInstructionAndOperandsInserted(operand, new_sequence,
inserted_instructions);
}
VLOG(4) << "inserting: " << new_instruction->ToString();
new_sequence->push_back(new_instruction);
inserted_instructions->insert(new_instruction);
}
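// Example of the insertion order produced above (assumed operand graph,
// illustrative only): for d = Add(b, c) with b = Bitcast(a), inserting d
// first recurses into b (which recurses into a) and then into c, so the
// new sequence receives a, b, c, d -- every instruction appears after all
// of its operands, and the inserted_instructions set suppresses
// duplicates.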
Status MemorySpaceAssignment::FixSchedule() {
CHECK(module_->has_schedule());
HloSchedule& schedule = module_->schedule();
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
CHECK(schedule.is_computation_scheduled(computation));
const HloInstructionSequence& sequence = schedule.sequence(computation);
HloInstructionSequence new_sequence;
absl::flat_hash_set<HloInstruction*> inserted_instructions;
for (HloInstruction* instruction : sequence.instructions()) {
auto insts_before_iter = schedule_before_.find(instruction);
if (insts_before_iter != schedule_before_.end()) {
for (HloInstruction* new_instruction : insts_before_iter->second) {
EnsureInstructionAndOperandsInserted(new_instruction, &new_sequence,
&inserted_instructions);
}
}
// Insert only if not previously inserted.
if (!inserted_instructions.contains(instruction)) {
EnsureInstructionAndOperandsInserted(instruction, &new_sequence,
&inserted_instructions);
}
auto insts_after_iter = schedule_after_.find(instruction);
if (insts_after_iter != schedule_after_.end()) {
for (HloInstruction* new_instruction : insts_after_iter->second) {
EnsureInstructionAndOperandsInserted(new_instruction, &new_sequence,
&inserted_instructions);
}
}
}
schedule.set_sequence(computation, new_sequence);
}
return Status::OK();
}
} // namespace xla
| {
"pile_set_name": "Github"
} |
CREATE TABLE "plugin_sms_member" (
"id_member" INTEGER PRIMARY KEY AUTOINCREMENT,
"phone_number" TEXT NOT NULL,
"reg_date" DATETIME NOT NULL
); | {
"pile_set_name": "Github"
} |
package frontdoor
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ExperimentsClient is the frontDoor Client
type ExperimentsClient struct {
BaseClient
}
// NewExperimentsClient creates an instance of the ExperimentsClient client.
func NewExperimentsClient(subscriptionID string) ExperimentsClient {
return NewExperimentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewExperimentsClientWithBaseURI creates an instance of the ExperimentsClient client using a custom endpoint. Use
// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewExperimentsClientWithBaseURI(baseURI string, subscriptionID string) ExperimentsClient {
return ExperimentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
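// Example usage (a minimal sketch; resource names are placeholders and
// credential setup is elided):
//
//	client := NewExperimentsClient("<subscription-id>")
//	// client.Authorizer must be set before any call, e.g. via an
//	// autorest.Authorizer obtained from your preferred auth flow.
//	exp, err := client.Get(ctx, "myResourceGroup", "myProfile", "myExperiment")
//	if err != nil {
//		// handle error
//	}
//	_ = exp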
// CreateOrUpdate sends the create or update request.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// profileName - the Profile identifier associated with the Tenant and Partner
// experimentName - the Experiment identifier associated with the Experiment
// parameters - the Experiment resource
func (client ExperimentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, profileName string, experimentName string, parameters Experiment) (result ExperimentsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: profileName,
Constraints: []validation.Constraint{{Target: "profileName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: experimentName,
Constraints: []validation.Constraint{{Target: "experimentName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("frontdoor.ExperimentsClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, profileName, experimentName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ExperimentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, profileName string, experimentName string, parameters Experiment) (*http.Request, error) {
pathParameters := map[string]interface{}{
"experimentName": autorest.Encode("path", experimentName),
"profileName": autorest.Encode("path", profileName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-11-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ExperimentsClient) CreateOrUpdateSender(req *http.Request) (future ExperimentsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ExperimentsClient) CreateOrUpdateResponder(resp *http.Response) (result Experiment, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete sends the delete request.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// profileName - the Profile identifier associated with the Tenant and Partner
// experimentName - the Experiment identifier associated with the Experiment
func (client ExperimentsClient) Delete(ctx context.Context, resourceGroupName string, profileName string, experimentName string) (result ExperimentsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.Delete")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: profileName,
Constraints: []validation.Constraint{{Target: "profileName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: experimentName,
Constraints: []validation.Constraint{{Target: "experimentName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("frontdoor.ExperimentsClient", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, profileName, experimentName)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ExperimentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, profileName string, experimentName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"experimentName": autorest.Encode("path", experimentName),
"profileName": autorest.Encode("path", profileName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-11-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ExperimentsClient) DeleteSender(req *http.Request) (future ExperimentsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ExperimentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get sends the get request.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// profileName - the Profile identifier associated with the Tenant and Partner
// experimentName - the Experiment identifier associated with the Experiment
func (client ExperimentsClient) Get(ctx context.Context, resourceGroupName string, profileName string, experimentName string) (result Experiment, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: profileName,
Constraints: []validation.Constraint{{Target: "profileName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: experimentName,
Constraints: []validation.Constraint{{Target: "experimentName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("frontdoor.ExperimentsClient", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, profileName, experimentName)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client ExperimentsClient) GetPreparer(ctx context.Context, resourceGroupName string, profileName string, experimentName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"experimentName": autorest.Encode("path", experimentName),
"profileName": autorest.Encode("path", profileName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-11-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ExperimentsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ExperimentsClient) GetResponder(resp *http.Response) (result Experiment, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByProfile sends the list by profile request.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// profileName - the Profile identifier associated with the Tenant and Partner
func (client ExperimentsClient) ListByProfile(ctx context.Context, resourceGroupName string, profileName string) (result ExperimentListPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.ListByProfile")
defer func() {
sc := -1
if result.el.Response.Response != nil {
sc = result.el.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: profileName,
Constraints: []validation.Constraint{{Target: "profileName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("frontdoor.ExperimentsClient", "ListByProfile", err.Error())
}
result.fn = client.listByProfileNextResults
req, err := client.ListByProfilePreparer(ctx, resourceGroupName, profileName)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "ListByProfile", nil, "Failure preparing request")
return
}
resp, err := client.ListByProfileSender(req)
if err != nil {
result.el.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "ListByProfile", resp, "Failure sending request")
return
}
result.el, err = client.ListByProfileResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "ListByProfile", resp, "Failure responding to request")
}
return
}
// ListByProfilePreparer prepares the ListByProfile request.
func (client ExperimentsClient) ListByProfilePreparer(ctx context.Context, resourceGroupName string, profileName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"profileName": autorest.Encode("path", profileName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-11-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByProfileSender sends the ListByProfile request. The method will close the
// http.Response Body if it receives an error.
func (client ExperimentsClient) ListByProfileSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByProfileResponder handles the response to the ListByProfile request. The method always
// closes the http.Response Body.
func (client ExperimentsClient) ListByProfileResponder(resp *http.Response) (result ExperimentList, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByProfileNextResults retrieves the next set of results, if any.
func (client ExperimentsClient) listByProfileNextResults(ctx context.Context, lastResults ExperimentList) (result ExperimentList, err error) {
req, err := lastResults.experimentListPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "listByProfileNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByProfileSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "listByProfileNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByProfileResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "listByProfileNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByProfileComplete enumerates all values, automatically crossing page boundaries as required.
func (client ExperimentsClient) ListByProfileComplete(ctx context.Context, resourceGroupName string, profileName string) (result ExperimentListIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.ListByProfile")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByProfile(ctx, resourceGroupName, profileName)
return
}
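// Example usage (a sketch; NotDone/Value/NextWithContext are the standard
// autorest iterator surface generated alongside ExperimentListIterator):
//
//	iter, err := client.ListByProfileComplete(ctx, "myResourceGroup", "myProfile")
//	for err == nil && iter.NotDone() {
//		_ = iter.Value() // one Experiment per step
//		err = iter.NextWithContext(ctx)
//	}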
// Update updates an Experiment
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// profileName - the Profile identifier associated with the Tenant and Partner
// experimentName - the Experiment identifier associated with the Experiment
// parameters - the Experiment Update Model
func (client ExperimentsClient) Update(ctx context.Context, resourceGroupName string, profileName string, experimentName string, parameters ExperimentUpdateModel) (result ExperimentsUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExperimentsClient.Update")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: profileName,
Constraints: []validation.Constraint{{Target: "profileName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
{TargetValue: experimentName,
Constraints: []validation.Constraint{{Target: "experimentName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("frontdoor.ExperimentsClient", "Update", err.Error())
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, profileName, experimentName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "frontdoor.ExperimentsClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client ExperimentsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, profileName string, experimentName string, parameters ExperimentUpdateModel) (*http.Request, error) {
pathParameters := map[string]interface{}{
"experimentName": autorest.Encode("path", experimentName),
"profileName": autorest.Encode("path", profileName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-11-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ExperimentsClient) UpdateSender(req *http.Request) (future ExperimentsUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client ExperimentsClient) UpdateResponder(resp *http.Response) (result Experiment, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| {
"pile_set_name": "Github"
} |
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"os"
"path"
"strings"
"k8s.io/klog"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
return namer.NameSystems{}
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
// Packages makes packages to generate.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
klog.Fatalf("Failed loading boilerplate: %v", err)
}
packages := generator.Packages{}
for _, inputDir := range arguments.InputDirs {
pkg := context.Universe.Package(inputDir)
internal, err := isInternal(pkg)
if err != nil {
klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err)
continue
}
if internal {
klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name)
continue
}
registerFileName := "register.go"
searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName)
if _, err := os.Stat(path.Join(searchPath)); err == nil {
klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath)
continue
} else if err != nil && !os.IsNotExist(err) {
klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName)
}
gv := clientgentypes.GroupVersion{}
{
pathParts := strings.Split(pkg.Path, "/")
if len(pathParts) < 2 {
klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path)
continue
}
gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2])
gv.Version = clientgentypes.Version(pathParts[len(pathParts)-1])
// if there is a comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io",
// extract the fully qualified API group name from it and overwrite the group inferred from the package path
if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil {
groupName := override[0]
klog.V(5).Infof("overriding the group name with = %s", groupName)
gv.Group = clientgentypes.Group(groupName)
}
}
typesToRegister := []*types.Type{}
for _, t := range pkg.Types {
klog.V(5).Infof("considering type = %s", t.Name.String())
for _, typeMember := range t.Members {
if typeMember.Name == "TypeMeta" && typeMember.Embedded == true {
typesToRegister = append(typesToRegister, t)
}
}
}
packages = append(packages,
&generator.DefaultPackage{
PackageName: pkg.Name,
PackagePath: pkg.Path,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
return []generator.Generator{
&registerExternalGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: arguments.OutputFileBaseName,
},
gv: gv,
typesToGenerate: typesToRegister,
outputPackage: pkg.Path,
imports: generator.NewImportTracker(),
},
}
},
})
}
return packages
}
// isInternal determines whether the given package
// contains internal types or not
func isInternal(p *types.Package) (bool, error) {
for _, t := range p.Types {
for _, member := range t.Members {
if member.Name == "TypeMeta" {
return !strings.Contains(member.Tags, "json"), nil
}
}
}
return false, fmt.Errorf("unable to find TypeMeta for any types in package %s", p.Path)
}
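// For reference, the shape of the distinction isInternal relies on
// (illustrative declarations, not from this package):
//
//	type External struct {
//		metav1.TypeMeta `json:",inline"` // json tag present -> external
//	}
//
//	type Internal struct {
//		metav1.TypeMeta // no json tag -> isInternal returns true
//	}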
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="refresh" content="0;URL=struct.SplitTerminator.html">
</head>
<body>
<p>Redirecting to <a href="struct.SplitTerminator.html">struct.SplitTerminator.html</a>...</p>
<script>location.replace("struct.SplitTerminator.html" + location.search + location.hash);</script>
</body>
</html> | {
"pile_set_name": "Github"
} |
/*
* This file is part of OpenTTD.
* OpenTTD is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2.
* OpenTTD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenTTD. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file fileio_func.h Functions for Standard In/Out file operations */
#ifndef FILEIO_FUNC_H
#define FILEIO_FUNC_H
#include "core/enum_type.hpp"
#include "fileio_type.h"
void FioSeekTo(size_t pos, int mode);
void FioSeekToFile(uint slot, size_t pos);
size_t FioGetPos();
const char *FioGetFilename(uint slot);
byte FioReadByte();
uint16 FioReadWord();
uint32 FioReadDword();
void FioCloseAll();
void FioOpenFile(uint slot, const char *filename, Subdirectory subdir, char **output_filename = nullptr);
void FioReadBlock(void *ptr, size_t size);
void FioSkipBytes(int n);
/**
* The search paths OpenTTD could search through.
* At least one of the slots has to be filled with a path.
* nullptr paths indicate that there is no such path for the
* current operating system.
*/
extern const char *_searchpaths[NUM_SEARCHPATHS];
/**
* Checks whether the given search path is a valid search path
* @param sp the search path to check
* @return true if the search path is valid
*/
static inline bool IsValidSearchPath(Searchpath sp)
{
return sp < NUM_SEARCHPATHS && _searchpaths[sp] != nullptr;
}
/** Iterator for all the search paths */
#define FOR_ALL_SEARCHPATHS(sp) for (sp = SP_FIRST_DIR; sp < NUM_SEARCHPATHS; sp++) if (IsValidSearchPath(sp))
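/**
 * Example use of the iterator macro above (a minimal sketch):
 *
 *   Searchpath sp;
 *   FOR_ALL_SEARCHPATHS(sp) {
 *       // _searchpaths[sp] is guaranteed non-null here
 *   }
 */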
void FioFCloseFile(FILE *f);
FILE *FioFOpenFile(const char *filename, const char *mode, Subdirectory subdir, size_t *filesize = nullptr, char **output_filename = nullptr);
bool FioCheckFileExists(const char *filename, Subdirectory subdir);
char *FioGetFullPath(char *buf, const char *last, Searchpath sp, Subdirectory subdir, const char *filename);
char *FioFindFullPath(char *buf, const char *last, Subdirectory subdir, const char *filename);
char *FioAppendDirectory(char *buf, const char *last, Searchpath sp, Subdirectory subdir);
char *FioGetDirectory(char *buf, const char *last, Subdirectory subdir);
void FioCreateDirectory(const char *name);
const char *FiosGetScreenshotDir();
void SanitizeFilename(char *filename);
bool AppendPathSeparator(char *buf, const char *last);
void DeterminePaths(const char *exe);
void *ReadFileToMem(const char *filename, size_t *lenp, size_t maxsize);
bool FileExists(const char *filename);
bool ExtractTar(const char *tar_filename, Subdirectory subdir);
extern const char *_personal_dir; ///< custom directory for personal settings, saves, newgrf, etc.
/** Helper for scanning for files with a given name */
class FileScanner {
protected:
Subdirectory subdir; ///< The current sub directory we are searching through
public:
/** Destruct the proper one... */
virtual ~FileScanner() {}
uint Scan(const char *extension, Subdirectory sd, bool tars = true, bool recursive = true);
uint Scan(const char *extension, const char *directory, bool recursive = true);
/**
* Add a file with the given filename.
* @param filename the full path to the file to read
* @param basepath_length amount of characters to chop of before to get a
* filename relative to the search path.
* @param tar_filename the name of the tar file the file is read from.
* @return true if the file is added.
*/
virtual bool AddFile(const char *filename, size_t basepath_length, const char *tar_filename) = 0;
};
/** Helper for scanning for files with tar as extension */
class TarScanner : FileScanner {
uint DoScan(Subdirectory sd);
public:
/** The mode of tar scanning. */
enum Mode {
NONE = 0, ///< Scan nothing.
BASESET = 1 << 0, ///< Scan for base sets.
NEWGRF = 1 << 1, ///< Scan for non-base sets.
AI = 1 << 2, ///< Scan for AIs and their libraries.
SCENARIO = 1 << 3, ///< Scan for scenarios and heightmaps.
GAME = 1 << 4, ///< Scan for game scripts.
ALL = BASESET | NEWGRF | AI | SCENARIO | GAME, ///< Scan for everything.
};
bool AddFile(const char *filename, size_t basepath_length, const char *tar_filename = nullptr) override;
bool AddFile(Subdirectory sd, const char *filename);
/** Do the scan for Tars. */
static uint DoScan(TarScanner::Mode mode);
};
DECLARE_ENUM_AS_BIT_SET(TarScanner::Mode)
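/* Illustrative composition (hypothetical call site): the bit-set declaration
 * above makes the flags combinable, e.g.
 *   TarScanner::DoScan(TarScanner::BASESET | TarScanner::NEWGRF);
 * scans for base sets and NewGRFs in a single pass. */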
/* Implementation of opendir/readdir/closedir for Windows */
#if defined(_WIN32)
struct DIR;
struct dirent { // XXX - only d_name implemented
TCHAR *d_name; // name of found file
/* little hack which will point to parent DIR struct which will
* save us a call to GetFileAttributes if we want information
* about the file (for example in function fio_bla) */
DIR *dir;
};
DIR *opendir(const TCHAR *path);
struct dirent *readdir(DIR *d);
int closedir(DIR *d);
#else
/* Use system-supplied opendir/readdir/closedir functions */
# include <sys/types.h>
# include <dirent.h>
#endif /* defined(_WIN32) */
/**
* A wrapper around opendir() which will convert the string from
* OPENTTD encoding to that of the filesystem. For all purposes this
* function behaves the same as the original opendir function.
* @param path string to open directory of
* @return DIR pointer
*/
static inline DIR *ttd_opendir(const char *path)
{
return opendir(OTTD2FS(path));
}
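/* Usage sketch (illustrative): iterating a directory through the wrapper.
 *   DIR *dir = ttd_opendir(path);
 *   if (dir != nullptr) {
 *     struct dirent *entry;
 *     while ((entry = readdir(dir)) != nullptr) { ... }
 *     closedir(dir);
 *   } */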
/** Auto-close a file upon scope exit. */
class FileCloser {
FILE *f;
public:
FileCloser(FILE *_f) : f(_f) {}
~FileCloser()
{
fclose(f);
}
};
/** Helper to manage a FILE with a \c std::unique_ptr. */
struct FileDeleter {
void operator()(FILE *f)
{
if (f) fclose(f);
}
};
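/* Usage sketch (illustrative; NEWGRF_DIR stands in for any Subdirectory):
 *   std::unique_ptr<FILE, FileDeleter> f(FioFOpenFile("sample.grf", "rb", NEWGRF_DIR));
 *   // no manual fclose() needed; the deleter closes the handle on scope exit
 */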
#endif /* FILEIO_FUNC_H */
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2005-2011 MaNGOS <http://getmangos.com/>
* Copyright (C) 2009-2011 MaNGOSZero <https://github.com/mangos/zero>
* Copyright (C) 2011-2016 Nostalrius <https://nostalrius.org>
* Copyright (C) 2016-2017 Elysium Project <https://github.com/elysium-project>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "Common.h"
#include "UpdateMask.h"
#include "Opcodes.h"
#include "World.h"
#include "ObjectAccessor.h"
#include "Database/DatabaseEnv.h"
#include "GridNotifiers.h"
#include "CellImpl.h"
#include "GridNotifiersImpl.h"
#include "SpellMgr.h"
DynamicObject::DynamicObject() : WorldObject(), m_effIndex(EFFECT_INDEX_0), m_spellId(0), m_aliveDuration(0), m_positive(false), m_radius(0)
{
m_objectType |= TYPEMASK_DYNAMICOBJECT;
m_objectTypeId = TYPEID_DYNAMICOBJECT;
m_updateFlag = (UPDATEFLAG_ALL | UPDATEFLAG_HAS_POSITION);
m_valuesCount = DYNAMICOBJECT_END;
}
void DynamicObject::AddToWorld()
{
///- Register the dynamicObject for guid lookup
if (!IsInWorld())
GetMap()->InsertObject<DynamicObject>(GetObjectGuid(), this);
Object::AddToWorld();
}
void DynamicObject::RemoveFromWorld()
{
///- Remove the dynamicObject from the accessor
if (IsInWorld())
{
GetMap()->EraseObject<DynamicObject>(GetObjectGuid());
GetViewPoint().Event_RemovedFromWorld();
}
Object::RemoveFromWorld();
}
bool DynamicObject::Create(uint32 guidlow, Unit *caster, uint32 spellId, SpellEffectIndex effIndex, float x, float y, float z, int32 duration, float radius, DynamicObjectType type)
{
WorldObject::_Create(guidlow, HIGHGUID_DYNAMICOBJECT);
SetMap(caster->GetMap());
Relocate(x, y, z, 0);
if (!IsPositionValid())
{
sLog.outError("DynamicObject (spell %u eff %u) not created. Suggested coordinates isn't valid (X: %f Y: %f)", spellId, effIndex, GetPositionX(), GetPositionY());
return false;
}
SetEntry(spellId);
SetObjectScale(DEFAULT_OBJECT_SCALE);
SetGuidValue(DYNAMICOBJECT_CASTER, caster->GetObjectGuid());
/* Bytes field, so it's really 4 bit fields. These flags are unknown, but we do know that 0x00000001 is set for most.
Farsight for example, does not have this flag, instead it has 0x80000002.
Flags are set dynamically with some conditions, so one spell may have different flags set, depending on those conditions.
The size of the visual may be controlled to some degree with these flags.
uint32 bytes = 0x00000000;
bytes |= 0x01;
bytes |= 0x00 << 8;
bytes |= 0x00 << 16;
bytes |= 0x00 << 24;
*/
uint32 bytes = type;
// Nostalrius: Flare. Fixes the visual diameter.
if (spellId == 1543)
bytes = 0x10; // No idea what this value actually represents ...
SetUInt32Value(DYNAMICOBJECT_BYTES, bytes);
SetUInt32Value(DYNAMICOBJECT_SPELLID, spellId);
if (bytes == 0x10)
SetFloatValue(DYNAMICOBJECT_RADIUS, radius * 2.0f); // Could this actually be the diameter in this case?
else
SetFloatValue(DYNAMICOBJECT_RADIUS, radius);
SetFloatValue(DYNAMICOBJECT_POS_X, x);
SetFloatValue(DYNAMICOBJECT_POS_Y, y);
SetFloatValue(DYNAMICOBJECT_POS_Z, z);
SpellEntry const* spellProto = sSpellMgr.GetSpellEntry(spellId);
if (!spellProto)
{
sLog.outError("DynamicObject (spell %u) not created. Spell not exist!", spellId, GetPositionX(), GetPositionY());
return false;
}
m_aliveDuration = duration;
m_radius = radius;
m_effIndex = effIndex;
m_spellId = spellId;
m_positive = IsPositiveEffect(spellProto, m_effIndex);
m_channeled = IsChanneledSpell(spellProto);
return true;
}
Unit* DynamicObject::GetCaster() const
{
// may not be found in some cases
return ObjectAccessor::GetUnit(*this, GetCasterGuid());
}
void DynamicObject::Update(uint32 update_diff, uint32 p_time)
{
WorldObject::Update(update_diff, p_time);
// The caster may no longer be in the world when the dynamic object updates, but the dynamic object has not yet been deleted in the Unit destructor.
Unit* caster = GetCaster();
if (!caster)
{
Delete();
return;
}
if (_deleted)
return;
// If this object is from the current channeled spell, do not delete it. Otherwise
// we can lose the last tick of the effect due to differing updates. The spell
// itself will call for the object to be removed at the end of the cast.
bool deleteThis = false;
m_aliveDuration -= p_time;
if (m_aliveDuration <= 0)
m_aliveDuration = 0;
if (m_aliveDuration == 0 && (!m_channeled || caster->GetChannelObjectGuid() != GetObjectGuid()))
deleteThis = true;
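// Advance the per-target timers consumed by NeedsRefresh(), which allows re-application once 2000 ms have elapsed.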
for (AffectedMap::iterator iter = m_affected.begin(); iter != m_affected.end(); ++iter)
iter->second += update_diff;
// has a radius and works as a persistent effect
if (m_radius)
{
// TODO: make a timer and update this in larger intervals
MaNGOS::DynamicObjectUpdater notifier(*this, caster, m_positive);
Cell::VisitAllObjects(this, notifier, m_radius);
// Nostalrius
// Hackfix for Explosive Trap. Must only trigger once.
switch (m_spellId)
{
case 13812: // rank 1
case 14314: // rank 2
case 14315: // rank 3
m_radius = 0.0f;
break;
}
}
if (deleteThis)
{
caster->RemoveDynObjectWithGUID(GetObjectGuid());
Delete();
}
}
void DynamicObject::Delete()
{
SendObjectDeSpawnAnim(GetObjectGuid());
AddObjectToRemoveList();
}
void DynamicObject::Delay(int32 delaytime)
{
m_aliveDuration -= delaytime;
for (AffectedMap::iterator iter = m_affected.begin(); iter != m_affected.end();)
{
Unit *target = GetMap()->GetUnit(iter->first);
if (target)
{
SpellAuraHolder *holder = target->GetSpellAuraHolder(m_spellId, GetCasterGuid());
if (!holder)
{
++iter;
continue;
}
bool foundAura = false;
for (int32 i = m_effIndex + 1; i < MAX_EFFECT_INDEX; ++i)
{
if ((holder->GetSpellProto()->Effect[i] == SPELL_EFFECT_PERSISTENT_AREA_AURA || holder->GetSpellProto()->Effect[i] == SPELL_EFFECT_ADD_FARSIGHT) && holder->m_auras[i])
{
foundAura = true;
break;
}
}
if (foundAura)
{
++iter;
continue;
}
target->DelaySpellAuraHolder(m_spellId, delaytime, GetCasterGuid());
++iter;
}
else
m_affected.erase(iter++);
}
}
bool DynamicObject::isVisibleForInState(Player const* u, WorldObject const* viewPoint, bool inVisibleList) const
{
if (!IsInWorld() || !u->IsInWorld())
return false;
// always seen by owner
if (GetCasterGuid() == u->GetObjectGuid())
return true;
// normal case
return IsWithinDistInMap(viewPoint, GetMap()->GetVisibilityDistance() + (inVisibleList ? World::GetVisibleObjectGreyDistance() : 0.0f) + GetVisibilityModifier(), false);
}
bool DynamicObject::IsHostileTo(Unit const* unit) const
{
if (Unit* owner = GetCaster())
return owner->IsHostileTo(unit);
else
return false;
}
bool DynamicObject::IsFriendlyTo(Unit const* unit) const
{
if (Unit* owner = GetCaster())
return owner->IsFriendlyTo(unit);
else
return true;
}
bool DynamicObject::NeedsRefresh(Unit* unit) const
{
AffectedMap::const_iterator it = m_affected.find(unit->GetObjectGuid());
return it == m_affected.end() || it->second > 2000;
}
| {
"pile_set_name": "Github"
} |
export type RuLocale = 'ru';
export type EnLocale = 'en';
export type Locales = RuLocale | EnLocale;
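// Usage sketch (illustrative): only the two literal strings type-check.
// const current: Locales = 'en'; // OK
// const wrong: Locales = 'de'; // compile-time error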
export type Langs = Locales;
| {
"pile_set_name": "Github"
} |
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 0, 1 // big-endian: the high byte is written first
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
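// Lead-byte patterns for reference: 0xxxxxxx = 1 byte, 110xxxxx = 2 bytes,
// 1110xxxx = 3 bytes, 11110xxx = 4 bytes; the masks below extract the payload bits.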
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
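// Worked example (illustrative): U+1F600 gives value = 0x1F600-0x10000 = 0xF600,
// which encodes as the surrogate pair 0xD83D 0xDE00.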
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) // keep only the low surrogate's top two payload bits
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}
| {
"pile_set_name": "Github"
} |
dyn_integloop.getIntegrator(trick.Runge_Kutta_4, 5)
trick.exec_set_terminate_time(5.2)
| {
"pile_set_name": "Github"
} |
import CoreGraphics
// Case 1 - witness is imported accessor
protocol OtherPoint {
associatedtype FloatType
var x: FloatType { get set }
var y: FloatType { get set }
}
extension CGPoint: OtherPoint {}
// Case 2 - witness is extension method of imported type
extension CGPoint {
var z: Float {
get { return 0.0 }
set { }
}
}
| {
"pile_set_name": "Github"
} |
import {containerReducer} from '../../../../packages/rcre/src/core/Container/reducer';
import {containerActionCreators} from '../../../../packages/rcre/src/core/Container/action';
import {setWith, deleteWith, Events, createReduxStore, RunTimeContextCollection, DataProviderEvent} from 'rcre';
describe('Container State', () => {
let initState: {
TEST: Object
};
let KEY = 'TEST';
beforeEach(() => {
initState = {
TEST: {}
};
});
const context: RunTimeContextCollection = {
rcre: {
$global: {},
$location: {
query: '123'
},
$query: {},
debug: false,
lang: '',
loadMode: 'default',
dataProviderEvent: new DataProviderEvent(),
events: new Events(),
store: createReduxStore(),
containerGraph: new Map(),
mode: 'React'
},
// @ts-ignore
container: {},
// @ts-ignore
form: {},
// @ts-ignore
iterator: {}
};
it('setData', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker',
value: '2017-12-20'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
datepicker: '2017-12-20'
});
});
it('setData with array path', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker.0.year',
value: '2018'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
datepicker: {
0: {
year: '2018'
}
}
});
});
it('setData with number path', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker.0.year',
value: '2018'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
datepicker: {
0: {
year: '2018'
}
}
});
});
it('multiSetData returns a different model', () => {
let oneUpdate = containerActionCreators.setData({
name: 'str',
value: 'a'
// @ts-ignore
}, KEY, context);
let twoUpdate = containerActionCreators.setData({
name: 'str',
value: 'b'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, oneUpdate);
expect(initState[KEY] === state).toBe(false);
expect(containerReducer(state, twoUpdate) === state).toBe(false);
});
it('setDataWithRepeat', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker',
value: '2017-12-20'
// @ts-ignore
}, KEY, context);
let repeatAction = containerActionCreators.setData({
name: 'datepicker',
value: '2018-01-01'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, updateAction);
state = containerReducer(state, repeatAction);
expect(state[KEY]).toEqual({
datepicker: '2018-01-01'
});
});
it('setData with invalid model', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker',
value: '2017-12-20'
// @ts-ignore
}, 'UNKNOWN', context);
let state = containerReducer(initState, updateAction);
expect(state).toEqual(initState);
});
it('setData nameGroup', () => {
let updateAction = containerActionCreators.setData({
name: 'datepicker.startTime.timestamp',
value: '2017-12-20'
// @ts-ignore
}, KEY, context);
let repeatAction = containerActionCreators.setData({
name: 'datepicker.startTime.timestamp',
value: '2018-01-01'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, updateAction);
state = containerReducer(state, repeatAction);
expect(state[KEY]).toEqual({
datepicker: {
startTime: {
timestamp: '2018-01-01'
}
}
});
});
it('asyncLoadProgress', () => {
let updateAction = containerActionCreators.asyncLoadDataProgress({
model: KEY
});
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({$loading: true});
});
it('asyncLoadFail', () => {
let updateAction = containerActionCreators.asyncLoadDataFail({
model: KEY,
error: 'you got an error'
});
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
$loading: false,
$error: 'you got an error'
});
});
it('asynLoadDataSuccess', () => {
let updateAction = containerActionCreators.asyncLoadDataSuccess({
model: KEY,
data: {
name: 1
},
// @ts-ignore
context: context
});
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
name: 1,
$error: null,
$loading: false
});
});
it('syncLoadSuccess', () => {
let updateAction = containerActionCreators.syncLoadDataSuccess({
model: KEY,
data: {
name: 1
},
// @ts-ignore
context: context
});
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
name: 1
});
});
it('syncLoadFail', () => {
let updateAction = containerActionCreators.syncLoadDataFail({
model: KEY,
error: 'you got an error'
});
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
$error: 'you got an error'
});
});
it('dataCustomerPass', () => {
let updateAction = containerActionCreators.dataCustomerPass({
model: KEY,
data: {
name: 1
}
// @ts-ignore
}, context);
let state = containerReducer(initState, updateAction);
expect(state[KEY]).toEqual({
name: 1
});
});
it('removeData', () => {
let addAction = containerActionCreators.setData({
name: 'name',
value: 1
// @ts-ignore
}, KEY, context);
let updateAction = containerActionCreators.clearData({
model: KEY,
// @ts-ignore
context: context
});
let state = containerReducer(initState, addAction);
state = containerReducer(state, updateAction);
expect(state[KEY]).toEqual(undefined);
});
it('clearData', () => {
let addAction = containerActionCreators.setData({
name: 'name',
value: 1
// @ts-ignore
}, KEY, context);
let updateAction = containerActionCreators.clearData({
model: KEY,
// @ts-ignore
context: context
});
let state = containerReducer(initState, addAction);
state = containerReducer(state, updateAction);
expect(state).toEqual({});
});
it('deleteData', () => {
let addAction = containerActionCreators.setData({
name: 'name',
value: 1
// @ts-ignore
}, KEY, context);
let deleteAction = containerActionCreators.deleteData({
name: 'name'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, addAction);
state = containerReducer(state, deleteAction);
expect(state[KEY]).toEqual({});
});
it('deleteData with paths', () => {
let addAction = containerActionCreators.setData({
name: 'name.age.a.b.c.d',
value: 1
// @ts-ignore
}, KEY, context);
let deleteAction = containerActionCreators.deleteData({
name: 'name.age.a.b.c.d'
// @ts-ignore
}, KEY, context);
let state = containerReducer(initState, addAction);
state = containerReducer(state, deleteAction);
expect(state[KEY]).toEqual({
name: {
age: {
a: {
b: {
c: {}
}
}
}
}
});
});
it('deleteWith', () => {
let obj = {
name: 'andycall',
arr: {
name: 'yhtree',
some: {
nest: 2
}
},
11111: {
name: 2
},
number: {
123456: 'aaa'
},
testArr: [{
name: '1'
}, 2, 3, {name: 1}, 5]
};
let retObj = deleteWith(obj, 'name');
expect(retObj['11111'] === obj['11111']).toBe(true);
let retObj2 = deleteWith(obj, 'arr.name');
expect(retObj2['11111'] === retObj['11111']).toBe(true);
expect(retObj2.arr === retObj.arr).toBe(false);
expect(retObj2.arr.some === retObj.arr.some).toBe(true);
let retObj3 = deleteWith(retObj2, 'testArr[0]');
expect(retObj3.testArr[0]).toBe(undefined);
expect(retObj3.testArr[3] === retObj.testArr[3]).toBe(true);
});
it('[setWith] point path', () => {
let object = {
name: {
age: '22'
},
other: {
text: '1234'
}
};
let obj = setWith(object, 'name.age', 1);
expect(object).toEqual({
name: {
age: '22'
},
other: {
text: '1234'
}
});
expect(obj).toEqual({
name: {
age: 1
},
other: {
text: '1234'
}
});
expect(object.name === obj['name']).toBe(false);
expect(object.other === obj['other']).toBe(true);
});
it('[setWith] arr path', () => {
let object = {
arr: [
1,
{
name: '1234'
}
],
other: {
text: '1234'
},
bad: null
};
let copy: any = setWith(object, 'arr[0]', 2);
expect(copy.arr[0]).toBe(2);
expect(copy.arr[1] === object.arr[1]).toBe(true);
expect(copy.other === object.other).toBe(true);
copy = setWith(copy, 'arr[1].name', '4567');
expect(copy.arr[1].name).toBe('4567');
expect(copy.arr[1] !== object.arr[1]).toBe(true);
expect(copy.other === object.other).toBe(true);
copy = setWith(copy, 'a.b.c.d.e.f.g', 10);
expect(copy.a.b.c.d.e.f.g).toBe(10);
copy = setWith(copy, 'unknown', 100);
expect(copy.unknown).toBe(100);
copy = setWith(copy, 'bad', undefined);
expect(copy.bad).toBe(undefined);
copy = setWith(copy, 'bad.bad', undefined);
expect(copy.bad.bad).toBe(undefined);
copy = setWith(copy, 'number.0', 1);
expect(copy.number).toEqual({
0: 1
});
copy = setWith(object, 'number2.0[0]', 1);
expect(copy.number2).toEqual({
0: [1]
});
copy = setWith(object, 'number3.1.2.3.4.5[10][0]', 1);
expect(copy.number3).toEqual({
1: {
2: {
3: {
4: {
5: [
undefined, undefined, undefined, undefined, undefined,
undefined, undefined, undefined, undefined, undefined,
[1]
]
}
}
}
}
});
copy = setWith(object, 'a[0][1]', 10);
expect(copy.a).toEqual([[undefined, 10]]);
});
it('[deleteWith] non exist path', () => {
let state = {
openExp: {
a: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
}
};
// delete a non-existent entry
let newState = deleteWith(state, 'openExp.a.11.allow_percent');
expect(newState).toEqual(state);
});
});
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
<maintainer type="person">
<email>[email protected]</email>
<name>Linlin Yan</name>
</maintainer>
<maintainer type="project">
<email>[email protected]</email>
<name>Gentoo Biology Project</name>
</maintainer>
<longdescription lang="en">
Bismark is a program to map bisulfite treated sequencing reads to a genome of
interest and perform methylation calls in a single step. The output can be
easily imported into a genome viewer, such as SeqMonk, and enables a researcher
to analyse the methylation levels of their samples straight away. Its main
features are: (1) Bisulfite mapping and methylation calling in one single step;
(2) Supports single-end and paired-end read alignments; (3) Supports ungapped
and gapped alignments; (4) Alignment seed length, number of mismatches etc. are
adjustable; (5) Output discriminates between cytosine methylation in CpG, CHG
and CHH context.
</longdescription>
</pkgmetadata>
| {
"pile_set_name": "Github"
} |
--
-- Test cases for COPY (INSERT/UPDATE/DELETE) TO
--
create table copydml_test (id serial, t text);
insert into copydml_test (t) values ('a');
insert into copydml_test (t) values ('b');
insert into copydml_test (t) values ('c');
insert into copydml_test (t) values ('d');
insert into copydml_test (t) values ('e');
--
-- Test COPY (insert/update/delete ...)
--
copy (insert into copydml_test (t) values ('f') returning id) to stdout;
6
copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout;
6
copy (delete from copydml_test where t = 'g' returning id) to stdout;
6
--
-- Test \copy (insert/update/delete ...)
--
\copy (insert into copydml_test (t) values ('f') returning id) to stdout;
7
\copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout;
7
\copy (delete from copydml_test where t = 'g' returning id) to stdout;
7
-- Error cases
copy (insert into copydml_test default values) to stdout;
ERROR: COPY query must have a RETURNING clause
copy (update copydml_test set t = 'g') to stdout;
ERROR: COPY query must have a RETURNING clause
copy (delete from copydml_test) to stdout;
ERROR: COPY query must have a RETURNING clause
create rule qqq as on insert to copydml_test do instead nothing;
copy (insert into copydml_test default values) to stdout;
ERROR: DO INSTEAD NOTHING rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on insert to copydml_test do also delete from copydml_test;
copy (insert into copydml_test default values) to stdout;
ERROR: DO ALSO rules are not supported for the COPY
drop rule qqq on copydml_test;
create rule qqq as on insert to copydml_test do instead (delete from copydml_test; delete from copydml_test);
copy (insert into copydml_test default values) to stdout;
ERROR: multi-statement DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on insert to copydml_test where new.t <> 'f' do instead delete from copydml_test;
copy (insert into copydml_test default values) to stdout;
ERROR: conditional DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on update to copydml_test do instead nothing;
copy (update copydml_test set t = 'f') to stdout;
ERROR: DO INSTEAD NOTHING rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on update to copydml_test do also delete from copydml_test;
copy (update copydml_test set t = 'f') to stdout;
ERROR: DO ALSO rules are not supported for the COPY
drop rule qqq on copydml_test;
create rule qqq as on update to copydml_test do instead (delete from copydml_test; delete from copydml_test);
copy (update copydml_test set t = 'f') to stdout;
ERROR: multi-statement DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on update to copydml_test where new.t <> 'f' do instead delete from copydml_test;
copy (update copydml_test set t = 'f') to stdout;
ERROR: conditional DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on delete to copydml_test do instead nothing;
copy (delete from copydml_test) to stdout;
ERROR: DO INSTEAD NOTHING rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on delete to copydml_test do also insert into copydml_test default values;
copy (delete from copydml_test) to stdout;
ERROR: DO ALSO rules are not supported for the COPY
drop rule qqq on copydml_test;
create rule qqq as on delete to copydml_test do instead (insert into copydml_test default values; insert into copydml_test default values);
copy (delete from copydml_test) to stdout;
ERROR: multi-statement DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
create rule qqq as on delete to copydml_test where old.t <> 'f' do instead insert into copydml_test default values;
copy (delete from copydml_test) to stdout;
ERROR: conditional DO INSTEAD rules are not supported for COPY
drop rule qqq on copydml_test;
-- triggers
create function qqq_trig() returns trigger as $$
begin
if tg_op in ('INSERT', 'UPDATE') then
raise notice '% %', tg_op, new.id;
return new;
else
raise notice '% %', tg_op, old.id;
return old;
end if;
end
$$ language plpgsql;
create trigger qqqbef before insert or update or delete on copydml_test
for each row execute procedure qqq_trig();
create trigger qqqaf after insert or update or delete on copydml_test
for each row execute procedure qqq_trig();
copy (insert into copydml_test (t) values ('f') returning id) to stdout;
NOTICE: INSERT 8
8
NOTICE: INSERT 8
copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout;
NOTICE: UPDATE 8
8
NOTICE: UPDATE 8
copy (delete from copydml_test where t = 'g' returning id) to stdout;
NOTICE: DELETE 8
8
NOTICE: DELETE 8
drop table copydml_test;
drop function qqq_trig();
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
set -e
# usage: ./generate.sh [versions]
# ie: ./generate.sh
# to update all Dockerfiles in this directory
# or: ./generate.sh centos-7
# to only update centos-7/Dockerfile
# or: ./generate.sh fedora-newversion
# to create a new folder and a Dockerfile within it
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
fi
versions=( "${versions[@]%/}" )
for version in "${versions[@]}"; do
distro="${version%-*}"
suite="${version##*-}"
from="${distro}:${suite}"
installer=yum
if [[ "$distro" == "fedora" ]]; then
installer=dnf
fi
if [[ "$distro" == "photon" ]]; then
installer=tdnf
fi
mkdir -p "$version"
echo "$version -> FROM $from"
cat > "$version/Dockerfile" <<-EOF
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"!
#
FROM $from
EOF
echo >> "$version/Dockerfile"
extraBuildTags=
runcBuildTags=
case "$from" in
oraclelinux:6)
# We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version
# This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo
echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile"
echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
;;
fedora:*)
echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile"
;;
*) ;;
esac
case "$from" in
centos:*|amazonlinux:latest)
# get "Development Tools" packages dependencies
echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
if [[ "$version" == "centos-7" ]]; then
echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile"
fi
;;
oraclelinux:*)
# get "Development Tools" packages and dependencies
# we also need yum-utils for yum-config-manager to pull the latest repo file
echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
;;
opensuse:*)
# get rpm-build and curl packages and dependencies
echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile"
;;
photon:*)
echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils" >> "$version/Dockerfile"
;;
*)
echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile"
;;
esac
packages=(
btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
device-mapper-devel # for "libdevmapper.h"
glibc-static
libseccomp-devel # for "seccomp.h" & "libseccomp.so"
libselinux-devel # for "libselinux.so"
pkgconfig # for the pkg-config command
selinux-policy
selinux-policy-devel
systemd-devel # for "sd-journal.h" and libraries
tar # older versions of dev-tools do not have tar
git # required for containerd and runc clone
cmake # tini build
vim-common # tini build
)
case "$from" in
oraclelinux:7)
# Enable the optional repository
packages=( --enablerepo=ol7_optional_latest "${packages[*]}" )
;;
esac
case "$from" in
oraclelinux:6|amazonlinux:latest)
# doesn't use systemd, doesn't have a devel package for it
packages=( "${packages[@]/systemd-devel}" )
;;
esac
# opensuse & oraclelinux:6 do not have the right libseccomp libs
case "$from" in
opensuse:*|oraclelinux:6)
packages=( "${packages[@]/libseccomp-devel}" )
runcBuildTags="selinux"
;;
*)
extraBuildTags+=' seccomp'
runcBuildTags="seccomp selinux"
;;
esac
case "$from" in
opensuse:*)
packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" )
packages=( "${packages[@]/pkgconfig/pkg-config}" )
packages=( "${packages[@]/vim-common/vim}" )
if [[ "$from" == "opensuse:13."* ]]; then
packages+=( systemd-rpm-macros )
fi
# use zypper
echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile"
;;
photon:*)
packages=( "${packages[@]/pkgconfig/pkg-config}" )
echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile"
;;
*)
echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile"
;;
esac
echo >> "$version/Dockerfile"
awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile"
echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
# print build tags in alphabetical order
buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
case "$from" in
oraclelinux:6)
# We need to set the CGO_CPPFLAGS environment variable to use the updated UEKR4 headers with all the userns stuff.
# The ordering is very important and should not be changed.
echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile"
echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile"
echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile"
echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile"
echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile"
echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
;;
*) ;;
esac
done
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of the php-cache organization.
*
* (c) 2015 Aaron Scherer <[email protected]>, Tobias Nyholm <[email protected]>
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace Cache\Adapter\Common\Exception;
use Psr\Cache\InvalidArgumentException as CacheInvalidArgumentException;
use Psr\SimpleCache\InvalidArgumentException as SimpleCacheInvalidArgumentException;
class InvalidArgumentException extends CacheException implements CacheInvalidArgumentException, SimpleCacheInvalidArgumentException
{
}
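/*
 * Usage sketch (illustrative): since the class implements both the PSR-6 and
 * PSR-16 invalid-argument interfaces, a single catch clause covers either API:
 *
 *   try { $pool->getItem($key); } catch (\Psr\Cache\InvalidArgumentException $e) { ... }
 */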
| {
"pile_set_name": "Github"
} |
{
"name": "meteor-theme-hexo",
"version": "1.0.15",
"description": "[Read the docs docs.](https://docs-docs.netlify.com/docs/docs/)",
"main": "index.js",
"scripts": {
"test": "node ./tests/index.js"
},
"repository": {
"type": "git",
"url": "git+https://github.com/meteor/meteor-theme-hexo.git"
},
"keywords": [],
"author": "",
"license": "MIT",
"bugs": {
"url": "https://github.com/meteor/meteor-hexo-theme/issues"
},
"homepage": "https://github.com/meteor/meteor-hexo-theme#readme",
"devDependencies": {
"fs-extra": "^5.0.0",
"hanabi": "^0.4.0",
"js-yaml": "^3.10.0",
"shelljs": "^0.8.1",
"simple-git": "^1.89.0",
"tarball-extract": "0.0.6",
"tmp-promise": "^1.0.4"
},
"dependencies": {}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1020"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES">
<Testables>
</Testables>
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
| {
"pile_set_name": "Github"
} |
/*!
\page RecoBTag_Configuration Package RecoBTag/Configuration
<center>
<small>
<!-- @CVS_TAG@ will be substituted at build time, no need to touch -->
<a href=http://cmsdoc.cern.ch/swdev/viewcvs/viewcvs.cgi/CMSSW/RecoBTag/Configuration/?cvsroot=CMSSW&only_with_tag=@CVS_TAG@>Source code (CVS tag: @CVS_TAG@)</a> -
<a href=http://cmsdoc.cern.ch/swdev/viewcvs/viewcvs.cgi/CMSSW/RecoBTag/Configuration/.admin/developers?rev=HEAD&cvsroot=CMSSW&content-type=text/vnd.viewcvs-markup>Administrative privileges</a>
</small>
</center>
\section desc Description
<!-- Short description of what this package is supposed to provide -->
This package documents the b tag contents of the RECO and AOD datasets.
\subsection interface Public interface
<!-- List the classes that are provided for use in other packages (if any) -->
\subsection modules Modules
<!-- Describe modules implemented in this package and their parameter set -->
\subsection tests Unit tests and examples
<!-- Describe cppunit tests and example configuration files -->
Unknown
\section status Status and planned development
<!-- e.g. completed, stable, missing features -->
Unknown
<hr>
Last updated:
@DATE@ Author: I.Tomalin
*/
| {
"pile_set_name": "Github"
} |
// Copyright 2014 MongoDB Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <mongocxx/model/replace_one.hpp>
#include <mongocxx/config/private/prelude.hh>
namespace mongocxx {
MONGOCXX_INLINE_NAMESPACE_BEGIN
namespace model {
replace_one::replace_one(bsoncxx::document::view_or_value filter,
bsoncxx::document::view_or_value replacement)
: _filter(std::move(filter)), _replacement(std::move(replacement)) {}
const bsoncxx::document::view_or_value& replace_one::filter() const {
return _filter;
}
const bsoncxx::document::view_or_value& replace_one::replacement() const {
return _replacement;
}
replace_one& replace_one::collation(bsoncxx::document::view_or_value collation) {
_collation = collation;
return *this;
}
const stdx::optional<bsoncxx::document::view_or_value>& replace_one::collation() const {
return _collation;
}
replace_one& replace_one::upsert(bool upsert) {
_upsert = upsert;
return *this;
}
const stdx::optional<bool>& replace_one::upsert() const {
return _upsert;
}
replace_one& replace_one::hint(class hint index_hint) {
_hint = std::move(index_hint);
return *this;
}
const stdx::optional<class hint>& replace_one::hint() const {
return _hint;
}
} // namespace model
MONGOCXX_INLINE_NAMESPACE_END
} // namespace mongocxx
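// Usage sketch (illustrative; assumes a mongocxx 3.x `collection coll` and two
// bsoncxx documents in scope -- none of these names come from this file):
//   mongocxx::model::replace_one op{filter_doc, replacement_doc};
//   op.upsert(true);
//   coll.create_bulk_write().append(mongocxx::model::write{op}).execute();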
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
var gTestfile = 'regress-452498-030.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 452498;
var summary = 'TM: upvar2 regression tests';
var actual = '';
var expect = '';
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
// ------- Comment #30 From Mike Shaver
function f() { var i = 0; var i = 5; }
f();
reportCompare(expect, actual, summary);
exitFunc ('test');
}
| {
"pile_set_name": "Github"
} |
/*
** Debug library.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
#define lib_debug_c
#define LUA_LIB
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"
#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_debug.h"
#include "lj_lib.h"
/* ------------------------------------------------------------------------ */
#define LJLIB_MODULE_debug
LJLIB_CF(debug_getregistry)
{
copyTV(L, L->top++, registry(L));
return 1;
}
LJLIB_CF(debug_getmetatable)
{
lj_lib_checkany(L, 1);
if (!lua_getmetatable(L, 1)) {
setnilV(L->top-1);
}
return 1;
}
LJLIB_CF(debug_setmetatable)
{
lj_lib_checktabornil(L, 2);
L->top = L->base+2;
lua_setmetatable(L, 1);
#if !LJ_52
setboolV(L->top-1, 1);
#endif
return 1;
}
LJLIB_CF(debug_getfenv)
{
lj_lib_checkany(L, 1);
lua_getfenv(L, 1);
return 1;
}
LJLIB_CF(debug_setfenv)
{
lj_lib_checktab(L, 2);
L->top = L->base+2;
if (!lua_setfenv(L, 1))
lj_err_caller(L, LJ_ERR_SETFENV);
return 1;
}
/* ------------------------------------------------------------------------ */
static void settabss(lua_State *L, const char *i, const char *v)
{
lua_pushstring(L, v);
lua_setfield(L, -2, i);
}
static void settabsi(lua_State *L, const char *i, int v)
{
lua_pushinteger(L, v);
lua_setfield(L, -2, i);
}
static void settabsb(lua_State *L, const char *i, int v)
{
lua_pushboolean(L, v);
lua_setfield(L, -2, i);
}
static lua_State *getthread(lua_State *L, int *arg)
{
if (L->base < L->top && tvisthread(L->base)) {
*arg = 1;
return threadV(L->base);
} else {
*arg = 0;
return L;
}
}
static void treatstackoption(lua_State *L, lua_State *L1, const char *fname)
{
if (L == L1) {
lua_pushvalue(L, -2);
lua_remove(L, -3);
}
else
lua_xmove(L1, L, 1);
lua_setfield(L, -2, fname);
}
LJLIB_CF(debug_getinfo)
{
lj_Debug ar;
int arg, opt_f = 0, opt_L = 0;
lua_State *L1 = getthread(L, &arg);
const char *options = luaL_optstring(L, arg+2, "flnSu");
if (lua_isnumber(L, arg+1)) {
if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) {
setnilV(L->top-1);
return 1;
}
} else if (L->base+arg < L->top && tvisfunc(L->base+arg)) {
options = lua_pushfstring(L, ">%s", options);
setfuncV(L1, L1->top++, funcV(L->base+arg));
} else {
lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL);
}
if (!lj_debug_getinfo(L1, options, &ar, 1))
lj_err_arg(L, arg+2, LJ_ERR_INVOPT);
lua_createtable(L, 0, 16); /* Create result table. */
for (; *options; options++) {
switch (*options) {
case 'S':
settabss(L, "source", ar.source);
settabss(L, "short_src", ar.short_src);
settabsi(L, "linedefined", ar.linedefined);
settabsi(L, "lastlinedefined", ar.lastlinedefined);
settabss(L, "what", ar.what);
break;
case 'l':
settabsi(L, "currentline", ar.currentline);
break;
case 'u':
settabsi(L, "nups", ar.nups);
settabsi(L, "nparams", ar.nparams);
settabsb(L, "isvararg", ar.isvararg);
break;
case 'n':
settabss(L, "name", ar.name);
settabss(L, "namewhat", ar.namewhat);
break;
case 'f': opt_f = 1; break;
case 'L': opt_L = 1; break;
default: break;
}
}
if (opt_L) treatstackoption(L, L1, "activelines");
if (opt_f) treatstackoption(L, L1, "func");
return 1; /* Return result table. */
}
LJLIB_CF(debug_getlocal)
{
int arg;
lua_State *L1 = getthread(L, &arg);
lua_Debug ar;
const char *name;
int slot = lj_lib_checkint(L, arg+2);
if (tvisfunc(L->base+arg)) {
L->top = L->base+arg+1;
lua_pushstring(L, lua_getlocal(L, NULL, slot));
return 1;
}
if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
name = lua_getlocal(L1, &ar, slot);
if (name) {
lua_xmove(L1, L, 1);
lua_pushstring(L, name);
lua_pushvalue(L, -2);
return 2;
} else {
setnilV(L->top-1);
return 1;
}
}
LJLIB_CF(debug_setlocal)
{
int arg;
lua_State *L1 = getthread(L, &arg);
lua_Debug ar;
TValue *tv;
if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
tv = lj_lib_checkany(L, arg+3);
copyTV(L1, L1->top++, tv);
lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2)));
return 1;
}
static int debug_getupvalue(lua_State *L, int get)
{
int32_t n = lj_lib_checkint(L, 2);
const char *name;
lj_lib_checkfunc(L, 1);
name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
if (name) {
lua_pushstring(L, name);
if (!get) return 1;
copyTV(L, L->top, L->top-2);
L->top++;
return 2;
}
return 0;
}
LJLIB_CF(debug_getupvalue)
{
return debug_getupvalue(L, 1);
}
LJLIB_CF(debug_setupvalue)
{
lj_lib_checkany(L, 3);
return debug_getupvalue(L, 0);
}
LJLIB_CF(debug_upvalueid)
{
GCfunc *fn = lj_lib_checkfunc(L, 1);
int32_t n = lj_lib_checkint(L, 2) - 1;
if ((uint32_t)n >= fn->l.nupvalues)
lj_err_arg(L, 2, LJ_ERR_IDXRNG);
setlightudV(L->top-1, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
(void *)&fn->c.upvalue[n]);
return 1;
}
LJLIB_CF(debug_upvaluejoin)
{
GCfunc *fn[2];
GCRef *p[2];
int i;
for (i = 0; i < 2; i++) {
int32_t n;
fn[i] = lj_lib_checkfunc(L, 2*i+1);
if (!isluafunc(fn[i]))
lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC);
n = lj_lib_checkint(L, 2*i+2) - 1;
if ((uint32_t)n >= fn[i]->l.nupvalues)
lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG);
p[i] = &fn[i]->l.uvptr[n];
}
setgcrefr(*p[0], *p[1]);
lj_gc_objbarrier(L, fn[0], gcref(*p[1]));
return 0;
}
#if LJ_52
LJLIB_CF(debug_getuservalue)
{
TValue *o = L->base;
if (o < L->top && tvisudata(o))
settabV(L, o, tabref(udataV(o)->env));
else
setnilV(o);
L->top = o+1;
return 1;
}
LJLIB_CF(debug_setuservalue)
{
TValue *o = L->base;
if (!(o < L->top && tvisudata(o)))
lj_err_argt(L, 1, LUA_TUSERDATA);
if (!(o+1 < L->top && tvistab(o+1)))
lj_err_argt(L, 2, LUA_TTABLE);
L->top = o+2;
lua_setfenv(L, 1);
return 1;
}
#endif
/* ------------------------------------------------------------------------ */
static const char KEY_HOOK = 'h';
static void hookf(lua_State *L, lua_Debug *ar)
{
static const char *const hooknames[] =
{"call", "return", "line", "count", "tail return"};
lua_pushlightuserdata(L, (void *)&KEY_HOOK);
lua_rawget(L, LUA_REGISTRYINDEX);
if (lua_isfunction(L, -1)) {
lua_pushstring(L, hooknames[(int)ar->event]);
if (ar->currentline >= 0)
lua_pushinteger(L, ar->currentline);
else lua_pushnil(L);
lua_call(L, 2, 0);
}
}
static int makemask(const char *smask, int count)
{
int mask = 0;
if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
if (strchr(smask, 'r')) mask |= LUA_MASKRET;
if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
if (count > 0) mask |= LUA_MASKCOUNT;
return mask;
}
static char *unmakemask(int mask, char *smask)
{
int i = 0;
if (mask & LUA_MASKCALL) smask[i++] = 'c';
if (mask & LUA_MASKRET) smask[i++] = 'r';
if (mask & LUA_MASKLINE) smask[i++] = 'l';
smask[i] = '\0';
return smask;
}
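/* Illustrative round trip: from Lua, debug.sethook(f, "crl", 100) produces
** mask = LUA_MASKCALL|LUA_MASKRET|LUA_MASKLINE|LUA_MASKCOUNT via makemask(),
** and debug.gethook() turns that mask back into the string "crl".
*/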
LJLIB_CF(debug_sethook)
{
int arg, mask, count;
lua_Hook func;
(void)getthread(L, &arg);
if (lua_isnoneornil(L, arg+1)) {
lua_settop(L, arg+1);
func = NULL; mask = 0; count = 0; /* turn off hooks */
} else {
const char *smask = luaL_checkstring(L, arg+2);
luaL_checktype(L, arg+1, LUA_TFUNCTION);
count = luaL_optint(L, arg+3, 0);
func = hookf; mask = makemask(smask, count);
}
lua_pushlightuserdata(L, (void *)&KEY_HOOK);
lua_pushvalue(L, arg+1);
lua_rawset(L, LUA_REGISTRYINDEX);
lua_sethook(L, func, mask, count);
return 0;
}
LJLIB_CF(debug_gethook)
{
char buff[5];
int mask = lua_gethookmask(L);
lua_Hook hook = lua_gethook(L);
if (hook != NULL && hook != hookf) { /* external hook? */
lua_pushliteral(L, "external hook");
} else {
lua_pushlightuserdata(L, (void *)&KEY_HOOK);
lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
}
lua_pushstring(L, unmakemask(mask, buff));
lua_pushinteger(L, lua_gethookcount(L));
return 3;
}
/* ------------------------------------------------------------------------ */
LJLIB_CF(debug_debug)
{
for (;;) {
char buffer[250];
fputs("lua_debug> ", stderr);
if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
strcmp(buffer, "cont\n") == 0)
return 0;
if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
lua_pcall(L, 0, 0, 0)) {
fputs(lua_tostring(L, -1), stderr);
fputs("\n", stderr);
}
lua_settop(L, 0); /* remove eventual returns */
}
}
/* ------------------------------------------------------------------------ */
#define LEVELS1 12 /* size of the first part of the stack */
#define LEVELS2 10 /* size of the second part of the stack */
LJLIB_CF(debug_traceback)
{
int arg;
lua_State *L1 = getthread(L, &arg);
const char *msg = lua_tostring(L, arg+1);
if (msg == NULL && L->top > L->base+arg)
L->top = L->base+arg+1;
else
luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1)));
return 1;
}
/* ------------------------------------------------------------------------ */
#include "lj_libdef.h"
LUALIB_API int luaopen_debug(lua_State *L)
{
LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
return 1;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8" ?>
<controls:TestContentPage xmlns="http://xamarin.com/schemas/2014/forms"
xmlns:x="http://schemas.microsoft.com/winfx/2009/xaml"
xmlns:d="http://xamarin.com/schemas/2014/forms/design"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:controls="clr-namespace:Xamarin.Forms.Controls"
mc:Ignorable="d"
x:Class="Xamarin.Forms.Controls.Issues.Issue7792"
Title="Issue 7792" >
<ContentPage.Content>
<StackLayout>
<Label
Text="If you can see the text of the Carousel EmptyView below, the test passes."/>
<CarouselView
BackgroundColor="Yellow"
ItemsSource="{Binding EmptyItems}"
EmptyView="No items to display (EmptyView)." />
</StackLayout>
</ContentPage.Content>
</controls:TestContentPage>
| {
"pile_set_name": "Github"
} |
// Copyright 2011-2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.security.zynamics.binnavi.ZyGraph.Updaters.CodeNodes;
import com.google.common.base.Preconditions;
import com.google.security.zynamics.binnavi.disassembly.COperandTreeNode;
import com.google.security.zynamics.binnavi.disassembly.INaviOperandTreeNode;
import com.google.security.zynamics.binnavi.disassembly.INaviOperandTreeNodeListener;
import com.google.security.zynamics.binnavi.disassembly.OperandDisplayStyle;
import com.google.security.zynamics.binnavi.yfileswrap.zygraph.ZyGraph;
import com.google.security.zynamics.zylib.disassembly.IReference;
import com.google.security.zynamics.zylib.yfileswrap.gui.zygraph.realizers.IZyNodeRealizer;
/**
* Updates the code node on changes to the operands.
*/
public final class COperandUpdater implements INaviOperandTreeNodeListener {
/**
* Graph to update on changes.
*/
private final ZyGraph graph;
/**
* Realizer of the node to update.
*/
private IZyNodeRealizer nodeRealizer;
/**
* Creates a new updater object.
*
* @param graph Graph to update on changes.
*/
public COperandUpdater(final ZyGraph graph) {
this.graph = Preconditions.checkNotNull(graph, "Error: graph argument can not be null.");
}
/**
* Regenerates the content of the node and updates the graph view.
*/
private void rebuildNode() {
nodeRealizer.regenerate();
graph.updateViews();
}
@Override
public void addedReference(
final INaviOperandTreeNode operandTreeNode, final IReference reference) {
// References are not shown in code nodes => No rebuild necessary when a reference changes
}
@Override
public void changedDisplayStyle(
final COperandTreeNode operandTreeNode, final OperandDisplayStyle style) {
rebuildNode();
}
@Override
public void changedValue(final INaviOperandTreeNode operandTreeNode) {
rebuildNode();
}
@Override
public void removedReference(
final INaviOperandTreeNode operandTreeNode, final IReference reference) {
// References are not shown in code nodes => No rebuild necessary when a reference changes
}
/**
* Sets the realizer of the node to update.
*
* @param realizer The realizer of the node to update.
*/
public void setRealizer(final IZyNodeRealizer realizer) {
nodeRealizer = realizer;
}
}
| {
"pile_set_name": "Github"
} |
{
"name": "com.unity.multiplayer-hlapi.Editor",
"references": [
"com.unity.multiplayer-hlapi.Runtime",
"com.unity.multiplayer-weaver.Editor"
],
"includePlatforms": [
"Editor"
],
"excludePlatforms": []
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2019 Zilliqa
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "SWInfo.h"
#include <curl/curl.h>
#include <json/json.h>
#include "libMessage/MessengerSWInfo.h"
#include "libUtils/Logger.h"
using namespace std;
static const std::string ZILLIQA_RELEASE_TAG_URL(
"https://api.github.com/repos/Zilliqa/Zilliqa/tags");
SWInfo::SWInfo()
: m_zilliqaMajorVersion(0),
m_zilliqaMinorVersion(0),
m_zilliqaFixVersion(0),
m_zilliqaUpgradeDS(0),
m_zilliqaCommit(0),
m_scillaMajorVersion(0),
m_scillaMinorVersion(0),
m_scillaFixVersion(0),
m_scillaUpgradeDS(0),
m_scillaCommit(0) {}
SWInfo::SWInfo(const uint32_t& zilliqaMajorVersion,
const uint32_t& zilliqaMinorVersion,
const uint32_t& zilliqaFixVersion,
const uint64_t& zilliqaUpgradeDS, const uint32_t& zilliqaCommit,
const uint32_t& scillaMajorVersion,
const uint32_t& scillaMinorVersion,
const uint32_t& scillaFixVersion,
const uint64_t& scillaUpgradeDS, const uint32_t& scillaCommit)
: m_zilliqaMajorVersion(zilliqaMajorVersion),
m_zilliqaMinorVersion(zilliqaMinorVersion),
m_zilliqaFixVersion(zilliqaFixVersion),
m_zilliqaUpgradeDS(zilliqaUpgradeDS),
m_zilliqaCommit(zilliqaCommit),
m_scillaMajorVersion(scillaMajorVersion),
m_scillaMinorVersion(scillaMinorVersion),
m_scillaFixVersion(scillaFixVersion),
m_scillaUpgradeDS(scillaUpgradeDS),
m_scillaCommit(scillaCommit) {}
/// Implements the Serialize function inherited from Serializable.
unsigned int SWInfo::Serialize(bytes& dst, unsigned int offset) const {
// LOG_MARKER();
if ((offset + SIZE) > dst.size()) {
dst.resize(offset + SIZE);
}
unsigned int curOffset = offset;
SetNumber<uint32_t>(dst, curOffset, m_zilliqaMajorVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint32_t>(dst, curOffset, m_zilliqaMinorVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint32_t>(dst, curOffset, m_zilliqaFixVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint64_t>(dst, curOffset, m_zilliqaUpgradeDS, sizeof(uint64_t));
curOffset += sizeof(uint64_t);
SetNumber<uint32_t>(dst, curOffset, m_zilliqaCommit, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint32_t>(dst, curOffset, m_scillaMajorVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint32_t>(dst, curOffset, m_scillaMinorVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint32_t>(dst, curOffset, m_scillaFixVersion, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
SetNumber<uint64_t>(dst, curOffset, m_scillaUpgradeDS, sizeof(uint64_t));
curOffset += sizeof(uint64_t);
SetNumber<uint32_t>(dst, curOffset, m_scillaCommit, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
return SIZE;
}
/// Implements the Deserialize function inherited from Serializable.
int SWInfo::Deserialize(const bytes& src, unsigned int offset) {
// LOG_MARKER();
unsigned int curOffset = offset;
try {
m_zilliqaMajorVersion =
GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_zilliqaMinorVersion =
GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_zilliqaFixVersion = GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_zilliqaUpgradeDS = GetNumber<uint64_t>(src, curOffset, sizeof(uint64_t));
curOffset += sizeof(uint64_t);
m_zilliqaCommit = GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_scillaMajorVersion =
GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_scillaMinorVersion =
GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_scillaFixVersion = GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
m_scillaUpgradeDS = GetNumber<uint64_t>(src, curOffset, sizeof(uint64_t));
curOffset += sizeof(uint64_t);
m_scillaCommit = GetNumber<uint32_t>(src, curOffset, sizeof(uint32_t));
curOffset += sizeof(uint32_t);
} catch (const std::exception& e) {
LOG_GENERAL(WARNING, "Error with SWInfo::Deserialize." << ' ' << e.what());
return -1;
}
return 0;
}
/// Less-than comparison operator.
bool SWInfo::operator<(const SWInfo& r) const {
return tie(m_zilliqaMajorVersion, m_zilliqaMinorVersion, m_zilliqaFixVersion,
m_zilliqaUpgradeDS, m_zilliqaCommit, m_scillaMajorVersion,
m_scillaMinorVersion, m_scillaFixVersion, m_scillaUpgradeDS,
m_scillaCommit) <
tie(r.m_zilliqaMajorVersion, r.m_zilliqaMinorVersion,
r.m_zilliqaFixVersion, r.m_zilliqaUpgradeDS, r.m_zilliqaCommit,
r.m_scillaMajorVersion, r.m_scillaMinorVersion,
r.m_scillaFixVersion, r.m_scillaUpgradeDS, r.m_scillaCommit);
}
/// Greater-than comparison operator.
bool SWInfo::operator>(const SWInfo& r) const { return r < *this; }
/// Equality operator.
bool SWInfo::operator==(const SWInfo& r) const {
return tie(m_zilliqaMajorVersion, m_zilliqaMinorVersion, m_zilliqaFixVersion,
m_zilliqaUpgradeDS, m_zilliqaCommit, m_scillaMajorVersion,
m_scillaMinorVersion, m_scillaFixVersion, m_scillaUpgradeDS,
m_scillaCommit) ==
tie(r.m_zilliqaMajorVersion, r.m_zilliqaMinorVersion,
r.m_zilliqaFixVersion, r.m_zilliqaUpgradeDS, r.m_zilliqaCommit,
r.m_scillaMajorVersion, r.m_scillaMinorVersion,
r.m_scillaFixVersion, r.m_scillaUpgradeDS, r.m_scillaCommit);
}
/// Inequality operator.
bool SWInfo::operator!=(const SWInfo& r) const { return !(*this == r); }
/// Getters.
const uint32_t& SWInfo::GetZilliqaMajorVersion() const {
return m_zilliqaMajorVersion;
}
const uint32_t& SWInfo::GetZilliqaMinorVersion() const {
return m_zilliqaMinorVersion;
}
const uint32_t& SWInfo::GetZilliqaFixVersion() const {
return m_zilliqaFixVersion;
}
const uint64_t& SWInfo::GetZilliqaUpgradeDS() const {
return m_zilliqaUpgradeDS;
}
const uint32_t& SWInfo::GetZilliqaCommit() const { return m_zilliqaCommit; }
const uint32_t& SWInfo::GetScillaMajorVersion() const {
return m_scillaMajorVersion;
}
const uint32_t& SWInfo::GetScillaMinorVersion() const {
return m_scillaMinorVersion;
}
const uint32_t& SWInfo::GetScillaFixVersion() const {
return m_scillaFixVersion;
}
const uint64_t& SWInfo::GetScillaUpgradeDS() const { return m_scillaUpgradeDS; }
const uint32_t& SWInfo::GetScillaCommit() const { return m_scillaCommit; }
static size_t WriteString(void* contents, size_t size, size_t nmemb,
void* userp) {
((string*)userp)->append((char*)contents, size * nmemb);
return size * nmemb;
}
bool SWInfo::IsLatestVersion() {
string curlRes;
auto curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
curl_easy_setopt(curl, CURLOPT_URL, ZILLIQA_RELEASE_TAG_URL.c_str());
curl_easy_setopt(curl, CURLOPT_USERAGENT, "zilliqa");
curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_ALL);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteString);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &curlRes);
CURLcode res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if (res != CURLE_OK) {
LOG_GENERAL(WARNING,
"curl_easy_perform() failed to get latest release tag");
return false;
}
try {
Json::Value jsonValue;
std::string errors;
Json::CharReaderBuilder builder;
auto reader = std::unique_ptr<Json::CharReader>(builder.newCharReader());
if (!reader->parse(curlRes.c_str(), curlRes.c_str() + curlRes.size(),
&jsonValue, &errors)) {
LOG_GENERAL(WARNING,
"Failed to parse return result to json: " << curlRes);
LOG_GENERAL(WARNING, "Error: " << errors);
return false;
}
Json::Value jsonLatestTag = jsonValue[0];
std::string latestTag = jsonLatestTag["name"].asCString();
LOG_GENERAL(INFO, "The latest software version: " << latestTag);
if (VERSION_TAG < latestTag) {
return false;
}
} catch (const std::exception& e) {
LOG_GENERAL(WARNING,
"Failed to parse tag information, exception: " << e.what());
return false;
}
return true;
}
| {
"pile_set_name": "Github"
} |
{
    "templateName":"WeChat template",
    "directories": "property",
    "templateCode":"wechatSmsTemplate",
    "templateKey":"templateId",
    "templateKeyName":"ID",
    "searchCode": "templateId",
    "searchName": "ID",
    "conditions": [
        {
            "name": "WeChat template ID",
            "inputType": "input",
            "code": "smsTemplateId",
            "whereCondition": "equal"
        },
        {
            "name": "Template type",
            "inputType": "select",
            "selectValue":"10001,10002,10003",
            "selectValueName":"Arrears reminder,Power outage notice,Water outage notice",
            "code": "templateType",
            "whereCondition": "equal"
        },
        {
            "name": "Template code",
            "inputType": "input",
            "code": "templateId",
            "whereCondition": "equal"
        }
    ],
    "columns":[
        {
            "code":"templateType",
            "cnCode":"Template type",
            "desc":"Required, please enter the template type",
            "required":true,
            "hasDefaultValue":false,
            "inputType": "select",
            "selectValue":"10001,10002,10003",
            "selectValueName":"Arrears reminder,Power outage notice,Water outage notice",
            "limit":"num",
            "limitParam":"",
            "limitErrInfo":"Invalid template format",
            "show": true
        },
        {
            "code": "smsTemplateId",
            "cnCode":"WeChat template ID",
            "desc":"Required, please enter the WeChat template ID",
            "required":true,
            "hasDefaultValue":false,
            "inputType": "input",
            "limit":"maxLength",
            "limitParam":"64",
            "limitErrInfo":"WeChat template ID is too long",
            "show": true
        },
        {
            "code": "remark",
            "cnCode":"Description",
            "desc":"Optional, please enter a description",
            "required":false,
            "hasDefaultValue":false,
            "inputType": "input",
            "limit":"maxLength",
            "limitParam":"500",
            "limitErrInfo":"Description cannot exceed 500 characters",
            "show": false
        }
    ]
} | {
"pile_set_name": "Github"
} |
---
layout: postag
title: 'DET'
shortdef: 'determiner'
---
The English `DET` covers most cases of Penn Treebank DT, PDT, WDT. However, when a Penn Treebank word with one of these tags stands alone as a noun phrase rather than modifying another word, it becomes `PRON`. For example, *all* is `DET` in "all the children slept" but `PRON` in "all slept".
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2016 Austin English
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifndef __BLUETOOTHAPIS_H
#define __BLUETOOTHAPIS_H
#ifdef __cplusplus
extern "C" {
#endif
typedef ULONGLONG BTH_ADDR;
typedef struct _BLUETOOTH_ADDRESS {
union {
BTH_ADDR ullLong;
BYTE rgBytes[6];
} DUMMYUNIONNAME;
} BLUETOOTH_ADDRESS_STRUCT;
#define BLUETOOTH_ADDRESS BLUETOOTH_ADDRESS_STRUCT
#define BLUETOOTH_NULL_ADDRESS ((ULONGLONG) 0x0)
#define BLUETOOTH_MAX_NAME_SIZE (248)
#define BLUETOOTH_MAX_PASSKEY_SIZE (16)
#define BLUETOOTH_MAX_PASSKEY_BUFFER_SIZE (BLUETOOTH_MAX_PASSKEY_SIZE + 1)
#define BLUETOOTH_SERVICE_DISABLE 0x00
#define BLUETOOTH_SERVICE_ENABLE 0x01
#define BLUETOOTH_SERVICE_MASK (BLUETOOTH_SERVICE_ENABLE | BLUETOOTH_SERVICE_DISABLE)
typedef struct _BLUETOOTH_FIND_RADIO_PARAMS {
DWORD dwSize;
} BLUETOOTH_FIND_RADIO_PARAMS;
typedef struct _BLUETOOTH_RADIO_INFO {
DWORD dwSize;
BLUETOOTH_ADDRESS address;
WCHAR szName[BLUETOOTH_MAX_NAME_SIZE];
ULONG ulClassofDevice;
USHORT lmpSubversion;
USHORT manufacturer;
} BLUETOOTH_RADIO_INFO, *PBLUETOOTH_RADIO_INFO;
typedef struct _BLUETOOTH_DEVICE_INFO {
DWORD dwSize;
BLUETOOTH_ADDRESS Address;
ULONG ulClassofDevice;
BOOL fConnected;
BOOL fRemembered;
BOOL fAuthenticated;
SYSTEMTIME stLastSeen;
SYSTEMTIME stLastUsed;
WCHAR szName[BLUETOOTH_MAX_NAME_SIZE];
} BLUETOOTH_DEVICE_INFO, BLUETOOTH_DEVICE_INFO_STRUCT, *PBLUETOOTH_DEVICE_INFO;
typedef struct _BLUETOOTH_DEVICE_SEARCH_PARAMS {
DWORD dwSize;
BOOL fReturnAuthenticated;
BOOL fReturnRemembered;
BOOL fReturnUnknown;
BOOL fReturnConnected;
BOOL fIssueInquiry;
UCHAR cTimeoutMultiplier;
HANDLE hRadio;
} BLUETOOTH_DEVICE_SEARCH_PARAMS;
typedef HANDLE HBLUETOOTH_AUTHENTICATION_REGISTRATION;
typedef HANDLE HBLUETOOTH_CONTAINER_ELEMENT;
typedef HANDLE HBLUETOOTH_DEVICE_FIND;
typedef HANDLE HBLUETOOTH_RADIO_FIND;
typedef struct _BLUETOOTH_COD_PAIRS {
ULONG ulCODMask;
const WCHAR *pcszDescription;
} BLUETOOTH_COD_PAIRS;
typedef BOOL (WINAPI *PFN_DEVICE_CALLBACK)(void *pvParam, const BLUETOOTH_DEVICE_INFO *pDevice);
typedef struct _BLUETOOTH_SELECT_DEVICE_PARAMS {
DWORD dwSize;
ULONG cNumOfClasses;
BLUETOOTH_COD_PAIRS *prgClassOfDevices;
WCHAR *pszInfo;
HWND hwndParent;
BOOL fForceAuthentication;
BOOL fShowAuthenticated;
BOOL fShowRemembered;
BOOL fShowUnknown;
BOOL fAddNewDeviceWizard;
BOOL fSkipServicesPage;
PFN_DEVICE_CALLBACK pfnDeviceCallback;
void *pvParam;
DWORD cNumDevices;
PBLUETOOTH_DEVICE_INFO pDevices;
} BLUETOOTH_SELECT_DEVICE_PARAMS;
typedef BOOL (WINAPI *PFN_AUTHENTICATION_CALLBACK)(void *, PBLUETOOTH_DEVICE_INFO);
typedef struct _SDP_ELEMENT_DATA {
SDP_TYPE type;
SDP_SPECIFICTYPE specificType;
union {
SDP_LARGE_INTEGER_16 int128;
LONGLONG int64;
LONG int32;
SHORT int16;
CHAR int8;
SDP_ULARGE_INTEGER_16 uint128;
ULONGLONG uint64;
ULONG uint32;
USHORT uint16;
UCHAR uint8;
UCHAR booleanVal;
GUID uuid128;
ULONG uuid32;
USHORT uuid16;
struct {
BYTE *value;
ULONG length;
} string;
struct {
BYTE *value;
ULONG length;
} url;
struct {
BYTE *value;
ULONG length;
} sequence;
struct {
BYTE *value;
ULONG length;
} alternative;
} data;
} SDP_ELEMENT_DATA, *PSDP_ELEMENT_DATA;
typedef struct _SDP_STRING_TYPE_DATA {
USHORT encoding;
USHORT mibeNum;
USHORT attributeId;
} SDP_STRING_TYPE_DATA, *PSDP_STRING_TYPE_DATA;
typedef BOOL (CALLBACK *PFN_BLUETOOTH_ENUM_ATTRIBUTES_CALLBACK)(
ULONG uAttribId,
BYTE *pValueStream,
ULONG cbStreamSize,
void *pvParam);
DWORD WINAPI BluetoothAuthenticateDevice(HWND, HANDLE, BLUETOOTH_DEVICE_INFO *, WCHAR *, ULONG);
DWORD WINAPI BluetoothAuthenticateMultipleDevices(HWND, HANDLE, DWORD, BLUETOOTH_DEVICE_INFO *);
BOOL WINAPI BluetoothDisplayDeviceProperties(HWND, BLUETOOTH_DEVICE_INFO *);
BOOL WINAPI BluetoothEnableDiscovery(HANDLE, BOOL);
BOOL WINAPI BluetoothEnableIncomingConnections(HANDLE, BOOL);
DWORD WINAPI BluetoothEnumerateInstalledServices(HANDLE, BLUETOOTH_DEVICE_INFO *, DWORD *, GUID *);
BOOL WINAPI BluetoothFindDeviceClose(HBLUETOOTH_DEVICE_FIND);
HBLUETOOTH_DEVICE_FIND WINAPI BluetoothFindFirstDevice(BLUETOOTH_DEVICE_SEARCH_PARAMS *, BLUETOOTH_DEVICE_INFO *);
HBLUETOOTH_RADIO_FIND WINAPI BluetoothFindFirstRadio(BLUETOOTH_FIND_RADIO_PARAMS *, HANDLE *);
BOOL WINAPI BluetoothFindNextDevice(HBLUETOOTH_DEVICE_FIND, BLUETOOTH_DEVICE_INFO *);
BOOL WINAPI BluetoothFindNextRadio(HBLUETOOTH_RADIO_FIND, HANDLE *);
BOOL WINAPI BluetoothFindRadioClose(HBLUETOOTH_RADIO_FIND);
DWORD WINAPI BluetoothGetDeviceInfo(HANDLE, BLUETOOTH_DEVICE_INFO *);
DWORD WINAPI BluetoothGetRadioInfo(HANDLE, PBLUETOOTH_RADIO_INFO);
BOOL WINAPI BluetoothIsConnectable(HANDLE);
BOOL WINAPI BluetoothIsDiscoverable(HANDLE);
DWORD WINAPI BluetoothRegisterForAuthentication(BLUETOOTH_DEVICE_INFO *, HBLUETOOTH_AUTHENTICATION_REGISTRATION *, PFN_AUTHENTICATION_CALLBACK, void *);
DWORD WINAPI BluetoothRemoveDevice(BLUETOOTH_ADDRESS *);
#define BluetoothEnumAttributes BluetoothSdpEnumAttributes
BOOL WINAPI BluetoothSdpEnumAttributes(BYTE *, ULONG, PFN_BLUETOOTH_ENUM_ATTRIBUTES_CALLBACK, void *);
DWORD WINAPI BluetoothSdpGetAttributeValue(BYTE *, ULONG, USHORT, PSDP_ELEMENT_DATA);
DWORD WINAPI BluetoothSdpGetContainerElementData(BYTE *, ULONG, HBLUETOOTH_CONTAINER_ELEMENT *, PSDP_ELEMENT_DATA);
DWORD WINAPI BluetoothSdpGetElementData(BYTE *, ULONG, PSDP_ELEMENT_DATA);
DWORD WINAPI BluetoothSdpGetString(BYTE *, ULONG, PSDP_STRING_TYPE_DATA, USHORT, WCHAR *, ULONG *);
BOOL WINAPI BluetoothSelectDevices(BLUETOOTH_SELECT_DEVICE_PARAMS *);
BOOL WINAPI BluetoothSelectDevicesFree(BLUETOOTH_SELECT_DEVICE_PARAMS *);
DWORD WINAPI BluetoothSendAuthenticationResponse(HANDLE, BLUETOOTH_DEVICE_INFO *, WCHAR *);
DWORD WINAPI BluetoothSetServiceState(HANDLE, BLUETOOTH_DEVICE_INFO *, GUID *, DWORD);
BOOL WINAPI BluetoothUnregisterAuthentication(HBLUETOOTH_AUTHENTICATION_REGISTRATION);
DWORD WINAPI BluetoothUpdateDeviceRecord(BLUETOOTH_DEVICE_INFO *);
#ifdef __cplusplus
}
#endif
#endif /* __BLUETOOTHAPIS_H */
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright (c) 2011-2015 Lp digital system
*
* This file is part of BackBee.
*
* BackBee is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BackBee is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BackBee. If not, see <http://www.gnu.org/licenses/>.
*
* @author Charles Rouillon <[email protected]>
*/
namespace BackBee\ClassContent\Exception;
use BackBee\Exception\BBException;
/**
* ClassContent exceptions.
*
 * Error codes defined are:
*
* * UNKNOWN_PROPERTY : the property does not exist for the content
* * UNKNOWN_METHOD : the method does not exist for the content
* * REVISION_OUTOFDATE : the revision is out of date
* * REVISION_ORPHAN : the revision is orphan
* * REVISION_UPTODATE : the revision is already up to date
 * * REVISION_CONFLICTED : the revision is in conflict
 * * REVISION_ADDED : the revision is already added
 * * UNMATCH_REVISION : the revision does not match the content
 * * REVISION_MISSING : no revision defined for the content
* * REVISION_UNLOADED : the revision is unloaded
* * MALFORMED_PARAM : the parameter is malformed
*
* @category BackBee
*
* @copyright Lp digital system
* @author c.rouillon <[email protected]>
*/
class ClassContentException extends BBException
{
/**
* The property does not exist for the content.
*
* @var int
*/
const UNKNOWN_PROPERTY = 3001;
/**
* The method does not exist for the content.
*
* @var int
*/
const UNKNOWN_METHOD = 3002;
/**
* The revision is out of date.
*
* @var int
*/
const REVISION_OUTOFDATE = 3003;
/**
* The revision is orphan (the content does not exist anymore).
*
* @var int
*/
const REVISION_ORPHAN = 3004;
/**
* The revision is already up to date.
*
* @var int
*/
const REVISION_UPTODATE = 3005;
/**
* The revision is conflicted.
*
* @var int
*/
const REVISION_CONFLICTED = 3006;
/**
* The revision is already added.
*
* @var int
*/
const REVISION_ADDED = 3007;
/**
* The revision does not match the content.
*
* @var int
*/
const UNMATCH_REVISION = 3008;
/**
 * No revision defined for the content.
*
* @var int
*/
const REVISION_MISSING = 3009;
/**
* The revision is unloaded.
*
* @var int
*/
const REVISION_UNLOADED = 3010;
/**
* The parameter is malformed.
*
* @var int
*/
const MALFORMED_PARAM = 3011;
}
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package groovy.util;
import org.codehaus.groovy.runtime.typehandling.DefaultTypeTransformation;
import org.codehaus.groovy.runtime.ScriptBytecodeAdapter;
import org.codehaus.groovy.runtime.DefaultGroovyMethods;
import java.util.*;
/**
* A Collections utility class
*
* @author Paul King
* @author Jim White
*/
public class GroovyCollections {
/**
* Finds all combinations of items from the given collections.
*
* @param collections the given collections
* @return a List of the combinations found
* @see #combinations(Collection)
*/
public static List combinations(Object[] collections) {
return combinations((Iterable)Arrays.asList(collections));
}
/**
 * Finds all non-empty subsequences of a list.
* E.g. <code>subsequences([1, 2, 3])</code> would be:
* [[1, 2, 3], [1, 3], [2, 3], [1, 2], [1], [2], [3]]
*
* @param items the List of items
* @return the subsequences from items
*/
public static <T> Set<List<T>> subsequences(List<T> items) {
// items.inject([]){ ss, h -> ss.collect { it + [h] } + ss + [[h]] }
Set<List<T>> ans = new HashSet<List<T>>();
for (T h : items) {
Set<List<T>> next = new HashSet<List<T>>();
for (List<T> it : ans) {
List<T> sublist = new ArrayList<T>(it);
sublist.add(h);
next.add(sublist);
}
next.addAll(ans);
List<T> hlist = new ArrayList<T>();
hlist.add(h);
next.add(hlist);
ans = next;
}
return ans;
}
/**
* @param collections the given collections
* @deprecated use combinations(Iterable)
*/
@Deprecated
public static List combinations(Collection collections) {
return combinations((Iterable)collections);
}
/**
* Finds all combinations of items from the given Iterable aggregate of collections.
* So, <code>combinations([[true, false], [true, false]])</code>
* is <code>[[true, true], [false, true], [true, false], [false, false]]</code>
* and <code>combinations([['a', 'b'],[1, 2, 3]])</code>
* is <code>[['a', 1], ['b', 1], ['a', 2], ['b', 2], ['a', 3], ['b', 3]]</code>.
* If a non-collection item is given, it is treated as a singleton collection,
* i.e. <code>combinations([[1, 2], 'x'])</code> is <code>[[1, 'x'], [2, 'x']]</code>.
*
* @param collections the Iterable of given collections
* @return a List of the combinations found
* @since 2.2.0
*/
public static List combinations(Iterable collections) {
List collectedCombos = new ArrayList();
for (Object collection : collections) {
Iterable items = DefaultTypeTransformation.asCollection(collection);
if (collectedCombos.isEmpty()) {
for (Object item : items) {
List l = new ArrayList();
l.add(item);
collectedCombos.add(l);
}
} else {
List savedCombos = new ArrayList(collectedCombos);
List newCombos = new ArrayList();
for (Object value : items) {
for (Object savedCombo : savedCombos) {
List oldList = new ArrayList((List) savedCombo);
oldList.add(value);
newCombos.add(oldList);
}
}
collectedCombos = newCombos;
}
}
return collectedCombos;
}
/**
* Transposes an array of lists.
*
* @param lists the given lists
* @return a List of the transposed lists
* @see #transpose(List)
*/
public static List transpose(Object[] lists) {
return transpose(Arrays.asList(lists));
}
/**
* Transposes the given lists.
* So, <code>transpose([['a', 'b'], [1, 2]])</code>
* is <code>[['a', 1], ['b', 2]]</code> and
* <code>transpose([['a', 'b', 'c']])</code>
* is <code>[['a'], ['b'], ['c']]</code>.
*
* @param lists the given lists
* @return a List of the transposed lists
*/
public static List transpose(List lists) {
List result = new ArrayList();
        if (lists.isEmpty()) return result;
int minSize = Integer.MAX_VALUE;
for (Object listLike : lists) {
List list = (List) DefaultTypeTransformation.castToType(listLike, List.class);
if (list.size() < minSize) minSize = list.size();
}
if (minSize == 0) return result;
for (int i = 0; i < minSize; i++) {
result.add(new ArrayList());
}
for (Object listLike : lists) {
List list = (List) DefaultTypeTransformation.castToType(listLike, List.class);
for (int i = 0; i < minSize; i++) {
List resultList = (List) result.get(i);
resultList.add(list.get(i));
}
}
return result;
}
/**
* Selects the minimum value found in an array of items, so
* min([2, 4, 6] as Object[]) == 2.
*
* @param items an array of items
* @return the minimum value
*/
public static <T> T min(T[] items) {
return min((Iterable<T>)Arrays.asList(items));
}
/**
* @deprecated use min(Iterable)
*/
@Deprecated
public static <T> T min(Collection<T> items) {
return min((Iterable<T>)items);
}
/**
* Selects the minimum value found in an Iterable of items.
*
* @param items an Iterable
* @return the minimum value
* @since 2.2.0
*/
public static <T> T min(Iterable<T> items) {
T answer = null;
for (T value : items) {
if (value != null) {
if (answer == null || ScriptBytecodeAdapter.compareLessThan(value, answer)) {
answer = value;
}
}
}
return answer;
}
/**
* Selects the maximum value found in an array of items, so
 * max([2, 4, 6] as Object[]) == 6.
*
* @param items an array of items
* @return the maximum value
*/
public static <T> T max(T[] items) {
return max((Iterable<T>)Arrays.asList(items));
}
/**
* @deprecated use max(Iterable)
*/
@Deprecated
public static <T> T max(Collection<T> items) {
return max((Iterable<T>)items);
}
/**
* Selects the maximum value found in an Iterable.
*
 * @param items an Iterable
* @return the maximum value
* @since 2.2.0
*/
public static <T> T max(Iterable<T> items) {
T answer = null;
for (T value : items) {
if (value != null) {
if (answer == null || ScriptBytecodeAdapter.compareGreaterThan(value, answer)) {
answer = value;
}
}
}
return answer;
}
/**
* Sums all the items from an array of items.
*
* @param items an array of items
* @return the sum of the items
*/
public static Object sum(Object[] items) {
return sum((Iterable)Arrays.asList(items));
}
/**
* @deprecated use sum(Iterable)
*/
@Deprecated
public static Object sum(Collection items) {
return sum((Iterable)items);
}
/**
* Sums all the given items.
*
* @param items an Iterable of items
 * @return the sum of the items
* @since 2.2.0
*/
public static Object sum(Iterable items) {
return DefaultGroovyMethods.sum(items);
}
}
| {
"pile_set_name": "Github"
} |
package ini
import (
"io"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// OpenFile takes a path to a given file, and will open and parse
// that file.
func OpenFile(path string) (Sections, error) {
f, err := os.Open(path)
if err != nil {
return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
}
defer f.Close()
return Parse(f)
}
// Parse will parse the given file using the shared config
// visitor.
func Parse(f io.Reader) (Sections, error) {
tree, err := ParseAST(f)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
// ParseBytes will parse the given bytes and return the parsed sections.
func ParseBytes(b []byte) (Sections, error) {
tree, err := ParseASTBytes(b)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 4581c45ac4aa2264187087659a4cc252
timeCreated: 1460031632
licenseType: Store
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories;
import org.gradle.api.NamedDomainObjectCollection;
import org.gradle.api.artifacts.repositories.ArtifactRepository;
import org.gradle.util.DeprecationLogger;
public abstract class AbstractArtifactRepository implements ArtifactRepositoryInternal, ResolutionAwareRepository, PublicationAwareRepository {
private String name;
private boolean isPartOfContainer;
public void onAddToContainer(NamedDomainObjectCollection<ArtifactRepository> container) {
isPartOfContainer = true;
}
public String getName() {
return name;
}
public void setName(String name) {
if (isPartOfContainer) {
DeprecationLogger.nagUserOfDeprecated("Changing the name of an ArtifactRepository that is part of a container", "Set the name when creating the repository");
}
this.name = name;
}
}
| {
"pile_set_name": "Github"
} |
this one is truly interesting. in my opinion this is the best challenge of the whole CTF.
we seem to get a lot of functions, with a definite start (maze).
to find the goal we look at the strings near the other maze strings:
'you are dead'
'you can't move that quickly'
and we find:
'you made it out alive!'
'the flag is the *shortest* path you must follow to survive'
so evidently we must solve the labyrinth.
(figures: callgraph, polar plot)
to do this we employ scripting with our Favorite Publicly-Available Open-Source Widely-Published Reverse Engineering and Disassembler tool.
<script.py>
```python
from collections import deque

# assumes this runs inside Binary Ninja's scripting environment; the API used
# below (log, enums, LowLevelILOperation, get_text_line_input, ...) comes from it
from binaryninja import *
from binaryninja import log, enums
class Graph(object):
def __init__(self,f):
self.file = f
def node(self,name):
self.file.write('node,%s\n' % (name))
def edge(self,nodeFrom, nodeTo):
self.file.write('edge,%s,%s\n' % (nodeFrom, nodeTo))
def add_vertex(g,name):
g.node(name)
def add_edge(g,nodeFrom, nodeTo):
g.edge(str(nodeFrom),str(nodeTo))
traversalDepth = 0
adrRngToNotTraverse = [] #(start,end) sections to not be traversed
def sectionsToNotTraverse(bv,sections):
global adrRngToNotTraverse
adrRngToNotTraverse = []
for section in sections:
sect = bv.sections.get(section)
if sect:
adrRngToNotTraverse.append((sect.start, sect.end))
def shouldNotBeTrav(bv, function):
for start,end in adrRngToNotTraverse:
if start <= function.start and end > function.start:
return True
return False
ignored = set(['__stack_chk_fail', 'getchar', 'time','puts'])
def goOn(bv,function,f):
g = Graph(f)
queue = deque() #(traversalLevel, function)
queue.append((0,function,[function],''))
visited = set()
while queue:
traverseLevel,func,path,directions = queue.popleft()
if func in visited:
continue
if shouldNotBeTrav(bv,func):
continue
if func.name == 'goal':
f.write('\n\n\nGOAL FOUND\n')
f.write(str(traverseLevel) + '\n')
f.write(func.name +'\n')
f.write(repr(map(lambda fn: fn.name, path)) + '\n')
f.write(directions + '\n')
log.log_alert("GOAL FOUND")
return
visited.add(func)
add_vertex(g,func.name)
if traversalDepth > 0 and traverseLevel>=traversalDepth:
continue
for block in func.low_level_il:
for il in block:
if il.operation != enums.LowLevelILOperation.LLIL_CALL: #and \
# il.operation!=enums.LowLevelILOperation.LLIL_JUMP_TO and \
# il.operation!=enums.LowLevelILOperation.LLIL_JUMP:
continue
callee = bv.get_functions_at(il.operands[0].value.value)[0]
if callee.name in ignored:
continue
move = '?'
if block.immediate_dominator:
cf_stmt = block.immediate_dominator[-1]
if cf_stmt.operation == LowLevelILOperation.LLIL_IF:
constant = cf_stmt.operands[0].operands[1].value.value
move = chr(constant)
if move not in 'NSEW':
move = '?'
# print str(il.operands[0].value) + ' %d %s %c' % (block.immediate_dominator.start, block.immediate_dominator[-1], move)
new_path = list(path)
new_path.append(callee)
queue.append((traverseLevel+1,callee,new_path,directions+move))
add_edge(g,func.name,callee.name)
f.write('search failed\n')
log.log_alert("Not found")
def go(bv,function):
global traversalDepth
sectionsDefault = [".plt"]
s = get_text_line_input("Enter sections to not be traversed, separated by comma.\nDefault one is '.plt'", "Sections to not traverse")
if s:
sections=s.split(",")
else:
sections=sectionsDefault
sectionsToNotTraverse(bv,sections)
d = get_text_line_input("Enter traversal depth, 0 for unlimited (default).", "Traversal depth")
if not d:
d="0"
traversalDepth = int(d)
with open("e:\\downloads\\out.txt", "w") as f:
goOn(bv,function,f)
```
explanation of the script:
We start at the maze, and we BFS through the callgraph looking for goal.
the first time goal is encountered, the path taken to get there will be minimal.
so we implement a simple interprocedural callgraph BFS using a FIFO worklist.
we also blacklist the functions that appear in every 'maze' function (`__stack_chk_fail`, `getchar`, `time`, `puts`) for efficiency.
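as a toy illustration of why the first dequeue of goal carries a shortest path (the graph below is made up, not the challenge binary):
```python
from collections import deque

def shortest_call_path(callgraph, start, goal):
    """callgraph: dict mapping each function to the functions it calls."""
    queue = deque([(start, [start])])   # FIFO worklist => breadth-first order
    visited = set()
    while queue:
        func, path = queue.popleft()
        if func == goal:
            return path                 # first arrival is minimal by the BFS invariant
        if func in visited:
            continue
        visited.add(func)
        for callee in callgraph.get(func, []):
            queue.append((callee, path + [callee]))
    return None

# hypothetical call graph, purely to show the mechanics
g = {'maze': ['a', 'b'], 'a': ['goal'], 'b': ['c'], 'c': ['goal']}
print(shortest_call_path(g, 'maze', 'goal'))  # ['maze', 'a', 'goal']
```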
at this point we have a path and a list of functions, but we do not know what input is necessary to traverse the maze
such that those functions are visited. to do this we look at the immediate dominators of the blocks containing
the call instructions. In graph theory a block D is a dominator of block B if all paths from the starting node to B
must pass through D. in our case the start node is the function entry block and our graph is the CFG. the immediate
dominator is just the closest strict dominator of B. then we simply parse the conditional looking for the ASCII value.
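for intuition, here is a self-contained sketch of the dominator step on a toy CFG (plain dicts, not the Binary Ninja objects the real script walks):
```python
def dominators(cfg, entry):
    """cfg: dict mapping each block to its successor blocks."""
    nodes = set(cfg) | {s for succs in cfg.values() for s in succs}
    preds = {n: [] for n in nodes}
    for n, succs in cfg.items():
        for s in succs:
            preds[s].append(n)
    dom = {n: set(nodes) for n in nodes}        # classic iterative data-flow
    dom[entry] = {entry}
    changed = True
    while changed:
        changed = False
        for n in nodes - {entry}:
            new = {n} | (set.intersection(*(dom[p] for p in preds[n])) if preds[n] else set())
            if new != dom[n]:
                dom[n], changed = new, True
    return dom

def immediate_dominator(dom, n):
    """closest strict dominator of n (n must not be the entry block)."""
    strict = dom[n] - {n}
    return next(d for d in strict if all(s in dom[d] for s in strict))

# hypothetical maze function: the entry block compares the input against 'N'
# and either falls through to the block that calls the next room, or to death
cfg = {'entry': ['call_next', 'dead'], 'call_next': [], 'dead': []}
guards = {'entry': 'N'}             # constant parsed out of the LLIL_IF in entry
dom = dominators(cfg, 'entry')
print(guards[immediate_dominator(dom, 'call_next')])   # 'N'
```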
we put them all together to get the flag.
`flag{SSEEEESSWWWWSSEEEEEEEESSSSEENNEESSEENNNNNNWWNNNNEESSEENNEESSEESSSSWWNNWWSSSSEESSWWSSEEEESSSSWWNNWWWWSSEESSEEEESSSSSSEESSWWSSSSEEEEEEEESSWWSSSSEEEENNNNNNNNEESSSSSSSSEENNNNNNNNEEEEEESSWWWWSSSSEESSWWSSEEEENNEESSEEEENNWWNNNNEESSEEEEEEEESSWWWWSSSSEEEESSWWWWSSEEEESSWWSSSSEEEEEESSSSSSSS}`
tl;dr static analysis is important.
| {
"pile_set_name": "Github"
} |
// mkerrors.sh -m64
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build amd64,openbsd
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -m64 _const.go
package unix
import "syscall"
const (
AF_APPLETALK = 0x10
AF_BLUETOOTH = 0x20
AF_CCITT = 0xa
AF_CHAOS = 0x5
AF_CNT = 0x15
AF_COIP = 0x14
AF_DATAKIT = 0x9
AF_DECnet = 0xc
AF_DLI = 0xd
AF_E164 = 0x1a
AF_ECMA = 0x8
AF_ENCAP = 0x1c
AF_HYLINK = 0xf
AF_IMPLINK = 0x3
AF_INET = 0x2
AF_INET6 = 0x18
AF_IPX = 0x17
AF_ISDN = 0x1a
AF_ISO = 0x7
AF_KEY = 0x1e
AF_LAT = 0xe
AF_LINK = 0x12
AF_LOCAL = 0x1
AF_MAX = 0x24
AF_MPLS = 0x21
AF_NATM = 0x1b
AF_NS = 0x6
AF_OSI = 0x7
AF_PUP = 0x4
AF_ROUTE = 0x11
AF_SIP = 0x1d
AF_SNA = 0xb
AF_UNIX = 0x1
AF_UNSPEC = 0x0
ARPHRD_ETHER = 0x1
ARPHRD_FRELAY = 0xf
ARPHRD_IEEE1394 = 0x18
ARPHRD_IEEE802 = 0x6
B0 = 0x0
B110 = 0x6e
B115200 = 0x1c200
B1200 = 0x4b0
B134 = 0x86
B14400 = 0x3840
B150 = 0x96
B1800 = 0x708
B19200 = 0x4b00
B200 = 0xc8
B230400 = 0x38400
B2400 = 0x960
B28800 = 0x7080
B300 = 0x12c
B38400 = 0x9600
B4800 = 0x12c0
B50 = 0x32
B57600 = 0xe100
B600 = 0x258
B7200 = 0x1c20
B75 = 0x4b
B76800 = 0x12c00
B9600 = 0x2580
BIOCFLUSH = 0x20004268
BIOCGBLEN = 0x40044266
BIOCGDIRFILT = 0x4004427c
BIOCGDLT = 0x4004426a
BIOCGDLTLIST = 0xc010427b
BIOCGETIF = 0x4020426b
BIOCGFILDROP = 0x40044278
BIOCGHDRCMPLT = 0x40044274
BIOCGRSIG = 0x40044273
BIOCGRTIMEOUT = 0x4010426e
BIOCGSTATS = 0x4008426f
BIOCIMMEDIATE = 0x80044270
BIOCLOCK = 0x20004276
BIOCPROMISC = 0x20004269
BIOCSBLEN = 0xc0044266
BIOCSDIRFILT = 0x8004427d
BIOCSDLT = 0x8004427a
BIOCSETF = 0x80104267
BIOCSETIF = 0x8020426c
BIOCSETWF = 0x80104277
BIOCSFILDROP = 0x80044279
BIOCSHDRCMPLT = 0x80044275
BIOCSRSIG = 0x80044272
BIOCSRTIMEOUT = 0x8010426d
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALIGNMENT = 0x4
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_DIRECTION_IN = 0x1
BPF_DIRECTION_OUT = 0x2
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXBUFSIZE = 0x200000
BPF_MAXINSNS = 0x200
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINBUFSIZE = 0x20
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
CREAD = 0x800
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
CS8 = 0x300
CSIZE = 0x300
CSTART = 0x11
CSTATUS = 0xff
CSTOP = 0x13
CSTOPB = 0x400
CSUSP = 0x1a
CTL_MAXNAME = 0xc
CTL_NET = 0x4
DIOCOSFPFLUSH = 0x2000444e
DLT_ARCNET = 0x7
DLT_ATM_RFC1483 = 0xb
DLT_AX25 = 0x3
DLT_CHAOS = 0x5
DLT_C_HDLC = 0x68
DLT_EN10MB = 0x1
DLT_EN3MB = 0x2
DLT_ENC = 0xd
DLT_FDDI = 0xa
DLT_IEEE802 = 0x6
DLT_IEEE802_11 = 0x69
DLT_IEEE802_11_RADIO = 0x7f
DLT_LOOP = 0xc
DLT_MPLS = 0xdb
DLT_NULL = 0x0
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPP = 0x9
DLT_PPP_BSDOS = 0x10
DLT_PPP_ETHER = 0x33
DLT_PPP_SERIAL = 0x32
DLT_PRONET = 0x4
DLT_RAW = 0xe
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xf
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
DT_FIFO = 0x1
DT_LNK = 0xa
DT_REG = 0x8
DT_SOCK = 0xc
DT_UNKNOWN = 0x0
ECHO = 0x8
ECHOCTL = 0x40
ECHOE = 0x2
ECHOK = 0x4
ECHOKE = 0x1
ECHONL = 0x10
ECHOPRT = 0x20
EMT_TAGOVF = 0x1
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
ETHERTYPE_AARP = 0x80f3
ETHERTYPE_ACCTON = 0x8390
ETHERTYPE_AEONIC = 0x8036
ETHERTYPE_ALPHA = 0x814a
ETHERTYPE_AMBER = 0x6008
ETHERTYPE_AMOEBA = 0x8145
ETHERTYPE_AOE = 0x88a2
ETHERTYPE_APOLLO = 0x80f7
ETHERTYPE_APOLLODOMAIN = 0x8019
ETHERTYPE_APPLETALK = 0x809b
ETHERTYPE_APPLITEK = 0x80c7
ETHERTYPE_ARGONAUT = 0x803a
ETHERTYPE_ARP = 0x806
ETHERTYPE_AT = 0x809b
ETHERTYPE_ATALK = 0x809b
ETHERTYPE_ATOMIC = 0x86df
ETHERTYPE_ATT = 0x8069
ETHERTYPE_ATTSTANFORD = 0x8008
ETHERTYPE_AUTOPHON = 0x806a
ETHERTYPE_AXIS = 0x8856
ETHERTYPE_BCLOOP = 0x9003
ETHERTYPE_BOFL = 0x8102
ETHERTYPE_CABLETRON = 0x7034
ETHERTYPE_CHAOS = 0x804
ETHERTYPE_COMDESIGN = 0x806c
ETHERTYPE_COMPUGRAPHIC = 0x806d
ETHERTYPE_COUNTERPOINT = 0x8062
ETHERTYPE_CRONUS = 0x8004
ETHERTYPE_CRONUSVLN = 0x8003
ETHERTYPE_DCA = 0x1234
ETHERTYPE_DDE = 0x807b
ETHERTYPE_DEBNI = 0xaaaa
ETHERTYPE_DECAM = 0x8048
ETHERTYPE_DECCUST = 0x6006
ETHERTYPE_DECDIAG = 0x6005
ETHERTYPE_DECDNS = 0x803c
ETHERTYPE_DECDTS = 0x803e
ETHERTYPE_DECEXPER = 0x6000
ETHERTYPE_DECLAST = 0x8041
ETHERTYPE_DECLTM = 0x803f
ETHERTYPE_DECMUMPS = 0x6009
ETHERTYPE_DECNETBIOS = 0x8040
ETHERTYPE_DELTACON = 0x86de
ETHERTYPE_DIDDLE = 0x4321
ETHERTYPE_DLOG1 = 0x660
ETHERTYPE_DLOG2 = 0x661
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
ETHERTYPE_EXCELAN = 0x8010
ETHERTYPE_EXPERDATA = 0x8049
ETHERTYPE_FLIP = 0x8146
ETHERTYPE_FLOWCONTROL = 0x8808
ETHERTYPE_FRARP = 0x808
ETHERTYPE_GENDYN = 0x8068
ETHERTYPE_HAYES = 0x8130
ETHERTYPE_HIPPI_FP = 0x8180
ETHERTYPE_HITACHI = 0x8820
ETHERTYPE_HP = 0x8005
ETHERTYPE_IEEEPUP = 0xa00
ETHERTYPE_IEEEPUPAT = 0xa01
ETHERTYPE_IMLBL = 0x4c42
ETHERTYPE_IMLBLDIAG = 0x424c
ETHERTYPE_IP = 0x800
ETHERTYPE_IPAS = 0x876c
ETHERTYPE_IPV6 = 0x86dd
ETHERTYPE_IPX = 0x8137
ETHERTYPE_IPXNEW = 0x8037
ETHERTYPE_KALPANA = 0x8582
ETHERTYPE_LANBRIDGE = 0x8038
ETHERTYPE_LANPROBE = 0x8888
ETHERTYPE_LAT = 0x6004
ETHERTYPE_LBACK = 0x9000
ETHERTYPE_LITTLE = 0x8060
ETHERTYPE_LLDP = 0x88cc
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
ETHERTYPE_MICP = 0x873a
ETHERTYPE_MOPDL = 0x6001
ETHERTYPE_MOPRC = 0x6002
ETHERTYPE_MOTOROLA = 0x818d
ETHERTYPE_MPLS = 0x8847
ETHERTYPE_MPLS_MCAST = 0x8848
ETHERTYPE_MUMPS = 0x813f
ETHERTYPE_NBPCC = 0x3c04
ETHERTYPE_NBPCLAIM = 0x3c09
ETHERTYPE_NBPCLREQ = 0x3c05
ETHERTYPE_NBPCLRSP = 0x3c06
ETHERTYPE_NBPCREQ = 0x3c02
ETHERTYPE_NBPCRSP = 0x3c03
ETHERTYPE_NBPDG = 0x3c07
ETHERTYPE_NBPDGB = 0x3c08
ETHERTYPE_NBPDLTE = 0x3c0a
ETHERTYPE_NBPRAR = 0x3c0c
ETHERTYPE_NBPRAS = 0x3c0b
ETHERTYPE_NBPRST = 0x3c0d
ETHERTYPE_NBPSCD = 0x3c01
ETHERTYPE_NBPVCD = 0x3c00
ETHERTYPE_NBS = 0x802
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
ETHERTYPE_PAE = 0x888e
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
ETHERTYPE_PPP = 0x880b
ETHERTYPE_PPPOE = 0x8864
ETHERTYPE_PPPOEDISC = 0x8863
ETHERTYPE_PRIMENTS = 0x7031
ETHERTYPE_PUP = 0x200
ETHERTYPE_PUPAT = 0x200
ETHERTYPE_QINQ = 0x88a8
ETHERTYPE_RACAL = 0x7030
ETHERTYPE_RATIONAL = 0x8150
ETHERTYPE_RAWFR = 0x6559
ETHERTYPE_RCL = 0x1995
ETHERTYPE_RDP = 0x8739
ETHERTYPE_RETIX = 0x80f2
ETHERTYPE_REVARP = 0x8035
ETHERTYPE_SCA = 0x6007
ETHERTYPE_SECTRA = 0x86db
ETHERTYPE_SECUREDATA = 0x876d
ETHERTYPE_SGITW = 0x817e
ETHERTYPE_SG_BOUNCE = 0x8016
ETHERTYPE_SG_DIAG = 0x8013
ETHERTYPE_SG_NETGAMES = 0x8014
ETHERTYPE_SG_RESV = 0x8015
ETHERTYPE_SIMNET = 0x5208
ETHERTYPE_SLOW = 0x8809
ETHERTYPE_SNA = 0x80d5
ETHERTYPE_SNMP = 0x814c
ETHERTYPE_SONIX = 0xfaf5
ETHERTYPE_SPIDER = 0x809f
ETHERTYPE_SPRITE = 0x500
ETHERTYPE_STP = 0x8181
ETHERTYPE_TALARIS = 0x812b
ETHERTYPE_TALARISMC = 0x852b
ETHERTYPE_TCPCOMP = 0x876b
ETHERTYPE_TCPSM = 0x9002
ETHERTYPE_TEC = 0x814f
ETHERTYPE_TIGAN = 0x802f
ETHERTYPE_TRAIL = 0x1000
ETHERTYPE_TRANSETHER = 0x6558
ETHERTYPE_TYMSHARE = 0x802e
ETHERTYPE_UBBST = 0x7005
ETHERTYPE_UBDEBUG = 0x900
ETHERTYPE_UBDIAGLOOP = 0x7002
ETHERTYPE_UBDL = 0x7000
ETHERTYPE_UBNIU = 0x7001
ETHERTYPE_UBNMC = 0x7003
ETHERTYPE_VALID = 0x1600
ETHERTYPE_VARIAN = 0x80dd
ETHERTYPE_VAXELN = 0x803b
ETHERTYPE_VEECO = 0x8067
ETHERTYPE_VEXP = 0x805b
ETHERTYPE_VGLAB = 0x8131
ETHERTYPE_VINES = 0xbad
ETHERTYPE_VINESECHO = 0xbaf
ETHERTYPE_VINESLOOP = 0xbae
ETHERTYPE_VITAL = 0xff00
ETHERTYPE_VLAN = 0x8100
ETHERTYPE_VLTLMAN = 0x8080
ETHERTYPE_VPROD = 0x805c
ETHERTYPE_VURESERVED = 0x8147
ETHERTYPE_WATERLOO = 0x8130
ETHERTYPE_WELLFLEET = 0x8103
ETHERTYPE_X25 = 0x805
ETHERTYPE_X75 = 0x801
ETHERTYPE_XNSSM = 0x9001
ETHERTYPE_XTP = 0x817d
ETHER_ADDR_LEN = 0x6
ETHER_ALIGN = 0x2
ETHER_CRC_LEN = 0x4
ETHER_CRC_POLY_BE = 0x4c11db6
ETHER_CRC_POLY_LE = 0xedb88320
ETHER_HDR_LEN = 0xe
ETHER_MAX_DIX_LEN = 0x600
ETHER_MAX_LEN = 0x5ee
ETHER_MIN_LEN = 0x40
ETHER_TYPE_LEN = 0x2
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
EVFILT_SYSCOUNT = 0x7
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_SYSFLAGS = 0xf000
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FLUSHO = 0x800000
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0xa
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLK = 0x7
F_GETOWN = 0x5
F_OK = 0x0
F_RDLCK = 0x1
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLK = 0x8
F_SETLKW = 0x9
F_SETOWN = 0x6
F_UNLCK = 0x2
F_WRLCK = 0x3
HUPCL = 0x4000
ICANON = 0x100
ICMP6_FILTER = 0x12
ICRNL = 0x100
IEXTEN = 0x400
IFAN_ARRIVAL = 0x0
IFAN_DEPARTURE = 0x1
IFA_ROUTE = 0x1
IFF_ALLMULTI = 0x200
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x8e52
IFF_DEBUG = 0x4
IFF_LINK0 = 0x1000
IFF_LINK1 = 0x2000
IFF_LINK2 = 0x4000
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
IFF_NOTRAILERS = 0x20
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
IFT_A12MPPSWITCH = 0x82
IFT_AAL2 = 0xbb
IFT_AAL5 = 0x31
IFT_ADSL = 0x5e
IFT_AFLANE8023 = 0x3b
IFT_AFLANE8025 = 0x3c
IFT_ARAP = 0x58
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
IFT_ASYNC = 0x54
IFT_ATM = 0x25
IFT_ATMDXI = 0x69
IFT_ATMFUNI = 0x6a
IFT_ATMIMA = 0x6b
IFT_ATMLOGICAL = 0x50
IFT_ATMRADIO = 0xbd
IFT_ATMSUBINTERFACE = 0x86
IFT_ATMVCIENDPT = 0xc2
IFT_ATMVIRTUAL = 0x95
IFT_BGPPOLICYACCOUNTING = 0xa2
IFT_BLUETOOTH = 0xf8
IFT_BRIDGE = 0xd1
IFT_BSC = 0x53
IFT_CARP = 0xf7
IFT_CCTEMUL = 0x3d
IFT_CEPT = 0x13
IFT_CES = 0x85
IFT_CHANNEL = 0x46
IFT_CNR = 0x55
IFT_COFFEE = 0x84
IFT_COMPOSITELINK = 0x9b
IFT_DCN = 0x8d
IFT_DIGITALPOWERLINE = 0x8a
IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
IFT_DLSW = 0x4a
IFT_DOCSCABLEDOWNSTREAM = 0x80
IFT_DOCSCABLEMACLAYER = 0x7f
IFT_DOCSCABLEUPSTREAM = 0x81
IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd
IFT_DS0 = 0x51
IFT_DS0BUNDLE = 0x52
IFT_DS1FDL = 0xaa
IFT_DS3 = 0x1e
IFT_DTM = 0x8c
IFT_DUMMY = 0xf1
IFT_DVBASILN = 0xac
IFT_DVBASIOUT = 0xad
IFT_DVBRCCDOWNSTREAM = 0x93
IFT_DVBRCCMACLAYER = 0x92
IFT_DVBRCCUPSTREAM = 0x94
IFT_ECONET = 0xce
IFT_ENC = 0xf4
IFT_EON = 0x19
IFT_EPLRS = 0x57
IFT_ESCON = 0x49
IFT_ETHER = 0x6
IFT_FAITH = 0xf3
IFT_FAST = 0x7d
IFT_FASTETHER = 0x3e
IFT_FASTETHERFX = 0x45
IFT_FDDI = 0xf
IFT_FIBRECHANNEL = 0x38
IFT_FRAMERELAYINTERCONNECT = 0x3a
IFT_FRAMERELAYMPI = 0x5c
IFT_FRDLCIENDPT = 0xc1
IFT_FRELAY = 0x20
IFT_FRELAYDCE = 0x2c
IFT_FRF16MFRBUNDLE = 0xa3
IFT_FRFORWARD = 0x9e
IFT_G703AT2MB = 0x43
IFT_G703AT64K = 0x42
IFT_GIF = 0xf0
IFT_GIGABITETHERNET = 0x75
IFT_GR303IDT = 0xb2
IFT_GR303RDT = 0xb1
IFT_H323GATEKEEPER = 0xa4
IFT_H323PROXY = 0xa5
IFT_HDH1822 = 0x3
IFT_HDLC = 0x76
IFT_HDSL2 = 0xa8
IFT_HIPERLAN2 = 0xb7
IFT_HIPPI = 0x2f
IFT_HIPPIINTERFACE = 0x39
IFT_HOSTPAD = 0x5a
IFT_HSSI = 0x2e
IFT_HY = 0xe
IFT_IBM370PARCHAN = 0x48
IFT_IDSL = 0x9a
IFT_IEEE1394 = 0x90
IFT_IEEE80211 = 0x47
IFT_IEEE80212 = 0x37
IFT_IEEE8023ADLAG = 0xa1
IFT_IFGSN = 0x91
IFT_IMT = 0xbe
IFT_INFINIBAND = 0xc7
IFT_INTERLEAVE = 0x7c
IFT_IP = 0x7e
IFT_IPFORWARD = 0x8e
IFT_IPOVERATM = 0x72
IFT_IPOVERCDLC = 0x6d
IFT_IPOVERCLAW = 0x6e
IFT_IPSWITCH = 0x4e
IFT_ISDN = 0x3f
IFT_ISDNBASIC = 0x14
IFT_ISDNPRIMARY = 0x15
IFT_ISDNS = 0x4b
IFT_ISDNU = 0x4c
IFT_ISO88022LLC = 0x29
IFT_ISO88023 = 0x7
IFT_ISO88024 = 0x8
IFT_ISO88025 = 0x9
IFT_ISO88025CRFPINT = 0x62
IFT_ISO88025DTR = 0x56
IFT_ISO88025FIBER = 0x73
IFT_ISO88026 = 0xa
IFT_ISUP = 0xb3
IFT_L2VLAN = 0x87
IFT_L3IPVLAN = 0x88
IFT_L3IPXVLAN = 0x89
IFT_LAPB = 0x10
IFT_LAPD = 0x4d
IFT_LAPF = 0x77
IFT_LINEGROUP = 0xd2
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
IFT_MEDIAMAILOVERIP = 0x8b
IFT_MFSIGLINK = 0xa7
IFT_MIOX25 = 0x26
IFT_MODEM = 0x30
IFT_MPC = 0x71
IFT_MPLS = 0xa6
IFT_MPLSTUNNEL = 0x96
IFT_MSDSL = 0x8f
IFT_MVL = 0xbf
IFT_MYRINET = 0x63
IFT_NFAS = 0xaf
IFT_NSIP = 0x1b
IFT_OPTICALCHANNEL = 0xc3
IFT_OPTICALTRANSPORT = 0xc4
IFT_OTHER = 0x1
IFT_P10 = 0xc
IFT_P80 = 0xd
IFT_PARA = 0x22
IFT_PFLOG = 0xf5
IFT_PFLOW = 0xf9
IFT_PFSYNC = 0xf6
IFT_PLC = 0xae
IFT_PON155 = 0xcf
IFT_PON622 = 0xd0
IFT_POS = 0xab
IFT_PPP = 0x17
IFT_PPPMULTILINKBUNDLE = 0x6c
IFT_PROPATM = 0xc5
IFT_PROPBWAP2MP = 0xb8
IFT_PROPCNLS = 0x59
IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
IFT_PROPMUX = 0x36
IFT_PROPVIRTUAL = 0x35
IFT_PROPWIRELESSP2P = 0x9d
IFT_PTPSERIAL = 0x16
IFT_PVC = 0xf2
IFT_Q2931 = 0xc9
IFT_QLLC = 0x44
IFT_RADIOMAC = 0xbc
IFT_RADSL = 0x5f
IFT_REACHDSL = 0xc0
IFT_RFC1483 = 0x9f
IFT_RS232 = 0x21
IFT_RSRB = 0x4f
IFT_SDLC = 0x11
IFT_SDSL = 0x60
IFT_SHDSL = 0xa9
IFT_SIP = 0x1f
IFT_SIPSIG = 0xcc
IFT_SIPTG = 0xcb
IFT_SLIP = 0x1c
IFT_SMDSDXI = 0x2b
IFT_SMDSICIP = 0x34
IFT_SONET = 0x27
IFT_SONETOVERHEADCHANNEL = 0xb9
IFT_SONETPATH = 0x32
IFT_SONETVT = 0x33
IFT_SRP = 0x97
IFT_SS7SIGLINK = 0x9c
IFT_STACKTOSTACK = 0x6f
IFT_STARLAN = 0xb
IFT_T1 = 0x12
IFT_TDLC = 0x74
IFT_TELINK = 0xc8
IFT_TERMPAD = 0x5b
IFT_TR008 = 0xb0
IFT_TRANSPHDLC = 0x7b
IFT_TUNNEL = 0x83
IFT_ULTRA = 0x1d
IFT_USB = 0xa0
IFT_V11 = 0x40
IFT_V35 = 0x2d
IFT_V36 = 0x41
IFT_V37 = 0x78
IFT_VDSL = 0x61
IFT_VIRTUALIPADDRESS = 0x70
IFT_VIRTUALTG = 0xca
IFT_VOICEDID = 0xd5
IFT_VOICEEM = 0x64
IFT_VOICEEMFGD = 0xd3
IFT_VOICEENCAP = 0x67
IFT_VOICEFGDEANA = 0xd4
IFT_VOICEFXO = 0x65
IFT_VOICEFXS = 0x66
IFT_VOICEOVERATM = 0x98
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
IFT_X25HUNTGROUP = 0x7a
IFT_X25MLP = 0x79
IFT_X25PLE = 0x28
IFT_XETHER = 0x1a
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLASSD_HOST = 0xfffffff
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 0x1c
IN_LOOPBACKNET = 0x7f
IN_RFC3021_HOST = 0x1
IN_RFC3021_NET = 0xfffffffe
IN_RFC3021_NSHIFT = 0x1f
IPPROTO_AH = 0x33
IPPROTO_CARP = 0x70
IPPROTO_DIVERT = 0x102
IPPROTO_DIVERT_INIT = 0x2
IPPROTO_DIVERT_RESP = 0x1
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x62
IPPROTO_EON = 0x50
IPPROTO_ESP = 0x32
IPPROTO_ETHERIP = 0x61
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GGP = 0x3
IPPROTO_GRE = 0x2f
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IGMP = 0x2
IPPROTO_IP = 0x0
IPPROTO_IPCOMP = 0x6c
IPPROTO_IPIP = 0x4
IPPROTO_IPV4 = 0x4
IPPROTO_IPV6 = 0x29
IPPROTO_MAX = 0x100
IPPROTO_MAXID = 0x103
IPPROTO_MOBILE = 0x37
IPPROTO_MPLS = 0x89
IPPROTO_NONE = 0x3b
IPPROTO_PFSYNC = 0xf0
IPPROTO_PIM = 0x67
IPPROTO_PUP = 0xc
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
IPV6_AUTH_LEVEL = 0x35
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_CHECKSUM = 0x1a
IPV6_DEFAULT_MULTICAST_HOPS = 0x1
IPV6_DEFAULT_MULTICAST_LOOP = 0x1
IPV6_DEFHLIM = 0x40
IPV6_DONTFRAG = 0x3e
IPV6_DSTOPTS = 0x32
IPV6_ESP_NETWORK_LEVEL = 0x37
IPV6_ESP_TRANS_LEVEL = 0x36
IPV6_FAITH = 0x1d
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
IPV6_FRAGTTL = 0x78
IPV6_HLIMDEC = 0x1
IPV6_HOPLIMIT = 0x2f
IPV6_HOPOPTS = 0x31
IPV6_IPCOMP_LEVEL = 0x3c
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXPACKET = 0xffff
IPV6_MMTU = 0x500
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
IPV6_MULTICAST_LOOP = 0xb
IPV6_NEXTHOP = 0x30
IPV6_OPTIONS = 0x1
IPV6_PATHMTU = 0x2c
IPV6_PIPEX = 0x3f
IPV6_PKTINFO = 0x2e
IPV6_PORTRANGE = 0xe
IPV6_PORTRANGE_DEFAULT = 0x0
IPV6_PORTRANGE_HIGH = 0x1
IPV6_PORTRANGE_LOW = 0x2
IPV6_RECVDSTOPTS = 0x28
IPV6_RECVDSTPORT = 0x40
IPV6_RECVHOPLIMIT = 0x25
IPV6_RECVHOPOPTS = 0x27
IPV6_RECVPATHMTU = 0x2b
IPV6_RECVPKTINFO = 0x24
IPV6_RECVRTHDR = 0x26
IPV6_RECVTCLASS = 0x39
IPV6_RTABLE = 0x1021
IPV6_RTHDR = 0x33
IPV6_RTHDRDSTOPTS = 0x23
IPV6_RTHDR_LOOSE = 0x0
IPV6_RTHDR_STRICT = 0x1
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_SOCKOPT_RESERVED1 = 0x3
IPV6_TCLASS = 0x3d
IPV6_UNICAST_HOPS = 0x4
IPV6_USE_MIN_MTU = 0x2a
IPV6_V6ONLY = 0x1b
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IP_ADD_MEMBERSHIP = 0xc
IP_AUTH_LEVEL = 0x14
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DIVERTFL = 0x1022
IP_DROP_MEMBERSHIP = 0xd
IP_ESP_NETWORK_LEVEL = 0x16
IP_ESP_TRANS_LEVEL = 0x15
IP_HDRINCL = 0x2
IP_IPCOMP_LEVEL = 0x1d
IP_IPSECFLOWINFO = 0x24
IP_IPSEC_LOCAL_AUTH = 0x1b
IP_IPSEC_LOCAL_CRED = 0x19
IP_IPSEC_LOCAL_ID = 0x17
IP_IPSEC_REMOTE_AUTH = 0x1c
IP_IPSEC_REMOTE_CRED = 0x1a
IP_IPSEC_REMOTE_ID = 0x18
IP_MAXPACKET = 0xffff
IP_MAX_MEMBERSHIPS = 0xfff
IP_MF = 0x2000
IP_MINTTL = 0x20
IP_MIN_MEMBERSHIPS = 0xf
IP_MSS = 0x240
IP_MULTICAST_IF = 0x9
IP_MULTICAST_LOOP = 0xb
IP_MULTICAST_TTL = 0xa
IP_OFFMASK = 0x1fff
IP_OPTIONS = 0x1
IP_PIPEX = 0x22
IP_PORTRANGE = 0x13
IP_PORTRANGE_DEFAULT = 0x0
IP_PORTRANGE_HIGH = 0x1
IP_PORTRANGE_LOW = 0x2
IP_RECVDSTADDR = 0x7
IP_RECVDSTPORT = 0x21
IP_RECVIF = 0x1e
IP_RECVOPTS = 0x5
IP_RECVRETOPTS = 0x6
IP_RECVRTABLE = 0x23
IP_RECVTTL = 0x1f
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_RTABLE = 0x1021
IP_TOS = 0x3
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
LCNT_OVERLOAD_FLUSH = 0x6
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_DONTNEED = 0x4
MADV_FREE = 0x6
MADV_NORMAL = 0x0
MADV_RANDOM = 0x1
MADV_SEQUENTIAL = 0x2
MADV_SPACEAVAIL = 0x5
MADV_WILLNEED = 0x3
MAP_ANON = 0x1000
MAP_COPY = 0x4
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FLAGMASK = 0x1ff7
MAP_HASSEMAPHORE = 0x200
MAP_INHERIT = 0x80
MAP_INHERIT_COPY = 0x1
MAP_INHERIT_DONATE_COPY = 0x3
MAP_INHERIT_NONE = 0x2
MAP_INHERIT_SHARE = 0x0
MAP_NOEXTEND = 0x100
MAP_NORESERVE = 0x40
MAP_PRIVATE = 0x2
MAP_RENAME = 0x20
MAP_SHARED = 0x1
MAP_TRYFIXED = 0x400
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MSG_BCAST = 0x100
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
MSG_EOR = 0x8
MSG_MCAST = 0x200
MSG_NOSIGNAL = 0x400
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
NAME_MAX = 0xff
NET_RT_DUMP = 0x1
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_MAXID = 0x6
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NOFLSH = 0x80000000
NOTE_ATTRIB = 0x8
NOTE_CHILD = 0x4
NOTE_DELETE = 0x1
NOTE_EOF = 0x2
NOTE_EXEC = 0x20000000
NOTE_EXIT = 0x80000000
NOTE_EXTEND = 0x4
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
NOTE_REVOKE = 0x40
NOTE_TRACK = 0x1
NOTE_TRACKERR = 0x2
NOTE_TRUNCATE = 0x80
NOTE_WRITE = 0x2
OCRNL = 0x10
ONLCR = 0x2
ONLRET = 0x80
ONOCR = 0x40
ONOEOT = 0x8
OPOST = 0x1
O_ACCMODE = 0x3
O_APPEND = 0x8
O_ASYNC = 0x40
O_CLOEXEC = 0x10000
O_CREAT = 0x200
O_DIRECTORY = 0x20000
O_DSYNC = 0x80
O_EXCL = 0x800
O_EXLOCK = 0x20
O_FSYNC = 0x80
O_NDELAY = 0x4
O_NOCTTY = 0x8000
O_NOFOLLOW = 0x100
O_NONBLOCK = 0x4
O_RDONLY = 0x0
O_RDWR = 0x2
O_RSYNC = 0x80
O_SHLOCK = 0x10
O_SYNC = 0x80
O_TRUNC = 0x400
O_WRONLY = 0x1
PARENB = 0x1000
PARMRK = 0x8
PARODD = 0x2000
PENDIN = 0x20000000
PF_FLUSH = 0x1
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROT_EXEC = 0x4
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_NOFILE = 0x8
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
RTAX_BRD = 0x7
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_LABEL = 0xa
RTAX_MAX = 0xb
RTAX_NETMASK = 0x2
RTAX_SRC = 0x8
RTAX_SRCMASK = 0x9
RTA_AUTHOR = 0x40
RTA_BRD = 0x80
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
RTA_IFA = 0x20
RTA_IFP = 0x10
RTA_LABEL = 0x400
RTA_NETMASK = 0x4
RTA_SRC = 0x100
RTA_SRCMASK = 0x200
RTF_ANNOUNCE = 0x4000
RTF_BLACKHOLE = 0x1000
RTF_CLONED = 0x10000
RTF_CLONING = 0x100
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
RTF_FMASK = 0x10f808
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_LLINFO = 0x400
RTF_MASK = 0x80
RTF_MODIFIED = 0x20
RTF_MPATH = 0x40000
RTF_MPLS = 0x100000
RTF_PERMANENT_ARP = 0x2000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_PROTO3 = 0x2000
RTF_REJECT = 0x8
RTF_SOURCE = 0x20000
RTF_STATIC = 0x800
RTF_TUNNEL = 0x100000
RTF_UP = 0x1
RTF_USETRAILERS = 0x8000
RTF_XRESOLVE = 0x200
RTM_ADD = 0x1
RTM_CHANGE = 0x3
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DESYNC = 0x10
RTM_GET = 0x4
RTM_IFANNOUNCE = 0xf
RTM_IFINFO = 0xe
RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MAXSIZE = 0x800
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
RTM_RTTUNIT = 0xf4240
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
RTV_MTU = 0x1
RTV_RPIPE = 0x8
RTV_RTT = 0x40
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
RT_TABLEID_MAX = 0xff
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80206931
SIOCAIFADDR = 0x8040691a
SIOCAIFGROUP = 0x80286987
SIOCALIFADDR = 0x8218691c
SIOCATMARK = 0x40047307
SIOCBRDGADD = 0x8058693c
SIOCBRDGADDS = 0x80586941
SIOCBRDGARL = 0x806e694d
SIOCBRDGDADDR = 0x81286947
SIOCBRDGDEL = 0x8058693d
SIOCBRDGDELS = 0x80586942
SIOCBRDGFLUSH = 0x80586948
SIOCBRDGFRL = 0x806e694e
SIOCBRDGGCACHE = 0xc0146941
SIOCBRDGGFD = 0xc0146952
SIOCBRDGGHT = 0xc0146951
SIOCBRDGGIFFLGS = 0xc058693e
SIOCBRDGGMA = 0xc0146953
SIOCBRDGGPARAM = 0xc0406958
SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc030694f
SIOCBRDGGSIFS = 0xc058693c
SIOCBRDGGTO = 0xc0146946
SIOCBRDGIFS = 0xc0586942
SIOCBRDGRTS = 0xc0206943
SIOCBRDGSADDR = 0xc1286944
SIOCBRDGSCACHE = 0x80146940
SIOCBRDGSFD = 0x80146952
SIOCBRDGSHT = 0x80146951
SIOCBRDGSIFCOST = 0x80586955
SIOCBRDGSIFFLGS = 0x8058693f
SIOCBRDGSIFPRIO = 0x80586954
SIOCBRDGSMA = 0x80146953
SIOCBRDGSPRI = 0x80146950
SIOCBRDGSPROTO = 0x8014695a
SIOCBRDGSTO = 0x80146945
SIOCBRDGSTXHC = 0x80146959
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
SIOCDIFGROUP = 0x80286989
SIOCDIFPHYADDR = 0x80206949
SIOCDLIFADDR = 0x8218691e
SIOCGETKALIVE = 0xc01869a4
SIOCGETLABEL = 0x8020699a
SIOCGETPFLOW = 0xc02069fe
SIOCGETPFSYNC = 0xc02069f8
SIOCGETSGCNT = 0xc0207534
SIOCGETVIFCNT = 0xc0287533
SIOCGETVLAN = 0xc0206990
SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0206921
SIOCGIFASYNCMAP = 0xc020697c
SIOCGIFBRDADDR = 0xc0206923
SIOCGIFCONF = 0xc0106924
SIOCGIFDATA = 0xc020691b
SIOCGIFDESCR = 0xc0206981
SIOCGIFDSTADDR = 0xc0206922
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGATTR = 0xc028698b
SIOCGIFGENERIC = 0xc020693a
SIOCGIFGMEMB = 0xc028698a
SIOCGIFGROUP = 0xc0286988
SIOCGIFHARDMTU = 0xc02069a5
SIOCGIFMEDIA = 0xc0306936
SIOCGIFMETRIC = 0xc0206917
SIOCGIFMTU = 0xc020697e
SIOCGIFNETMASK = 0xc0206925
SIOCGIFPDSTADDR = 0xc0206948
SIOCGIFPRIORITY = 0xc020699c
SIOCGIFPSRCADDR = 0xc0206947
SIOCGIFRDOMAIN = 0xc02069a0
SIOCGIFRTLABEL = 0xc0206983
SIOCGIFTIMESLOT = 0xc0206986
SIOCGIFXFLAGS = 0xc020699e
SIOCGLIFADDR = 0xc218691d
SIOCGLIFPHYADDR = 0xc218694b
SIOCGLIFPHYRTABLE = 0xc02069a2
SIOCGLIFPHYTTL = 0xc02069a9
SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
SIOCGSPPPPARAMS = 0xc0206994
SIOCGVH = 0xc02069f6
SIOCGVNETID = 0xc02069a7
SIOCIFCREATE = 0x8020697a
SIOCIFDESTROY = 0x80206979
SIOCIFGCLONERS = 0xc0106978
SIOCSETKALIVE = 0x801869a3
SIOCSETLABEL = 0x80206999
SIOCSETPFLOW = 0x802069fd
SIOCSETPFSYNC = 0x802069f7
SIOCSETVLAN = 0x8020698f
SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8020690c
SIOCSIFASYNCMAP = 0x8020697d
SIOCSIFBRDADDR = 0x80206913
SIOCSIFDESCR = 0x80206980
SIOCSIFDSTADDR = 0x8020690e
SIOCSIFFLAGS = 0x80206910
SIOCSIFGATTR = 0x8028698c
SIOCSIFGENERIC = 0x80206939
SIOCSIFLLADDR = 0x8020691f
SIOCSIFMEDIA = 0xc0206935
SIOCSIFMETRIC = 0x80206918
SIOCSIFMTU = 0x8020697f
SIOCSIFNETMASK = 0x80206916
SIOCSIFPHYADDR = 0x80406946
SIOCSIFPRIORITY = 0x8020699b
SIOCSIFRDOMAIN = 0x8020699f
SIOCSIFRTLABEL = 0x80206982
SIOCSIFTIMESLOT = 0x80206985
SIOCSIFXFLAGS = 0x8020699d
SIOCSLIFPHYADDR = 0x8218694a
SIOCSLIFPHYRTABLE = 0x802069a1
SIOCSLIFPHYTTL = 0x802069a8
SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
SIOCSSPPPPARAMS = 0x80206993
SIOCSVH = 0xc02069f5
SIOCSVNETID = 0x802069a6
SOCK_DGRAM = 0x2
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_STREAM = 0x1
SOL_SOCKET = 0xffff
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x2
SO_BINDANY = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
SO_LINGER = 0x80
SO_NETPROC = 0x1020
SO_OOBINLINE = 0x100
SO_PEERCRED = 0x1022
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RTABLE = 0x1021
SO_SNDBUF = 0x1001
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
SO_SPLICE = 0x1023
SO_TIMESTAMP = 0x800
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
TCIFLUSH = 0x1
TCIOFLUSH = 0x3
TCOFLUSH = 0x2
TCP_MAXBURST = 0x4
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
TCP_MAX_WINSHIFT = 0xe
TCP_MD5SIG = 0x4
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOPUSH = 0x10
TCP_NSTATES = 0xb
TCP_SACK_ENABLE = 0x8
TCSAFLUSH = 0x2
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462
TIOCDRAIN = 0x2000745e
TIOCEXCL = 0x2000740d
TIOCEXT = 0x80047460
TIOCFLAG_CLOCAL = 0x2
TIOCFLAG_CRTSCTS = 0x4
TIOCFLAG_MDMBUF = 0x8
TIOCFLAG_PPS = 0x10
TIOCFLAG_SOFTCAR = 0x1
TIOCFLUSH = 0x80047410
TIOCGETA = 0x402c7413
TIOCGETD = 0x4004741a
TIOCGFLAGS = 0x4004745d
TIOCGPGRP = 0x40047477
TIOCGSID = 0x40047463
TIOCGTSTAMP = 0x4010745b
TIOCGWINSZ = 0x40087468
TIOCMBIC = 0x8004746b
TIOCMBIS = 0x8004746c
TIOCMGET = 0x4004746a
TIOCMODG = 0x4004746a
TIOCMODS = 0x8004746d
TIOCMSET = 0x8004746d
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x20007471
TIOCNXCL = 0x2000740e
TIOCOUTQ = 0x40047473
TIOCPKT = 0x80047470
TIOCPKT_DATA = 0x0
TIOCPKT_DOSTOP = 0x20
TIOCPKT_FLUSHREAD = 0x1
TIOCPKT_FLUSHWRITE = 0x2
TIOCPKT_IOCTL = 0x40
TIOCPKT_NOSTOP = 0x10
TIOCPKT_START = 0x8
TIOCPKT_STOP = 0x4
TIOCREMOTE = 0x80047469
TIOCSBRK = 0x2000747b
TIOCSCTTY = 0x20007461
TIOCSDTR = 0x20007479
TIOCSETA = 0x802c7414
TIOCSETAF = 0x802c7416
TIOCSETAW = 0x802c7415
TIOCSETD = 0x8004741b
TIOCSFLAGS = 0x8004745c
TIOCSIG = 0x8004745f
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
TIOCSTAT = 0x80047465
TIOCSTI = 0x80017472
TIOCSTOP = 0x2000746f
TIOCSTSTAMP = 0x8008745a
TIOCSWINSZ = 0x80087467
TIOCUCNTL = 0x80047466
TOSTOP = 0x400000
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
VEOL = 0x1
VEOL2 = 0x2
VERASE = 0x3
VINTR = 0x8
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
VSTATUS = 0x12
VSTOP = 0xd
VSUSP = 0xa
VTIME = 0x11
VWERASE = 0x4
WALTSIG = 0x4
WCONTINUED = 0x8
WCOREFLAG = 0x80
WNOHANG = 0x1
WSTOPPED = 0x7f
WUNTRACED = 0x2
)
// Errors
const (
E2BIG = syscall.Errno(0x7)
EACCES = syscall.Errno(0xd)
EADDRINUSE = syscall.Errno(0x30)
EADDRNOTAVAIL = syscall.Errno(0x31)
EAFNOSUPPORT = syscall.Errno(0x2f)
EAGAIN = syscall.Errno(0x23)
EALREADY = syscall.Errno(0x25)
EAUTH = syscall.Errno(0x50)
EBADF = syscall.Errno(0x9)
EBADRPC = syscall.Errno(0x48)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x58)
ECHILD = syscall.Errno(0xa)
ECONNABORTED = syscall.Errno(0x35)
ECONNREFUSED = syscall.Errno(0x3d)
ECONNRESET = syscall.Errno(0x36)
EDEADLK = syscall.Errno(0xb)
EDESTADDRREQ = syscall.Errno(0x27)
EDOM = syscall.Errno(0x21)
EDQUOT = syscall.Errno(0x45)
EEXIST = syscall.Errno(0x11)
EFAULT = syscall.Errno(0xe)
EFBIG = syscall.Errno(0x1b)
EFTYPE = syscall.Errno(0x4f)
EHOSTDOWN = syscall.Errno(0x40)
EHOSTUNREACH = syscall.Errno(0x41)
EIDRM = syscall.Errno(0x59)
EILSEQ = syscall.Errno(0x54)
EINPROGRESS = syscall.Errno(0x24)
EINTR = syscall.Errno(0x4)
EINVAL = syscall.Errno(0x16)
EIO = syscall.Errno(0x5)
EIPSEC = syscall.Errno(0x52)
EISCONN = syscall.Errno(0x38)
EISDIR = syscall.Errno(0x15)
ELAST = syscall.Errno(0x5b)
ELOOP = syscall.Errno(0x3e)
EMEDIUMTYPE = syscall.Errno(0x56)
EMFILE = syscall.Errno(0x18)
EMLINK = syscall.Errno(0x1f)
EMSGSIZE = syscall.Errno(0x28)
ENAMETOOLONG = syscall.Errno(0x3f)
ENEEDAUTH = syscall.Errno(0x51)
ENETDOWN = syscall.Errno(0x32)
ENETRESET = syscall.Errno(0x34)
ENETUNREACH = syscall.Errno(0x33)
ENFILE = syscall.Errno(0x17)
ENOATTR = syscall.Errno(0x53)
ENOBUFS = syscall.Errno(0x37)
ENODEV = syscall.Errno(0x13)
ENOENT = syscall.Errno(0x2)
ENOEXEC = syscall.Errno(0x8)
ENOLCK = syscall.Errno(0x4d)
ENOMEDIUM = syscall.Errno(0x55)
ENOMEM = syscall.Errno(0xc)
ENOMSG = syscall.Errno(0x5a)
ENOPROTOOPT = syscall.Errno(0x2a)
ENOSPC = syscall.Errno(0x1c)
ENOSYS = syscall.Errno(0x4e)
ENOTBLK = syscall.Errno(0xf)
ENOTCONN = syscall.Errno(0x39)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x42)
ENOTSOCK = syscall.Errno(0x26)
ENOTSUP = syscall.Errno(0x5b)
ENOTTY = syscall.Errno(0x19)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x2d)
EOVERFLOW = syscall.Errno(0x57)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x2e)
EPIPE = syscall.Errno(0x20)
EPROCLIM = syscall.Errno(0x43)
EPROCUNAVAIL = syscall.Errno(0x4c)
EPROGMISMATCH = syscall.Errno(0x4b)
EPROGUNAVAIL = syscall.Errno(0x4a)
EPROTONOSUPPORT = syscall.Errno(0x2b)
EPROTOTYPE = syscall.Errno(0x29)
ERANGE = syscall.Errno(0x22)
EREMOTE = syscall.Errno(0x47)
EROFS = syscall.Errno(0x1e)
ERPCMISMATCH = syscall.Errno(0x49)
ESHUTDOWN = syscall.Errno(0x3a)
ESOCKTNOSUPPORT = syscall.Errno(0x2c)
ESPIPE = syscall.Errno(0x1d)
ESRCH = syscall.Errno(0x3)
ESTALE = syscall.Errno(0x46)
ETIMEDOUT = syscall.Errno(0x3c)
ETOOMANYREFS = syscall.Errno(0x3b)
ETXTBSY = syscall.Errno(0x1a)
EUSERS = syscall.Errno(0x44)
EWOULDBLOCK = syscall.Errno(0x23)
EXDEV = syscall.Errno(0x12)
)
// Signals
const (
SIGABRT = syscall.Signal(0x6)
SIGALRM = syscall.Signal(0xe)
SIGBUS = syscall.Signal(0xa)
SIGCHLD = syscall.Signal(0x14)
SIGCONT = syscall.Signal(0x13)
SIGEMT = syscall.Signal(0x7)
SIGFPE = syscall.Signal(0x8)
SIGHUP = syscall.Signal(0x1)
SIGILL = syscall.Signal(0x4)
SIGINFO = syscall.Signal(0x1d)
SIGINT = syscall.Signal(0x2)
SIGIO = syscall.Signal(0x17)
SIGIOT = syscall.Signal(0x6)
SIGKILL = syscall.Signal(0x9)
SIGPIPE = syscall.Signal(0xd)
SIGPROF = syscall.Signal(0x1b)
SIGQUIT = syscall.Signal(0x3)
SIGSEGV = syscall.Signal(0xb)
SIGSTOP = syscall.Signal(0x11)
SIGSYS = syscall.Signal(0xc)
SIGTERM = syscall.Signal(0xf)
SIGTHR = syscall.Signal(0x20)
SIGTRAP = syscall.Signal(0x5)
SIGTSTP = syscall.Signal(0x12)
SIGTTIN = syscall.Signal(0x15)
SIGTTOU = syscall.Signal(0x16)
SIGURG = syscall.Signal(0x10)
SIGUSR1 = syscall.Signal(0x1e)
SIGUSR2 = syscall.Signal(0x1f)
SIGVTALRM = syscall.Signal(0x1a)
SIGWINCH = syscall.Signal(0x1c)
SIGXCPU = syscall.Signal(0x18)
SIGXFSZ = syscall.Signal(0x19)
)
// Error table
var errors = [...]string{
1: "operation not permitted",
2: "no such file or directory",
3: "no such process",
4: "interrupted system call",
5: "input/output error",
6: "device not configured",
7: "argument list too long",
8: "exec format error",
9: "bad file descriptor",
10: "no child processes",
11: "resource deadlock avoided",
12: "cannot allocate memory",
13: "permission denied",
14: "bad address",
15: "block device required",
16: "device busy",
17: "file exists",
18: "cross-device link",
19: "operation not supported by device",
20: "not a directory",
21: "is a directory",
22: "invalid argument",
23: "too many open files in system",
24: "too many open files",
25: "inappropriate ioctl for device",
26: "text file busy",
27: "file too large",
28: "no space left on device",
29: "illegal seek",
30: "read-only file system",
31: "too many links",
32: "broken pipe",
33: "numerical argument out of domain",
34: "result too large",
35: "resource temporarily unavailable",
36: "operation now in progress",
37: "operation already in progress",
38: "socket operation on non-socket",
39: "destination address required",
40: "message too long",
41: "protocol wrong type for socket",
42: "protocol not available",
43: "protocol not supported",
44: "socket type not supported",
45: "operation not supported",
46: "protocol family not supported",
47: "address family not supported by protocol family",
48: "address already in use",
49: "can't assign requested address",
50: "network is down",
51: "network is unreachable",
52: "network dropped connection on reset",
53: "software caused connection abort",
54: "connection reset by peer",
55: "no buffer space available",
56: "socket is already connected",
57: "socket is not connected",
58: "can't send after socket shutdown",
59: "too many references: can't splice",
60: "connection timed out",
61: "connection refused",
62: "too many levels of symbolic links",
63: "file name too long",
64: "host is down",
65: "no route to host",
66: "directory not empty",
67: "too many processes",
68: "too many users",
69: "disc quota exceeded",
70: "stale NFS file handle",
71: "too many levels of remote in path",
72: "RPC struct is bad",
73: "RPC version wrong",
74: "RPC prog. not avail",
75: "program version wrong",
76: "bad procedure for program",
77: "no locks available",
78: "function not implemented",
79: "inappropriate file type or format",
80: "authentication error",
81: "need authenticator",
82: "IPsec processing failure",
83: "attribute not found",
84: "illegal byte sequence",
85: "no medium found",
86: "wrong medium type",
87: "value too large to be stored in data type",
88: "operation canceled",
89: "identifier removed",
90: "no message of desired type",
91: "not supported",
}
// Signal table
var signals = [...]string{
1: "hangup",
2: "interrupt",
3: "quit",
4: "illegal instruction",
5: "trace/BPT trap",
6: "abort trap",
7: "EMT trap",
8: "floating point exception",
9: "killed",
10: "bus error",
11: "segmentation fault",
12: "bad system call",
13: "broken pipe",
14: "alarm clock",
15: "terminated",
16: "urgent I/O condition",
17: "stopped (signal)",
18: "stopped",
19: "continued",
20: "child exited",
21: "stopped (tty input)",
22: "stopped (tty output)",
23: "I/O possible",
24: "cputime limit exceeded",
25: "filesize limit exceeded",
26: "virtual timer expired",
27: "profiling timer expired",
28: "window size changes",
29: "information request",
30: "user defined signal 1",
31: "user defined signal 2",
32: "thread AST",
}
| {
"pile_set_name": "Github"
} |
/* -*- mode: c++; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; show-trailing-whitespace: t -*- vim:fenc=utf-8:ft=cpp:et:sw=4:ts=4:sts=4
This file is part of the Feel library
Author(s): Christoph Winkelmann <[email protected]>
Date: 2007-06-19
Copyright (C) 2007 EPFL
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
\file levelset_arcn.cpp
\author Christoph Winkelmann <[email protected]>
\date 2007-06-19
*/
#include "levelset.hpp"
namespace Feel
{
void
LevelSet::advReactUpdateCN( AdvReact<space_p_type, imOrder, ENTITY>& advReact,
double dt,
double theta,
double sign,
const element_p_type& vx,
const element_p_type& vy,
const element_p_type& phi,
bool updateStabilization )
{
using namespace Feel::vf;
AUTO( beta, idv( vx )*oneX()+idv( vy )*oneY() );
advReact.update( /* sigma = */ 1.0/dt,
/* beta = */ theta*sign*beta,
/* f = */ ( idv( phi )/dt
- ( 1.0-theta )*sign
* ( gradv( phi )*( beta ) ) ),
/* g = */ ( idv( phi )
- sign*dt * ( gradv( phi )*( beta ) ) ),
/* updtJ = */ updateStabilization
);
}
} // Feel
| {
"pile_set_name": "Github"
} |
package com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.parameter;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.sequenceiq.common.model.JsonEntity;
import com.sequenceiq.cloudbreak.doc.ModelDescriptions.TemplateModelDescription;
import com.sequenceiq.common.api.type.EncryptionType;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
@ApiModel
@JsonIgnoreProperties(ignoreUnknown = true)
public class EncryptionParametersV4Base implements JsonEntity {
@ApiModelProperty(value = TemplateModelDescription.ENCRYPTION_TYPE, allowableValues = "DEFAULT,NONE,CUSTOM")
private EncryptionType type;
@ApiModelProperty(TemplateModelDescription.ENCRYPTION_KEY)
private String key;
public EncryptionType getType() {
return type;
}
public void setType(EncryptionType type) {
this.type = type;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
}
| {
"pile_set_name": "Github"
} |
# Lines starting with '#' and sections without content
# are not displayed by a call to 'details'
#
[Website]
http://vuighe.net/xem-phim-online/xao-quyet-tap-1-insidious-ep-1_22200.html
[filters]
http://8x85.com/popup.js
http://vuighe.net/public/js/4menshop_pop.js
http://vuighe.net/public/js/popup.js
[other]
# Any other details
[comments]
fanboy | {
"pile_set_name": "Github"
} |
version: 1
n_points: 4
{
421.500 317.500
421.500 629.500
709.500 629.500
709.500 317.500
}
| {
"pile_set_name": "Github"
} |
File=fallapart.kcfg
ClassName=FallApartConfig
NameSpace=KWin
Singleton=true
Mutators=true
| {
"pile_set_name": "Github"
} |
/* Traits for Outcome
(C) 2018-2019 Niall Douglas <http://www.nedproductions.biz/> (8 commits)
File Created: March 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License in the accompanying file
Licence.txt or at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file Licence.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef OUTCOME_TRAIT_HPP
#define OUTCOME_TRAIT_HPP
#include "config.hpp"
OUTCOME_V2_NAMESPACE_BEGIN
namespace trait
{
/*! AWAITING HUGO JSON CONVERSION TOOL
SIGNATURE NOT RECOGNISED
*/
template <class R> //
static constexpr bool type_can_be_used_in_basic_result = //
(!std::is_reference<R>::value //
&& !OUTCOME_V2_NAMESPACE::detail::is_in_place_type_t<std::decay_t<R>>::value //
&& !is_success_type<R> //
&& !is_failure_type<R> //
&& !std::is_array<R>::value //
&& (std::is_void<R>::value || (std::is_object<R>::value //
&& std::is_destructible<R>::value)) //
);
/*! AWAITING HUGO JSON CONVERSION TOOL
type definition is_error_type. Potential doc page: NOT FOUND
*/
template <class E> struct is_error_type
{
static constexpr bool value = false;
};
/*! AWAITING HUGO JSON CONVERSION TOOL
type definition is_error_type_enum. Potential doc page: NOT FOUND
*/
template <class E, class Enum> struct is_error_type_enum
{
static constexpr bool value = false;
};
namespace detail
{
template <class T> using devoid = OUTCOME_V2_NAMESPACE::detail::devoid<T>;
template <class T> std::add_rvalue_reference_t<devoid<T>> declval() noexcept;
// From http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4436.pdf
namespace detector_impl
{
template <class...> using void_t = void;
template <class Default, class, template <class...> class Op, class... Args> struct detector
{
static constexpr bool value = false;
using type = Default;
};
template <class Default, template <class...> class Op, class... Args> struct detector<Default, void_t<Op<Args...>>, Op, Args...>
{
static constexpr bool value = true;
using type = Op<Args...>;
};
} // namespace detector_impl
template <template <class...> class Op, class... Args> using is_detected = detector_impl::detector<void, void, Op, Args...>;
template <class Arg> using result_of_make_error_code = decltype(make_error_code(declval<Arg>()));
template <class Arg> using introspect_make_error_code = is_detected<result_of_make_error_code, Arg>;
template <class Arg> using result_of_make_exception_ptr = decltype(make_exception_ptr(declval<Arg>()));
template <class Arg> using introspect_make_exception_ptr = is_detected<result_of_make_exception_ptr, Arg>;
template <class T> struct _is_error_code_available
{
static constexpr bool value = detail::introspect_make_error_code<T>::value;
using type = typename detail::introspect_make_error_code<T>::type;
};
template <class T> struct _is_exception_ptr_available
{
static constexpr bool value = detail::introspect_make_exception_ptr<T>::value;
using type = typename detail::introspect_make_exception_ptr<T>::type;
};
} // namespace detail
/*! AWAITING HUGO JSON CONVERSION TOOL
type definition is_error_code_available. Potential doc page: NOT FOUND
*/
template <class T> struct is_error_code_available
{
static constexpr bool value = detail::_is_error_code_available<std::decay_t<T>>::value;
using type = typename detail::_is_error_code_available<std::decay_t<T>>::type;
};
template <class T> constexpr bool is_error_code_available_v = detail::_is_error_code_available<std::decay_t<T>>::value;
/*! AWAITING HUGO JSON CONVERSION TOOL
type definition is_exception_ptr_available. Potential doc page: NOT FOUND
*/
template <class T> struct is_exception_ptr_available
{
static constexpr bool value = detail::_is_exception_ptr_available<std::decay_t<T>>::value;
using type = typename detail::_is_exception_ptr_available<std::decay_t<T>>::type;
};
template <class T> constexpr bool is_exception_ptr_available_v = detail::_is_exception_ptr_available<std::decay_t<T>>::value;
} // namespace trait
OUTCOME_V2_NAMESPACE_END
#endif
| {
"pile_set_name": "Github"
} |
/*
File cache_buffer.hh: implementation of the CacheBuffer class.
The CacheBuffer objects are used to keep a pre-computed copy of the output of the associated operation.
The computation is performed in the background while the program is idle. The CacheBuffer uses an image pyramid
for fast zooming.
*/
/*
Copyright (C) 2014 Ferrero Andrea
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
These files are distributed with PhotoFlow - http://aferrero2707.github.io/PhotoFlow/
*/
#ifndef CACHE_BUFFER_H
#define CACHE_BUFFER_H
#include <math.h>
#include <string>
#include <list>
#include <vector>
#include <iostream>
#include <fstream>
#include <vips/vips.h>
//#include <vips/vips>
#include "pftypes.hh"
#include "format_info.hh"
#include "property.hh"
#include "imagepyramid.hh"
namespace PF
{
class CacheBuffer
{
// Image to be cached
VipsImage* image;
// Image associate to the disk buffer
VipsImage* cached;
ImagePyramid pyramid;
std::string filename;
int fd;
guchar* memarray;
bool memarray_assigned;
// Flag indicating if the cache buffer has already been initialized
// Used by the layer manager to write buffers upon image loading/exporting
bool initialized;
//Flag indicating if the processing is completed
bool completed;
// Coordinates of the tile being processed
int step_x, step_y;
void step_cb(int x0, int y0, guchar* buf);
public:
CacheBuffer();
virtual ~CacheBuffer()
{
if( cached )
PF_UNREF( cached, "~CacheBuffer() cached image unref" );
}
bool is_initialized() { return initialized; }
void set_initialized( bool flag ) { initialized = flag; }
VipsImage* get_image() { return image; }
void set_image( VipsImage* img ) { image = img; }
ImagePyramid& get_pyramid() { return pyramid; }
void reset( bool reinitialize=false );
bool is_completed() { return completed; }
// Save data tile-by-tile
void step();
// Save all data to file
void write();
};
};
#endif
| {
"pile_set_name": "Github"
} |
// ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------
using System;
using Microsoft.WindowsAzure.MobileServices.Query;
namespace Microsoft.WindowsAzure.MobileServices.Sync
{
/// <summary>
/// An object to represent the current position of pull in full query resullt
/// </summary>
internal class PullCursor
{
private readonly int maxRead; // used to limit the next link navigation because table storage and sql in .NET backend always return a link and also to obey $top if present
private int initialSkip;
private int totalRead; // used to track how many we have read so far since the last delta
public int Remaining { get; private set; }
public int Position
{
get { return this.initialSkip + this.totalRead; }
}
public bool Complete
{
get { return this.Remaining <= 0; }
}
public PullCursor(MobileServiceTableQueryDescription query)
{
this.Remaining = this.maxRead = query.Top.GetValueOrDefault(Int32.MaxValue);
this.initialSkip = query.Skip.GetValueOrDefault();
}
/// <summary>
/// Called when ever an item is processed from result
/// </summary>
/// <returns>True if cursor is still open, False when it is completed.</returns>
public bool OnNext()
{
if (this.Complete)
{
return false;
}
this.Remaining--;
this.totalRead++;
return true;
}
/// <summary>
/// Called when delta token is modified
/// </summary>
public void Reset()
{
this.initialSkip = 0;
this.totalRead = 0;
}
}
}
| {
"pile_set_name": "Github"
} |
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"expansion.go",
"lister.go",
"tags.go",
],
importpath = "k8s.io/code-generator/cmd/lister-gen/generators",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library",
"//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library",
"//vendor/k8s.io/gengo/args:go_default_library",
"//vendor/k8s.io/gengo/generator:go_default_library",
"//vendor/k8s.io/gengo/namer:go_default_library",
"//vendor/k8s.io/gengo/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| {
"pile_set_name": "Github"
} |
# README
用 QT 实现的24puzzle的图形界面。
给定初始状态和目标状态。
用方块的移动来直观描述使用 A*算法求解24puzzle 的过程。
感谢[朴哥](https://github.com/a367)的帮助!
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
class Google_Service_AdSense_ReportingMetadataEntry extends Google_Collection
{
protected $collection_key = 'supportedProducts';
public $compatibleDimensions;
public $compatibleMetrics;
public $id;
public $kind;
public $requiredDimensions;
public $requiredMetrics;
public $supportedProducts;
public function setCompatibleDimensions($compatibleDimensions)
{
$this->compatibleDimensions = $compatibleDimensions;
}
public function getCompatibleDimensions()
{
return $this->compatibleDimensions;
}
public function setCompatibleMetrics($compatibleMetrics)
{
$this->compatibleMetrics = $compatibleMetrics;
}
public function getCompatibleMetrics()
{
return $this->compatibleMetrics;
}
public function setId($id)
{
$this->id = $id;
}
public function getId()
{
return $this->id;
}
public function setKind($kind)
{
$this->kind = $kind;
}
public function getKind()
{
return $this->kind;
}
public function setRequiredDimensions($requiredDimensions)
{
$this->requiredDimensions = $requiredDimensions;
}
public function getRequiredDimensions()
{
return $this->requiredDimensions;
}
public function setRequiredMetrics($requiredMetrics)
{
$this->requiredMetrics = $requiredMetrics;
}
public function getRequiredMetrics()
{
return $this->requiredMetrics;
}
public function setSupportedProducts($supportedProducts)
{
$this->supportedProducts = $supportedProducts;
}
public function getSupportedProducts()
{
return $this->supportedProducts;
}
}
| {
"pile_set_name": "Github"
} |
version https://git-lfs.github.com/spec/v1
oid sha256:973d042f3313c995cd96e2af5840d6d35c984ef821725fbf6a3f38524c921dc4
size 13178
| {
"pile_set_name": "Github"
} |
/**
* Copyright 2020-present Facebook. All Rights Reserved.
*
* This program file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program in a file named COPYING; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*/
package fileutils
import (
"bytes"
"log"
"os"
"testing"
"time"
"github.com/facebook/openbmc/tools/flashy/tests"
"github.com/pkg/errors"
)
func TestPathExists(t *testing.T) {
// save log output into buf for testing
var buf bytes.Buffer
log.SetOutput(&buf)
// mock and defer restore osStat
osStatOrig := osStat
defer func() {
log.SetOutput(os.Stderr)
osStat = osStatOrig
}()
cases := []struct {
name string
osStatErr error
logContainsSeq []string
want bool
}{
{
name: "exists",
osStatErr: nil,
logContainsSeq: []string{},
want: true,
},
{
name: "surely does not exists",
osStatErr: os.ErrNotExist,
logContainsSeq: []string{},
want: false,
},
{
name: "ambiguous, log and default to false",
osStatErr: errors.Errorf("12345"),
logContainsSeq: []string{
"Existence check of path 'x' returned error '12345', defaulting to false",
},
want: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
buf = bytes.Buffer{}
osStat = func(filename string) (os.FileInfo, error) {
return nil, tc.osStatErr
}
got := PathExists("x")
if tc.want != got {
t.Errorf("want %v got %v", tc.want, got)
}
tests.LogContainsSeqTest(buf.String(), tc.logContainsSeq, t)
})
}
}
type mockFileInfo struct {
isDir bool
os.FileInfo
}
func (m *mockFileInfo) IsDir() bool {
return m.isDir
}
func TestFileExists(t *testing.T) {
// save log output into buf for testing
var buf bytes.Buffer
log.SetOutput(&buf)
// mock and defer restore osStat
osStatOrig := osStat
defer func() {
log.SetOutput(os.Stderr)
osStat = osStatOrig
}()
cases := []struct {
name string
isDir bool
osStatErr error
logContainsSeq []string
want bool
}{
{
name: "file exists and is not dir",
isDir: false,
osStatErr: nil,
logContainsSeq: []string{},
want: true,
},
{
name: "file exists and is dir",
isDir: true,
osStatErr: nil,
logContainsSeq: []string{},
want: false,
},
{
name: "file surely does not exist",
isDir: false,
osStatErr: os.ErrNotExist,
logContainsSeq: []string{},
want: false,
},
{
name: "ambiguous, log and default to false",
isDir: false,
osStatErr: errors.Errorf("12345"),
logContainsSeq: []string{
"Existence check of path 'x' returned error '12345', defaulting to false",
},
want: false,
},
}
for _, tc := range cases {
buf = bytes.Buffer{}
osStat = func(filename string) (os.FileInfo, error) {
return &mockFileInfo{tc.isDir, nil}, tc.osStatErr
}
got := FileExists("x")
if tc.want != got {
t.Errorf("want %v got %v", tc.want, got)
}
tests.LogContainsSeqTest(buf.String(), tc.logContainsSeq, t)
}
}
func TestDirExists(t *testing.T) {
// save log output into buf for testing
var buf bytes.Buffer
log.SetOutput(&buf)
// mock and defer restore osStat
osStatOrig := osStat
defer func() {
log.SetOutput(os.Stderr)
osStat = osStatOrig
}()
cases := []struct {
name string
isDir bool
osStatErr error
logContainsSeq []string
want bool
}{
{
name: "path exists and is dir",
isDir: true,
osStatErr: nil,
logContainsSeq: []string{},
want: true,
},
{
name: "path exists and is not dir",
isDir: false,
osStatErr: nil,
logContainsSeq: []string{},
want: false,
},
{
name: "path surely does not exist",
isDir: false,
osStatErr: os.ErrNotExist,
logContainsSeq: []string{},
want: false,
},
{
name: "ambiguous, log and default to false",
isDir: false,
osStatErr: errors.Errorf("12345"),
logContainsSeq: []string{
"Existence check of path 'x' returned error '12345', defaulting to false",
},
want: false,
},
}
for _, tc := range cases {
buf = bytes.Buffer{}
osStat = func(filename string) (os.FileInfo, error) {
return &mockFileInfo{tc.isDir, nil}, tc.osStatErr
}
got := DirExists("x")
if tc.want != got {
t.Errorf("want %v got %v", tc.want, got)
}
tests.LogContainsSeqTest(buf.String(), tc.logContainsSeq, t)
}
}
func TestIsELFFile(t *testing.T) {
// mock and defer restore MmapFileRange
mmapFileRangeOrig := MmapFileRange
defer func() {
MmapFileRange = mmapFileRangeOrig
}()
cases := []struct {
name string
mmapRet []byte
mmapErr error
want bool
}{
{
name: "ELF file",
mmapRet: []byte{0x7F, 'E', 'L', 'F'},
mmapErr: nil,
want: true,
},
{
name: "not ELF file",
mmapRet: []byte{0x7F, 'A', 'B', 'C'},
mmapErr: nil,
want: false,
},
{
name: "mmap error",
mmapRet: nil,
mmapErr: errors.Errorf("mmap error"),
want: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
MmapFileRange = func(filename string, offset int64, length, prot, flags int) ([]byte, error) {
return tc.mmapRet, tc.mmapErr
}
got := IsELFFile("x")
if tc.want != got {
t.Errorf("want '%v' got '%v'", tc.want, got)
}
})
}
}
func TestGlobAll(t *testing.T) {
cases := []struct {
name string
patterns []string
// these tests should be system agnostic so even if the
// path is valid we cannot test for how many files we got
// hence, we can only test for the error
want error
}{
{
name: "Empty pattern",
patterns: []string{},
want: nil,
},
{
name: "Test multiple valid patterns",
patterns: []string{"/var/*", "/var/log/messages"},
want: nil,
},
{
name: "Invalid pattern",
patterns: []string{"["},
want: errors.Errorf("Unable to resolve pattern '[': syntax error in pattern"),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
_, got := GlobAll(tc.patterns)
tests.CompareTestErrors(tc.want, got, t)
})
}
}
func TestWriteFileWIthTimeout(t *testing.T) {
writeFileOrig := WriteFile
defer func() {
WriteFile = writeFileOrig
}()
cases := []struct {
name string
writeErr error
writeTime time.Duration
timeout time.Duration
want error
}{
{
name: "within timeout",
writeErr: nil,
writeTime: 1 * time.Millisecond,
timeout: 1 * time.Second,
want: nil,
},
{
name: "within timeout but write errored",
writeErr: errors.Errorf("write error"),
writeTime: 1 * time.Millisecond,
timeout: 1 * time.Second,
want: errors.Errorf("write error"),
},
{
name: "timeout exceeded",
writeErr: nil,
writeTime: 1 * time.Second,
timeout: 1 * time.Millisecond,
want: errors.Errorf("Timed out after '1ms'"),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
WriteFile = func(
filename string,
data []byte,
perm os.FileMode,
) error {
time.Sleep(tc.writeTime)
return tc.writeErr
}
got := WriteFileWithTimeout("x", []byte("abcd"), 0, tc.timeout)
tests.CompareTestErrors(tc.want, got, t)
})
}
}
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from collections import Iterable
import numpy as np
import pandas as pd
from fastcache import lru_cache
from cached_property import cached_property
from scipy.stats import spearmanr, pearsonr, morestats
from . import performance as pef, plotting as pl
from .prepare import (get_clean_factor_and_forward_returns, rate_of_return,
std_conversion)
from .plot_utils import _use_chinese
from .utils import convert_to_forward_returns_columns, ensure_tuple
class FactorAnalyzer(object):
"""单因子分析
参数
----------
factor :
因子值
pd.DataFrame / pd.Series
一个 DataFrame, index 为日期, columns 为资产,
values 为因子的值
或一个 Series, index 为日期和资产的 MultiIndex,
values 为因子的值
prices :
用于计算因子远期收益的价格数据
pd.DataFrame
index 为日期, columns 为资产
价格数据必须覆盖因子分析时间段以及额外远期收益计算中的最大预期期数.
或 function
输入参数为 securities, start_date, end_date, count
返回值为价格数据的 DataFrame
groupby :
分组数据, 默认为 None
pd.DataFrame
index 为日期, columns 为资产,为每个资产每天的分组.
或 dict
资产-分组映射的字典. 如果传递了dict,则假定分组映射在整个时间段内保持不变.
或 function
输入参数为 securities, start_date, end_date
返回值为分组数据的 DataFrame 或 dict
weights :
权重数据, 默认为 1
pd.DataFrame
index 为日期, columns 为资产,为每个资产每天的权重.
或 dict
资产-权重映射的字典. 如果传递了dict,则假定权重映射在整个时间段内保持不变.
或 function
输入参数为 securities, start_date, end_date
返回值为权重数据的 DataFrame 或 dict
binning_by_group :
bool
如果为 True, 则对每个组分别计算分位数. 默认为 False
适用于因子值范围在各个组上变化很大的情况.
如果要分析分组(行业)中性的组合, 您最好设置为 True
quantiles :
int or sequence[float]
默认为 None
在因子分组中按照因子值大小平均分组的组数
或分位数序列, 允许不均匀分组.
例如 [0, .10, .5, .90, 1.] 或 [.05, .5, .95]
'quantiles' 和 'bins' 有且只能有一个不为 None
bins :
int or sequence[float]
默认为 None
在因子分组中使用的等宽 (按照因子值) 区间的数量
或边界值序列, 允许不均匀的区间宽度.
例如 [-4, -2, -0.5, 0, 10]
'quantiles' 和 'bins' 有且只能有一个不为 None
periods :
int or sequence[int]
远期收益的期数, 默认为 (1, 5, 10)
max_loss :
float
默认为 0.25
允许的丢弃因子数据的最大百分比 (0.00 到 1.00),
计算比较输入因子索引中的项目数和输出 DataFrame 索引中的项目数.
因子数据本身存在缺陷 (例如 NaN),
没有提供足够的价格数据来计算所有因子值的远期收益,
或者因为分组失败, 因此可以部分地丢弃因子数据
设置 max_loss = 0 以停止异常捕获.
zero_aware :
bool
默认为 False
如果为True,则分别为正负因子值计算分位数。
适用于您的信号聚集并且零是正值和负值的分界线的情况.
所有属性列表
----------
factor_data:返回因子值
- 类型: pandas.Series
- index: 为日期和股票代码的MultiIndex
clean_factor_data: 去除 nan/inf, 整理后的因子值、forward_return 和分位数
- 类型: pandas.DataFrame
- index: 为日期和股票代码的MultiIndex
- columns: 根据period选择后的forward_return
(如果调仓周期为1天, 那么 forward_return 为
[第二天的收盘价-今天的收盘价]/今天的收盘价),
因子值、行业分组、分位数数组、权重
mean_return_by_quantile: 按分位数分组加权平均因子收益
- 类型: pandas.DataFrame
- index: 分位数分组
- columns: 调仓周期
mean_return_std_by_quantile: 按分位数分组加权因子收益标准差
- 类型: pandas.DataFrame
- index: 分位数分组
- columns: 调仓周期
mean_return_by_date: 按分位数及日期分组加权平均因子收益
- 类型: pandas.DataFrame
- index: 为日期和分位数的MultiIndex
- columns: 调仓周期
mean_return_std_by_date: 按分位数及日期分组加权因子收益标准差
- 类型: pandas.DataFrame
- index: 为日期和分位数的MultiIndex
- columns: 调仓周期
mean_return_by_group: 按分位数及行业分组加权平均因子收益
- 类型: pandas.DataFrame
- index: 为行业和分位数的MultiIndex
- columns: 调仓周期
mean_return_std_by_group: 按分位数及行业分组加权因子收益标准差
- 类型: pandas.DataFrame
- index: 为行业和分位数的MultiIndex
- columns: 调仓周期
mean_return_spread_by_quantile: 最高分位数因子收益减最低分位数因子收益每日均值
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期
mean_return_spread_std_by_quantile: 最高分位数因子收益减最低分位数因子收益每日标准差
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期
cumulative_return_by_quantile:各分位数每日累积收益
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期和分位数
cumulative_returns: 按因子值加权多空组合每日累积收益
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期
top_down_cumulative_returns: 做多最高分位做空最低分位多空组合每日累计收益
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期
ic: 信息比率
- 类型: pandas.DataFrame
- index: 日期
- columns: 调仓周期
ic_by_group: 分行业信息比率
- 类型: pandas.DataFrame
- index: 行业
- columns: 调仓周期
ic_monthly: 月度信息比率
- 类型: pandas.DataFrame
- index: 月度
- columns: 调仓周期表
quantile_turnover: 换手率
- 类型: dict
- 键: 调仓周期
- index: 日期
- columns: 分位数分组
所有方法列表
----------
calc_mean_return_by_quantile:
计算按分位数分组加权因子收益和标准差
calc_factor_returns:
计算按因子值加权多空组合每日收益
compute_mean_returns_spread:
计算两个分位数相减的因子收益和标准差
calc_factor_alpha_beta:
计算因子的 alpha 和 beta
calc_factor_information_coefficient:
计算每日因子信息比率 (IC值)
calc_mean_information_coefficient:
计算因子信息比率均值 (IC值均值)
calc_average_cumulative_return_by_quantile:
按照当天的分位数算分位数未来和过去的收益均值和标准差
calc_cumulative_return_by_quantile:
计算各分位数每日累积收益
calc_cumulative_returns:
计算按因子值加权多空组合每日累积收益
calc_top_down_cumulative_returns:
计算做多最高分位做空最低分位多空组合每日累计收益
calc_autocorrelation:
根据调仓周期确定滞后期的每天计算因子自相关性
calc_autocorrelation_n_days_lag:
后 1 - n 天因子值自相关性
calc_quantile_turnover_mean_n_days_lag:
各分位数 1 - n 天平均换手率
calc_ic_mean_n_days_lag:
滞后 0 - n 天 forward return 信息比率
plot_returns_table: 打印因子收益表
plot_turnover_table: 打印换手率表
plot_information_table: 打印信息比率(IC)相关表
plot_quantile_statistics_table: 打印各分位数统计表
plot_ic_ts: 画信息比率(IC)时间序列图
plot_ic_hist: 画信息比率分布直方图
plot_ic_qq: 画信息比率 qq 图
plot_quantile_returns_bar: 画各分位数平均收益图
plot_quantile_returns_violin: 画各分位数收益分布图
plot_mean_quantile_returns_spread_time_series: 画最高分位减最低分位收益图
plot_ic_by_group: 画按行业分组信息比率(IC)图
plot_factor_auto_correlation: 画因子自相关图
plot_top_bottom_quantile_turnover: 画最高最低分位换手率图
plot_monthly_ic_heatmap: 画月度信息比率(IC)图
plot_cumulative_returns: 画按因子值加权组合每日累积收益图
plot_top_down_cumulative_returns: 画做多最大分位数做空最小分位数组合每日累积收益图
plot_cumulative_returns_by_quantile: 画各分位数每日累积收益图
plot_quantile_average_cumulative_return: 因子预测能力平均累计收益图
plot_events_distribution: 画有效因子数量统计图
create_summary_tear_sheet: 因子值特征分析
create_returns_tear_sheet: 因子收益分析
create_information_tear_sheet: 因子 IC 分析
create_turnover_tear_sheet: 因子换手率分析
create_event_returns_tear_sheet: 因子预测能力分析
create_full_tear_sheet: 全部分析
plot_disable_chinese_label: 关闭中文图例显示
"""
def __init__(self, factor, prices, groupby=None, weights=1.0,
quantiles=None, bins=None, periods=(1, 5, 10),
binning_by_group=False, max_loss=0.25, zero_aware=False):
self.factor = factor
self.prices = prices
self.groupby = groupby
self.weights = weights
self._quantiles = quantiles
self._bins = bins
self._periods = ensure_tuple(periods)
self._binning_by_group = binning_by_group
self._max_loss = max_loss
self._zero_aware = zero_aware
self.__gen_clean_factor_and_forward_returns()
def __gen_clean_factor_and_forward_returns(self):
"""格式化因子数据和定价数据"""
factor_data = self.factor
if isinstance(factor_data, pd.DataFrame):
factor_data = factor_data.stack(dropna=False)
stocks = list(factor_data.index.get_level_values(1).drop_duplicates())
start_date = min(factor_data.index.get_level_values(0))
end_date = max(factor_data.index.get_level_values(0))
if hasattr(self.prices, "__call__"):
prices = self.prices(securities=stocks,
start_date=start_date,
end_date=end_date,
period=max(self._periods))
prices = prices.loc[~prices.index.duplicated()]
else:
prices = self.prices
self._prices = prices
if hasattr(self.groupby, "__call__"):
groupby = self.groupby(securities=stocks,
start_date=start_date,
end_date=end_date)
else:
groupby = self.groupby
self._groupby = groupby
if hasattr(self.weights, "__call__"):
weights = self.weights(stocks,
start_date=start_date,
end_date=end_date)
else:
weights = self.weights
self._weights = weights
self._clean_factor_data = get_clean_factor_and_forward_returns(
factor_data,
prices,
groupby=groupby,
weights=weights,
binning_by_group=self._binning_by_group,
quantiles=self._quantiles,
bins=self._bins,
periods=self._periods,
max_loss=self._max_loss,
zero_aware=self._zero_aware
)
@property
def clean_factor_data(self):
return self._clean_factor_data
@property
def _factor_quantile(self):
data = self.clean_factor_data
if not data.empty:
return max(data.factor_quantile)
else:
_quantiles = self._quantiles
_bins = self._bins
_zero_aware = self._zero_aware
get_len = lambda x: len(x) - 1 if isinstance(x, Iterable) else int(x)
if _quantiles is not None and _bins is None and not _zero_aware:
return get_len(_quantiles)
elif _quantiles is not None and _bins is None and _zero_aware:
return int(_quantiles) // 2 * 2
elif _bins is not None and _quantiles is None and not _zero_aware:
return get_len(_bins)
elif _bins is not None and _quantiles is None and _zero_aware:
return int(_bins) // 2 * 2
@lru_cache(16)
def calc_mean_return_by_quantile(self, by_date=False, by_group=False,
demeaned=False, group_adjust=False):
"""计算按分位数分组因子收益和标准差
因子收益为收益按照 weight 列中权重的加权平均值
参数:
by_date:
- True: 按天计算收益
- False: 不按天计算收益
by_group:
- True: 按行业计算收益
- False: 不按行业计算收益
demeaned:
- True: 使用超额收益计算各分位数收益,超额收益=收益-基准收益
(基准收益被认为是每日所有股票收益按照weight列中权重的加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性收益计算各分位数收益,行业中性收益=收益-行业收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
"""
return pef.mean_return_by_quantile(self._clean_factor_data,
by_date=by_date,
by_group=by_group,
demeaned=demeaned,
group_adjust=group_adjust)
@lru_cache(4)
def calc_factor_returns(self, demeaned=True, group_adjust=False):
"""计算按因子值加权组合每日收益
权重 = 每日因子值 / 每日因子值的绝对值的和
正的权重代表买入, 负的权重代表卖出
参数:
demeaned:
- True: 对权重去均值 (每日权重 = 每日权重 - 每日权重的均值), 使组合转换为 cash-neutral 多空组合
- False: 不对权重去均值
group_adjust:
- True: 对权重分行业去均值 (每日权重 = 每日权重 - 每日各行业权重的均值),
使组合转换为 industry-neutral 多空组合
- False: 不对权重分行业去均值
"""
return pef.factor_returns(self._clean_factor_data,
demeaned=demeaned,
group_adjust=group_adjust)
def compute_mean_returns_spread(self, upper_quant=None, lower_quant=None,
by_date=True, by_group=False,
demeaned=False, group_adjust=False):
"""计算两个分位数相减的因子收益和标准差
参数:
upper_quant: 用 upper_quant 选择的分位数减去 lower_quant 选择的分位数
lower_quant: 用 upper_quant 选择的分位数减去 lower_quant 选择的分位数
by_date:
- True: 按天计算两个分位数相减的因子收益和标准差
- False: 不按天计算两个分位数相减的因子收益和标准差
by_group:
- True: 分行业计算两个分位数相减的因子收益和标准差
- False: 不分行业计算两个分位数相减的因子收益和标准差
demeaned:
- True: 使用超额收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性收益
"""
upper_quant = upper_quant if upper_quant is not None else self._factor_quantile
lower_quant = lower_quant if lower_quant is not None else 1
if ((not 1 <= upper_quant <= self._factor_quantile) or
(not 1 <= lower_quant <= self._factor_quantile)):
raise ValueError("upper_quant 和 low_quant 的取值范围为 1 - %s 的整数"
% self._factor_quantile)
mean, std = self.calc_mean_return_by_quantile(by_date=by_date, by_group=by_group,
demeaned=demeaned, group_adjust=group_adjust,)
mean = mean.apply(rate_of_return, axis=0)
std = std.apply(rate_of_return, axis=0)
return pef.compute_mean_returns_spread(mean_returns=mean,
upper_quant=upper_quant,
lower_quant=lower_quant,
std_err=std)
@lru_cache(4)
def calc_factor_alpha_beta(self, demeaned=True, group_adjust=False):
"""计算因子的 alpha 和 beta
因子值加权组合每日收益 = beta * 市场组合每日收益 + alpha
因子值加权组合每日收益计算方法见 calc_factor_returns 函数
市场组合每日收益是每日所有股票收益按照weight列中权重加权的均值
结果中的 alpha 是年化 alpha
参数:
demeaned:
详见 calc_factor_returns 中 demeaned 参数
- True: 对因子值加权组合每日收益的权重去均值 (每日权重 = 每日权重 - 每日权重的均值),
使组合转换为cash-neutral多空组合
- False: 不对权重去均值
group_adjust:
详见 calc_factor_returns 中 group_adjust 参数
- True: 对权重分行业去均值 (每日权重 = 每日权重 - 每日各行业权重的均值),
使组合转换为 industry-neutral 多空组合
- False: 不对权重分行业去均值
"""
return pef.factor_alpha_beta(self._clean_factor_data,
demeaned=demeaned,
group_adjust=group_adjust)
@lru_cache(8)
def calc_factor_information_coefficient(self, group_adjust=False, by_group=False, method=None):
"""计算每日因子信息比率 (IC值)
参数:
group_adjust:
- True: 使用行业中性收益计算 IC (行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性收益
by_group:
- True: 分行业计算 IC
- False: 不分行业计算 IC
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用普通相关系数计算IC值
"""
if method is None:
method = 'rank'
if method not in ('rank', 'normal'):
raise ValueError("`method` should be chosen from ('rank' | 'normal')")
if method == 'rank':
method = spearmanr
elif method == 'normal':
method = pearsonr
return pef.factor_information_coefficient(self._clean_factor_data,
group_adjust=group_adjust,
by_group=by_group,
method=method)
@lru_cache(16)
def calc_mean_information_coefficient(self, group_adjust=False, by_group=False,
by_time=None, method=None):
"""计算因子信息比率均值 (IC值均值)
参数:
group_adjust:
- True: 使用行业中性收益计算 IC (行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性收益
by_group:
- True: 分行业计算 IC
- False: 不分行业计算 IC
by_time:
- 'Y': 按年求均值
- 'M': 按月求均值
- None: 对所有日期求均值
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用普通相关系数计算IC值
"""
if method is None:
method = 'rank'
if method not in ('rank', 'normal'):
raise ValueError("`method` should be chosen from ('rank' | 'normal')")
if method == 'rank':
method = spearmanr
elif method == 'normal':
method = pearsonr
return pef.mean_information_coefficient(
self._clean_factor_data,
group_adjust=group_adjust,
by_group=by_group,
by_time=by_time,
method=method
)
@lru_cache(16)
def calc_average_cumulative_return_by_quantile(self, periods_before, periods_after,
demeaned=False, group_adjust=False):
"""按照当天的分位数算分位数未来和过去的收益均值和标准差
参数:
periods_before: 计算过去的天数
periods_after: 计算未来的天数
demeaned:
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
return pef.average_cumulative_return_by_quantile(
self._clean_factor_data,
prices=self._prices,
periods_before=periods_before,
periods_after=periods_after,
demeaned=demeaned,
group_adjust=group_adjust
)
@lru_cache(2)
def calc_autocorrelation(self, rank=True):
"""根据调仓周期确定滞后期的每天计算因子自相关性
当日因子值和滞后period天的因子值的自相关性
参数:
rank:
- True: 秩相关系数
- False: 普通相关系数
"""
return pd.concat(
[
pef.factor_autocorrelation(self._clean_factor_data,
period, rank=rank)
for period in self._periods
],
axis=1,
keys=list(map(convert_to_forward_returns_columns, self._periods))
)
@lru_cache(None)
def calc_quantile_turnover_mean_n_days_lag(self, n=10):
"""各分位数滞后1天到n天的换手率均值
参数:
n: 滞后 1 天到 n 天的换手率
"""
quantile_factor = self._clean_factor_data['factor_quantile']
quantile_turnover_rate = pd.concat(
[pd.Series([pef.quantile_turnover(quantile_factor, q, p).mean()
for q in range(1, int(quantile_factor.max()) + 1)],
index=range(1, int(quantile_factor.max()) + 1),
name=p)
for p in range(1, n + 1)],
axis=1, keys='lag_' + pd.Index(range(1, n + 1)).astype(str)
).T
quantile_turnover_rate.columns.name = 'factor_quantile'
return quantile_turnover_rate
@lru_cache(None)
def calc_autocorrelation_n_days_lag(self, n=10, rank=False):
"""滞后1-n天因子值自相关性
参数:
n: 滞后1天到n天的因子值自相关性
rank:
- True: 秩相关系数
- False: 普通相关系数
"""
return pd.Series(
[
pef.factor_autocorrelation(self._clean_factor_data, p, rank=rank).mean()
for p in range(1, n + 1)
],
index='lag_' + pd.Index(range(1, n + 1)).astype(str)
)
@lru_cache(None)
def _calc_ic_mean_n_day_lag(self, n, group_adjust=False, by_group=False, method=None):
if method is None:
method = 'rank'
if method not in ('rank', 'normal'):
raise ValueError("`method` should be chosen from ('rank' | 'normal')")
if method == 'rank':
method = spearmanr
elif method == 'normal':
method = pearsonr
factor_data = self._clean_factor_data.copy()
factor_value = factor_data['factor'].unstack('asset')
factor_data['factor'] = factor_value.shift(n).stack(dropna=True)
if factor_data.dropna().empty:
return pd.Series(np.nan, index=pef.get_forward_returns_columns(factor_data.columns))
ac = pef.factor_information_coefficient(
factor_data.dropna(),
group_adjust=group_adjust, by_group=by_group,
method=method
)
return ac.mean(level=('group' if by_group else None))
def calc_ic_mean_n_days_lag(self, n=10, group_adjust=False, by_group=False, method=None):
"""滞后 0 - n 天因子收益信息比率(IC)的均值
滞后 n 天 IC 表示使用当日因子值和滞后 n 天的因子收益计算 IC
参数:
n: 滞后0-n天因子收益的信息比率(IC)的均值
group_adjust:
- True: 使用行业中性收益计算 IC (行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性收益
by_group:
- True: 分行业计算 IC
- False: 不分行业计算 IC
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用普通相关系数计算IC值
"""
ic_mean = [self.calc_factor_information_coefficient(
group_adjust=group_adjust, by_group=by_group, method=method,
).mean(level=('group' if by_group else None))]
for lag in range(1, n + 1):
ic_mean.append(self._calc_ic_mean_n_day_lag(
n=lag,
group_adjust=group_adjust,
by_group=by_group,
method=method
))
if not by_group:
ic_mean = pd.concat(ic_mean, keys='lag_' + pd.Index(range(n + 1)).astype(str), axis=1)
ic_mean = ic_mean.T
else:
ic_mean = pd.concat(ic_mean, keys='lag_' + pd.Index(range(n + 1)).astype(str), axis=0)
return ic_mean
@property
def mean_return_by_quantile(self):
"""收益分析
用来画分位数收益的柱状图
返回 pandas.DataFrame, index 是 factor_quantile, 值是(1, 2, 3, 4, 5),
column 是 period 的值 (1, 5, 10)
"""
mean_ret_quantile, _ = self.calc_mean_return_by_quantile(
by_date=False,
by_group=False,
demeaned=False,
group_adjust=False,
)
mean_compret_quantile = mean_ret_quantile.apply(rate_of_return, axis=0)
return mean_compret_quantile
@property
def mean_return_std_by_quantile(self):
"""收益分析
用来画分位数收益的柱状图
返回 pandas.DataFrame, index 是 factor_quantile, 值是(1, 2, 3, 4, 5),
column 是 period 的值 (1, 5, 10)
"""
_, mean_ret_std_quantile = self.calc_mean_return_by_quantile(
by_date=False,
by_group=False,
demeaned=False,
group_adjust=False,
)
mean_ret_std_quantile = mean_ret_std_quantile.apply(rate_of_return, axis=0)
return mean_ret_std_quantile
@property
def _mean_return_by_date(self):
_mean_return_by_date, _ = self.calc_mean_return_by_quantile(
by_date=True,
by_group=False,
demeaned=False,
group_adjust=False,
)
return _mean_return_by_date
@property
def mean_return_by_date(self):
mean_return_by_date = self._mean_return_by_date.apply(rate_of_return, axis=0)
return mean_return_by_date
@property
def mean_return_std_by_date(self):
_, std_quant_daily = self.calc_mean_return_by_quantile(
by_date=True,
demeaned=False,
by_group=False,
group_adjust=False,
)
mean_return_std_by_date = std_quant_daily.apply(std_conversion, axis=0)
return mean_return_std_by_date
@property
def mean_return_by_group(self):
"""分行业的分位数收益
返回值:
MultiIndex 的 DataFrame
index 分别是分位数、 行业名称, column 是 period (1, 5, 10)
"""
mean_return_group, _ = self.calc_mean_return_by_quantile(
by_date=False,
by_group=True,
demeaned=True,
group_adjust=False,
)
mean_return_group = mean_return_group.apply(rate_of_return, axis=0)
return mean_return_group
@property
def mean_return_std_by_group(self):
_, mean_return_std_group = self.calc_mean_return_by_quantile(
by_date=False,
by_group=True,
demeaned=True,
group_adjust=False,
)
mean_return_std_group = mean_return_std_group.apply(rate_of_return, axis=0)
return mean_return_std_group
@property
def mean_return_spread_by_quantile(self):
mean_return_spread_by_quantile, _ = self.compute_mean_returns_spread()
return mean_return_spread_by_quantile
@property
def mean_return_spread_std_by_quantile(self):
_, std_spread_quant = self.compute_mean_returns_spread()
return std_spread_quant
@lru_cache(5)
def calc_cumulative_return_by_quantile(self, period=None, demeaned=False, group_adjust=False):
"""计算指定调仓周期的各分位数每日累积收益
参数:
period: 指定调仓周期
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
if period is None:
period = self._periods[0]
period_col = convert_to_forward_returns_columns(period)
factor_returns = self.calc_mean_return_by_quantile(
by_date=True, demeaned=demeaned, group_adjust=group_adjust
)[0][period_col].unstack('factor_quantile')
cum_ret = factor_returns.apply(pef.cumulative_returns, period=period)
return cum_ret
@lru_cache(20)
def calc_cumulative_returns(self, period=None,
demeaned=False, group_adjust=False):
"""计算指定调仓周期的按因子值加权组合每日累积收益
当 period > 1 时,组合的累积收益计算方法为:
组合每日收益 = (从第0天开始每period天一调仓的组合每日收益 +
从第1天开始每period天一调仓的组合每日收益 + ... +
从第period-1天开始每period天一调仓的组合每日收益) / period
组合累积收益 = 组合每日收益的累积
参数:
period: 指定调仓周期
demeaned:
详见 calc_factor_returns 中 demeaned 参数
- True: 对权重去均值 (每日权重 = 每日权重 - 每日权重的均值), 使组合转换为 cash-neutral 多空组合
- False: 不对权重去均值
group_adjust:
详见 calc_factor_returns 中 group_adjust 参数
- True: 对权重分行业去均值 (每日权重 = 每日权重 - 每日各行业权重的均值),
使组合转换为 industry-neutral 多空组合
- False: 不对权重分行业去均值
"""
if period is None:
period = self._periods[0]
period_col = convert_to_forward_returns_columns(period)
factor_returns = self.calc_factor_returns(
demeaned=demeaned, group_adjust=group_adjust
)[period_col]
return pef.cumulative_returns(factor_returns, period=period)
@lru_cache(20)
def calc_top_down_cumulative_returns(self, period=None,
demeaned=False, group_adjust=False):
"""计算做多最大分位,做空最小分位组合每日累积收益
参数:
period: 指定调仓周期
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
if period is None:
period = self._periods[0]
period_col = convert_to_forward_returns_columns(period)
mean_returns, _ = self.calc_mean_return_by_quantile(
by_date=True, by_group=False,
demeaned=demeaned, group_adjust=group_adjust,
)
mean_returns = mean_returns.apply(rate_of_return, axis=0)
upper_quant = mean_returns[period_col].xs(self._factor_quantile,
level='factor_quantile')
lower_quant = mean_returns[period_col].xs(1,
level='factor_quantile')
return pef.cumulative_returns(upper_quant - lower_quant, period=period)
@property
def ic(self):
"""IC 分析, 日度 ic
返回 DataFrame, index 是时间, column 是 period 的值 (1, 5, 10)
"""
return self.calc_factor_information_coefficient()
@property
def ic_by_group(self):
"""行业 ic"""
return self.calc_mean_information_coefficient(by_group=True)
@property
def ic_monthly(self):
ic_monthly = self.calc_mean_information_coefficient(group_adjust=False,
by_group=False,
by_time="M").copy()
ic_monthly.index = ic_monthly.index.map(lambda x: x.strftime('%Y-%m'))
return ic_monthly
@cached_property
def quantile_turnover(self):
"""换手率分析
返回值一个 dict, key 是 period, value 是一个 DataFrame(index 是日期, column 是分位数)
"""
quantile_factor = self._clean_factor_data['factor_quantile']
quantile_turnover_rate = {
convert_to_forward_returns_columns(p):
pd.concat([pef.quantile_turnover(quantile_factor, q, p)
for q in range(1, int(quantile_factor.max()) + 1)],
axis=1)
for p in self._periods
}
return quantile_turnover_rate
@property
def cumulative_return_by_quantile(self):
return {
convert_to_forward_returns_columns(p):
self.calc_cumulative_return_by_quantile(period=p)
for p in self._periods
}
@property
def cumulative_returns(self):
return pd.concat([self.calc_cumulative_returns(period=period)
for period in self._periods],
axis=1,
keys=list(map(convert_to_forward_returns_columns,
self._periods)))
@property
def top_down_cumulative_returns(self):
return pd.concat([self.calc_top_down_cumulative_returns(period=period)
for period in self._periods],
axis=1,
keys=list(map(convert_to_forward_returns_columns,
self._periods)))
def plot_returns_table(self, demeaned=False, group_adjust=False):
"""打印因子收益表
参数:
demeaned:
- True: 使用超额收益计算 (基准收益被认为是每日所有股票收益按照weight列中权重的加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
"""
mean_return_by_quantile = self.calc_mean_return_by_quantile(
by_date=False, by_group=False,
demeaned=demeaned, group_adjust=group_adjust,
)[0].apply(rate_of_return, axis=0)
mean_returns_spread, _ = self.compute_mean_returns_spread(
upper_quant=self._factor_quantile,
lower_quant=1,
by_date=True,
by_group=False,
demeaned=demeaned,
group_adjust=group_adjust,
)
pl.plot_returns_table(
self.calc_factor_alpha_beta(demeaned=demeaned),
mean_return_by_quantile,
mean_returns_spread
)
def plot_turnover_table(self):
"""打印换手率表"""
pl.plot_turnover_table(
self.calc_autocorrelation(),
self.quantile_turnover
)
def plot_information_table(self, group_adjust=False, method=None):
"""打印信息比率 (IC)相关表
参数:
group_adjust:
- True:使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False:不使用行业中性收益
method:
- 'rank':用秩相关系数计算IC值
- 'normal':用相关系数计算IC值
"""
ic = self.calc_factor_information_coefficient(
group_adjust=group_adjust,
by_group=False,
method=method
)
pl.plot_information_table(ic)
def plot_quantile_statistics_table(self):
"""打印各分位数统计表"""
pl.plot_quantile_statistics_table(self._clean_factor_data)
def plot_ic_ts(self, group_adjust=False, method=None):
"""画信息比率(IC)时间序列图
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
method:
- 'rank': 用秩相关系数计算IC值
- 'normal':用相关系数计算IC值
"""
ic = self.calc_factor_information_coefficient(
group_adjust=group_adjust, by_group=False, method=method
)
pl.plot_ic_ts(ic)
def plot_ic_hist(self, group_adjust=False, method=None):
"""画信息比率分布直方图
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用相关系数计算IC值
"""
ic = self.calc_factor_information_coefficient(
group_adjust=group_adjust,
by_group=False,
method=method
)
pl.plot_ic_hist(ic)
def plot_ic_qq(self, group_adjust=False, method=None, theoretical_dist=None):
"""画信息比率 qq 图
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用相关系数计算IC值
theoretical_dist:
- 'norm': 正态分布
- 't': t 分布
"""
theoretical_dist = 'norm' if theoretical_dist is None else theoretical_dist
theoretical_dist = morestats._parse_dist_kw(theoretical_dist)
ic = self.calc_factor_information_coefficient(
group_adjust=group_adjust,
by_group=False,
method=method,
)
pl.plot_ic_qq(ic, theoretical_dist=theoretical_dist)
def plot_quantile_returns_bar(self, by_group=False,
demeaned=False, group_adjust=False):
"""画各分位数平均收益图
参数:
by_group:
- True: 各行业的各分位数平均收益图
- False: 各分位数平均收益图
demeaned:
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
mean_return_by_quantile = self.calc_mean_return_by_quantile(
by_date=False, by_group=by_group,
demeaned=demeaned, group_adjust=group_adjust,
)[0].apply(rate_of_return, axis=0)
pl.plot_quantile_returns_bar(
mean_return_by_quantile, by_group=by_group, ylim_percentiles=None
)
def plot_quantile_returns_violin(self, demeaned=False, group_adjust=False,
ylim_percentiles=(1, 99)):
"""画各分位数收益分布图
参数:
demeaned:
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
plot_quantile_returns_violin: 有效收益分位数(单位为百分之). 画图时y轴的范围为有效收益的最大/最小值.
例如 (1, 99) 代表收益的从小到大排列的 1% 分位到 99% 分位为有效收益.
"""
mean_return_by_date = self.calc_mean_return_by_quantile(
by_date=True, by_group=False,
demeaned=demeaned, group_adjust=group_adjust
)[0].apply(rate_of_return, axis=0)
pl.plot_quantile_returns_violin(mean_return_by_date,
ylim_percentiles=ylim_percentiles)
def plot_mean_quantile_returns_spread_time_series(
self, demeaned=False, group_adjust=False, bandwidth=1
):
"""画最高分位减最低分位收益图
参数:
demeaned:
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
bandwidth: n, 加减 n 倍当日标准差
"""
mean_returns_spread, mean_returns_spread_std = self.compute_mean_returns_spread(
upper_quant=self._factor_quantile,
lower_quant=1,
by_date=True,
by_group=False,
demeaned=demeaned,
group_adjust=group_adjust,
)
pl.plot_mean_quantile_returns_spread_time_series(
mean_returns_spread, std_err=mean_returns_spread_std,
bandwidth=bandwidth
)
def plot_ic_by_group(self, group_adjust=False, method=None):
"""画按行业分组信息比率(IC)图
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
method:
- 'rank': 用秩相关系数计算IC值
- 'normal': 用相关系数计算IC值
"""
ic_by_group = self.calc_mean_information_coefficient(
group_adjust=group_adjust,
by_group=True,
method=method
)
pl.plot_ic_by_group(ic_by_group)
def plot_factor_auto_correlation(self, periods=None, rank=True):
"""画因子自相关图
参数:
periods: 滞后周期
rank:
- True: 用秩相关系数
- False: 用相关系数
"""
if periods is None:
periods = self._periods
if not isinstance(periods, Iterable):
periods = (periods,)
periods = tuple(periods)
for p in periods:
if p in self._periods:
pl.plot_factor_rank_auto_correlation(
self.calc_autocorrelation(rank=rank)[
convert_to_forward_returns_columns(p)
],
period=p
)
def plot_top_bottom_quantile_turnover(self, periods=None):
"""画最高最低分位换手率图
参数:
periods: 调仓周期
"""
quantile_turnover = self.quantile_turnover
if periods is None:
periods = self._periods
if not isinstance(periods, Iterable):
periods = (periods,)
periods = tuple(periods)
for p in periods:
if p in self._periods:
pl.plot_top_bottom_quantile_turnover(
quantile_turnover[convert_to_forward_returns_columns(p)],
period=p
)
def plot_monthly_ic_heatmap(self, group_adjust=False):
"""画月度信息比率(IC)图
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
"""
ic_monthly = self.calc_mean_information_coefficient(
group_adjust=group_adjust, by_group=False, by_time="M"
)
pl.plot_monthly_ic_heatmap(ic_monthly)
def plot_cumulative_returns(self, period=None, demeaned=False,
group_adjust=False):
"""画按因子值加权组合每日累积收益图
参数:
periods: 调仓周期
demeaned:
详见 calc_factor_returns 中 demeaned 参数
- True: 对因子值加权组合每日收益的权重去均值 (每日权重 = 每日权重 - 每日权重的均值),
使组合转换为cash-neutral多空组合
- False: 不对权重去均值
group_adjust:
详见 calc_factor_returns 中 group_adjust 参数
- True: 对权重分行业去均值 (每日权重 = 每日权重 - 每日各行业权重的均值),
使组合转换为 industry-neutral 多空组合
- False: 不对权重分行业去均值
"""
if period is None:
period = self._periods
if not isinstance(period, Iterable):
period = (period,)
period = tuple(period)
factor_returns = self.calc_factor_returns(demeaned=demeaned,
group_adjust=group_adjust)
for p in period:
if p in self._periods:
pl.plot_cumulative_returns(
factor_returns[convert_to_forward_returns_columns(p)],
period=p
)
def plot_top_down_cumulative_returns(self, period=None, demeaned=False, group_adjust=False):
"""画做多最大分位数做空最小分位数组合每日累积收益图
period: 指定调仓周期
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
if period is None:
period = self._periods
if not isinstance(period, Iterable):
period = (period, )
period = tuple(period)
for p in period:
if p in self._periods:
factor_return = self.calc_top_down_cumulative_returns(
period=p, demeaned=demeaned, group_adjust=group_adjust,
)
pl.plot_top_down_cumulative_returns(
factor_return, period=p
)
def plot_cumulative_returns_by_quantile(self, period=None, demeaned=False,
group_adjust=False):
"""画各分位数每日累积收益图
参数:
period: 调仓周期
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
if period is None:
period = self._periods
if not isinstance(period, Iterable):
period = (period,)
period = tuple(period)
mean_return_by_date, _ = self.calc_mean_return_by_quantile(
by_date=True, by_group=False, demeaned=demeaned, group_adjust=group_adjust,
)
for p in period:
if p in self._periods:
pl.plot_cumulative_returns_by_quantile(
mean_return_by_date[convert_to_forward_returns_columns(p)],
period=p
)
def plot_quantile_average_cumulative_return(self, periods_before=5, periods_after=10,
by_quantile=False, std_bar=False,
demeaned=False, group_adjust=False):
"""因子预测能力平均累计收益图
参数:
periods_before: 计算过去的天数
periods_after: 计算未来的天数
by_quantile: 是否各分位数分别显示因子预测能力平均累计收益图
std_bar:
- True: 显示标准差
- False: 不显示标准差
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
"""
average_cumulative_return_by_q = self.calc_average_cumulative_return_by_quantile(
periods_before=periods_before, periods_after=periods_after,
demeaned=demeaned, group_adjust=group_adjust
)
pl.plot_quantile_average_cumulative_return(average_cumulative_return_by_q,
by_quantile=by_quantile,
std_bar=std_bar,
periods_before=periods_before,
periods_after=periods_after)
def plot_events_distribution(self, num_days=5):
"""画有效因子数量统计图
参数:
num_days: 统计间隔天数
"""
pl.plot_events_distribution(
events=self._clean_factor_data['factor'],
num_days=num_days,
full_dates=pd.to_datetime(self.factor.index.get_level_values('date').unique())
)
def create_summary_tear_sheet(self, demeaned=False, group_adjust=False):
"""因子值特征分析
参数:
demeaned:
- True: 对每日因子收益去均值求得因子收益表
- False: 因子收益表
group_adjust:
- True: 按行业对因子收益去均值后求得因子收益表
- False: 因子收益表
"""
self.plot_quantile_statistics_table()
self.plot_returns_table(demeaned=demeaned, group_adjust=group_adjust)
self.plot_quantile_returns_bar(by_group=False, demeaned=demeaned, group_adjust=group_adjust)
pl.plt.show()
self.plot_information_table(group_adjust=group_adjust)
self.plot_turnover_table()
def create_returns_tear_sheet(self, demeaned=False, group_adjust=False, by_group=False):
"""因子值特征分析
参数:
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算 (行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
by_group:
- True: 画各行业的各分位数平均收益图
- False: 不画各行业的各分位数平均收益图
"""
self.plot_returns_table(demeaned=demeaned, group_adjust=group_adjust)
self.plot_quantile_returns_bar(by_group=False,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_cumulative_returns(
period=None, demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
self.plot_cumulative_returns_by_quantile(period=None,
demeaned=demeaned,
group_adjust=group_adjust)
self.plot_top_down_cumulative_returns(period=None,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_mean_quantile_returns_spread_time_series(
demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
if by_group:
self.plot_quantile_returns_bar(by_group=True,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_quantile_returns_violin(demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
def create_information_tear_sheet(self, group_adjust=False, by_group=False):
"""因子 IC 分析
参数:
group_adjust:
- True: 使用行业中性收益 (行业收益被认为是每日各个行业股票收益按照weight列中权重的加权的均值)
- False: 不使用行业中性收益
by_group:
- True: 画按行业分组信息比率(IC)图
- False: 画月度信息比率(IC)图
"""
self.plot_ic_ts(group_adjust=group_adjust, method=None)
pl.plt.show()
self.plot_ic_qq(group_adjust=group_adjust)
pl.plt.show()
if by_group:
self.plot_ic_by_group(group_adjust=group_adjust, method=None)
else:
self.plot_monthly_ic_heatmap(group_adjust=group_adjust)
pl.plt.show()
def create_turnover_tear_sheet(self, turnover_periods=None):
"""因子换手率分析
参数:
turnover_periods: 调仓周期
"""
self.plot_turnover_table()
self.plot_top_bottom_quantile_turnover(periods=turnover_periods)
pl.plt.show()
self.plot_factor_auto_correlation(periods=turnover_periods)
pl.plt.show()
def create_event_returns_tear_sheet(self, avgretplot=(5, 15),
demeaned=False, group_adjust=False,
std_bar=False):
"""因子预测能力分析
参数:
avgretplot: tuple 因子预测的天数
-(计算过去的天数, 计算未来的天数)
demeaned:
详见 calc_mean_return_by_quantile 中 demeaned 参数
- True: 使用超额收益计算累积收益 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False: 不使用超额收益
group_adjust:
详见 calc_mean_return_by_quantile 中 group_adjust 参数
- True: 使用行业中性化后的收益计算累积收益
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False: 不使用行业中性化后的收益
std_bar:
- True: 显示标准差
- False: 不显示标准差
"""
before, after = avgretplot
self.plot_quantile_average_cumulative_return(
periods_before=before, periods_after=after,
by_quantile=False, std_bar=False,
demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
if std_bar:
self.plot_quantile_average_cumulative_return(
periods_before=before, periods_after=after,
by_quantile=True, std_bar=True,
demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
def create_full_tear_sheet(self, demeaned=False, group_adjust=False, by_group=False,
turnover_periods=None, avgretplot=(5, 15), std_bar=False):
"""全部分析
参数:
demeaned:
- True:使用超额收益计算 (基准收益被认为是每日所有股票收益按照weight列中权重加权的均值)
- False:不使用超额收益
group_adjust:
- True:使用行业中性化后的收益计算
(行业收益被认为是每日各个行业股票收益按照weight列中权重加权的均值)
- False:不使用行业中性化后的收益
by_group:
- True: 按行业展示
- False: 不按行业展示
turnover_periods: 调仓周期
avgretplot: tuple 因子预测的天数
-(计算过去的天数, 计算未来的天数)
std_bar:
- True: 显示标准差
- False: 不显示标准差
"""
self.plot_quantile_statistics_table()
print("\n-------------------------\n")
self.plot_returns_table(demeaned=demeaned, group_adjust=group_adjust)
self.plot_quantile_returns_bar(by_group=False,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_cumulative_returns(period=None, demeaned=demeaned, group_adjust=group_adjust)
pl.plt.show()
self.plot_top_down_cumulative_returns(period=None,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_cumulative_returns_by_quantile(period=None,
demeaned=demeaned,
group_adjust=group_adjust)
self.plot_mean_quantile_returns_spread_time_series(demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
if by_group:
self.plot_quantile_returns_bar(by_group=True,
demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
self.plot_quantile_returns_violin(demeaned=demeaned,
group_adjust=group_adjust)
pl.plt.show()
print("\n-------------------------\n")
self.plot_information_table(group_adjust=group_adjust)
self.plot_ic_ts(group_adjust=group_adjust, method=None)
pl.plt.show()
self.plot_ic_qq(group_adjust=group_adjust)
pl.plt.show()
if by_group:
self.plot_ic_by_group(group_adjust=group_adjust, method=None)
else:
self.plot_monthly_ic_heatmap(group_adjust=group_adjust)
pl.plt.show()
print("\n-------------------------\n")
self.plot_turnover_table()
self.plot_top_bottom_quantile_turnover(periods=turnover_periods)
pl.plt.show()
self.plot_factor_auto_correlation(periods=turnover_periods)
pl.plt.show()
print("\n-------------------------\n")
before, after = avgretplot
self.plot_quantile_average_cumulative_return(
periods_before=before, periods_after=after,
by_quantile=False, std_bar=False,
demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
if std_bar:
self.plot_quantile_average_cumulative_return(
periods_before=before, periods_after=after,
by_quantile=True, std_bar=True,
demeaned=demeaned, group_adjust=group_adjust
)
pl.plt.show()
def plot_disable_chinese_label(self):
"""关闭中文图例显示
画图时默认会从系统中查找中文字体显示以中文图例
如果找不到中文字体则默认使用英文图例
当找到中文字体但中文显示乱码时, 可调用此 API 关闭中文图例显示而使用英文
"""
_use_chinese(False)
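
# A minimal usage sketch (illustrative only). It assumes an analyzer
# instance named `fa` was constructed elsewhere from factor and price data;
# the constructor is not part of this excerpt and the name `fa` is
# hypothetical.
#
#     fa.create_full_tear_sheet(
#         demeaned=False, group_adjust=False, by_group=False,
#         turnover_periods=None, avgretplot=(5, 15), std_bar=True,
#     )
#
# Individual plots can also be drawn directly, for example:
#
#     fa.plot_ic_ts(group_adjust=False, method='rank')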
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2007-2008 Extreme Engineering Solutions, Inc.
*
* Author: Nate Case <[email protected]>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
* LED driver for various PCA955x I2C LED drivers
*
* Supported devices:
*
* Device Description 7-bit slave address
* ------ ----------- -------------------
* PCA9550 2-bit driver 0x60 .. 0x61
* PCA9551 8-bit driver 0x60 .. 0x67
* PCA9552 16-bit driver 0x60 .. 0x67
* PCA9553/01 4-bit driver 0x62
* PCA9553/02 4-bit driver 0x63
*
* Philips PCA955x LED driver chips follow a register map as shown below:
*
* Control Register Description
* ---------------- -----------
* 0x0 Input register 0
* ..
* NUM_INPUT_REGS - 1 Last Input register X
*
* NUM_INPUT_REGS Frequency prescaler 0
* NUM_INPUT_REGS + 1 PWM register 0
* NUM_INPUT_REGS + 2 Frequency prescaler 1
* NUM_INPUT_REGS + 3 PWM register 1
*
* NUM_INPUT_REGS + 4 LED selector 0
* NUM_INPUT_REGS + 4
* + NUM_LED_REGS - 1 Last LED selector
*
* where NUM_INPUT_REGS and NUM_LED_REGS vary depending on how many
* bits the chip supports.
*/
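/*
 * Worked example (derived from the register map above, for illustration):
 * on the 16-bit PCA9552, NUM_INPUT_REGS = 2 and NUM_LED_REGS = 4, giving:
 *
 *	0x0 - 0x1	Input registers 0 and 1
 *	0x2		Frequency prescaler 0 (PSC0)
 *	0x3		PWM register 0 (PWM0)
 *	0x4		Frequency prescaler 1 (PSC1)
 *	0x5		PWM register 1 (PWM1)
 *	0x6 - 0x9	LED selectors 0 through 3 (LS0 - LS3)
 */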
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
/* LED select registers determine the source that drives LED outputs */
#define PCA955X_LS_LED_ON 0x0 /* Output LOW */
#define PCA955X_LS_LED_OFF 0x1 /* Output HI-Z */
#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
enum pca955x_type {
pca9550,
pca9551,
pca9552,
pca9553,
};
struct pca955x_chipdef {
int bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
};
static struct pca955x_chipdef pca955x_chipdefs[] = {
[pca9550] = {
.bits = 2,
.slv_addr = /* 110000x */ 0x60,
.slv_addr_shift = 1,
},
[pca9551] = {
.bits = 8,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
[pca9552] = {
.bits = 16,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
[pca9553] = {
.bits = 4,
.slv_addr = /* 110001x */ 0x62,
.slv_addr_shift = 1,
},
};
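/*
 * For example (derived from the address table in the header comment): the
 * PCA9551 entry has slv_addr_shift = 3, so the low three address bits are
 * ignored and any 7-bit address in 0x60..0x67 matches the 1100xxx pattern.
 */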
static const struct i2c_device_id pca955x_id[] = {
{ "pca9550", pca9550 },
{ "pca9551", pca9551 },
{ "pca9552", pca9552 },
{ "pca9553", pca9553 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
struct pca955x_led {
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
struct work_struct work;
spinlock_t lock;
enum led_brightness brightness;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
char name[32];
};
/* 8 bits per input register */
static inline int pca95xx_num_input_regs(int bits)
{
return (bits + 7) / 8;
}
/* 4 bits per LED selector register */
static inline int pca95xx_num_led_regs(int bits)
{
return (bits + 3) / 4;
}
/*
* Return an LED selector register value based on an existing one, with
* the appropriate 2-bit state value set for the given LED number (0-3).
*/
static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
{
return (oldval & (~(0x3 << (led_num << 1)))) |
((state & 0x3) << (led_num << 1));
}
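/*
 * Worked example: pca955x_ledsel(0xff, 1, PCA955X_LS_LED_ON) clears the
 * two selector bits for LED 1 (0xff & ~0x0c = 0xf3) and ORs in state 0x0,
 * returning 0xf3.
 */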
/*
* Write to frequency prescaler register, used to program the
* period of the PWM output. period = (PSCx + 1) / 38
*/
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
val);
}
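/*
 * Example: writing 37 to PSC0 gives a blink period of (37 + 1) / 38 = 1
 * second, per the formula above.
 */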
/*
* Write to PWM register, which determines the duty cycle of the
* output. LED is OFF when the count is less than the value of this
* register, and ON when it is greater. If PWMx == 0, LED is always OFF.
*
* Duty cycle is (256 - PWMx) / 256
*/
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
val);
}
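/*
 * Example: writing 192 to PWM1 gives a duty cycle of (256 - 192) / 256 =
 * 25%, so an LED whose selector is set to PCA955X_LS_BLINK1 is driven on
 * for a quarter of each period.
 */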
/*
* Write to LED selector register, which determines the source that
* drives the LED output.
*/
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
val);
}
/*
* Read the LED selector register, which determines the source that
* drives the LED output.
*/
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
return (u8) i2c_smbus_read_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
}
static void pca955x_led_work(struct work_struct *work)
{
struct pca955x_led *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
pca955x = container_of(work, struct pca955x_led, work);
chip_ls = pca955x->led_num / 4;
ls_led = pca955x->led_num % 4;
ls = pca955x_read_ls(pca955x->client, chip_ls);
switch (pca955x->brightness) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
case LED_OFF:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
break;
case LED_HALF:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
break;
default:
/*
* Use PWM1 for all other values. This has the unwanted
* side effect of making all LEDs on the chip share the
* same brightness level if set to a value other than
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
pca955x_write_ls(pca955x->client, chip_ls, ls);
}
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
{
struct pca955x_led *pca955x;
pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
spin_lock(&pca955x->lock);
pca955x->brightness = value;
/*
* Must use workqueue for the actual I/O since I2C operations
* can sleep.
*/
schedule_work(&pca955x->work);
spin_unlock(&pca955x->lock);
}
static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca955x_led *pca955x;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
int i, err;
chip = &pca955x_chipdefs[id->driver_data];
adapter = to_i2c_adapter(client->dev.parent);
pdata = client->dev.platform_data;
/* Make sure the slave address / chip type combo given is possible */
if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
chip->slv_addr) {
dev_err(&client->dev, "invalid slave address %02x\n",
client->addr);
return -ENODEV;
}
printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
if (pdata) {
if (pdata->num_leds != chip->bits) {
dev_err(&client->dev, "board info claims %d LEDs"
" on a %d-bit chip\n",
pdata->num_leds, chip->bits);
return -ENODEV;
}
}
pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
i2c_set_clientdata(client, pca955x);
for (i = 0; i < chip->bits; i++) {
pca955x[i].chipdef = chip;
pca955x[i].client = client;
pca955x[i].led_num = i;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
snprintf(pca955x[i].name,
sizeof(pca955x[i].name), "pca955x:%s",
pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
pca955x[i].led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
snprintf(pca955x[i].name, sizeof(pca955x[i].name),
"pca955x:%d", i);
}
spin_lock_init(&pca955x[i].lock);
pca955x[i].led_cdev.name = pca955x[i].name;
pca955x[i].led_cdev.brightness_set = pca955x_led_set;
INIT_WORK(&pca955x[i].work, pca955x_led_work);
err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
if (err < 0)
goto exit;
}
/* Turn off LEDs */
for (i = 0; i < pca95xx_num_led_regs(chip->bits); i++)
pca955x_write_ls(client, i, 0x55);
/* PWM0 is used for half brightness or 50% duty cycle */
pca955x_write_pwm(client, 0, 255-LED_HALF);
/* PWM1 is used for variable brightness, default to OFF */
pca955x_write_pwm(client, 1, 0);
/* Set to fast frequency so we do not see flashing */
pca955x_write_psc(client, 0, 0);
pca955x_write_psc(client, 1, 0);
return 0;
exit:
while (i--) {
led_classdev_unregister(&pca955x[i].led_cdev);
cancel_work_sync(&pca955x[i].work);
}
kfree(pca955x);
return err;
}
static int __devexit pca955x_remove(struct i2c_client *client)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
int i;
for (i = 0; i < pca955x->chipdef->bits; i++) {
led_classdev_unregister(&pca955x[i].led_cdev);
cancel_work_sync(&pca955x[i].work);
}
kfree(pca955x);
return 0;
}
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
.owner = THIS_MODULE,
},
.probe = pca955x_probe,
.remove = __devexit_p(pca955x_remove),
.id_table = pca955x_id,
};
static int __init pca955x_leds_init(void)
{
return i2c_add_driver(&pca955x_driver);
}
static void __exit pca955x_leds_exit(void)
{
i2c_del_driver(&pca955x_driver);
}
module_init(pca955x_leds_init);
module_exit(pca955x_leds_exit);
MODULE_AUTHOR("Nate Case <[email protected]>");
MODULE_DESCRIPTION("PCA955x LED driver");
MODULE_LICENSE("GPL v2");
| {
"pile_set_name": "Github"
} |
include $(top_srcdir)/modules.am
SUBDIRS = .
capture_planconfdir = $(confdir)/captureplans
capture_planconf_DATA = $(top_srcdir)/conf/captureplans/rtcpxr_capture_plan.cfg
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.ui.text.font
data class FontFamilyList(val fontFamilies: List<FontFamily>) : List<FontFamily> by fontFamilies {
constructor(fontFamily: FontFamily) : this(listOf(fontFamily))
constructor(vararg fontFamily: FontFamily) : this(fontFamily.asList())
init {
        assert(fontFamilies.isNotEmpty()) { "At least one FontFamily required in FontFamilyList" }
}
}
| {
"pile_set_name": "Github"
} |
@SQ SN:CHROMOSOME_I LN:1009800
@SQ SN:CHROMOSOME_II LN:5000
@SQ SN:CHROMOSOME_III LN:5000
@SQ SN:CHROMOSOME_IV LN:5000
@SQ SN:CHROMOSOME_V LN:5000
I 16 CHROMOSOME_I 2 1 27M1D73M * 0 0 CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC XG:i:1 XM:i:5 XN:i:0 XO:i:1 XS:i:-18 AS:i:-18 YT:Z:UU
II.14978392 16 CHROMOSOME_I 2 1 27M1D73M * 0 0 CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC XG:i:1 XM:i:5 XN:i:0 XO:i:1 XS:i:-18 AS:i:-18 YT:Z:UU
III 16 CHROMOSOME_I 2 1 27M1D73M * 0 0 CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC XG:i:1 XM:i:5 XN:i:0 XO:i:1 XS:i:-18 AS:i:-18 YT:Z:UU
IV 16 CHROMOSOME_I 2 1 27M1D73M * 0 0 CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC XG:i:1 XM:i:5 XN:i:0 XO:i:1 XS:i:-18 AS:i:-18 YT:Z:UU
V 16 CHROMOSOME_I 2 1 27M1D73M * 0 0 CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC XG:i:1 XM:i:5 XN:i:0 XO:i:1 XS:i:-18 AS:i:-18 YT:Z:UU
VI 2048 CHROMOSOME_I 2 1 27M100000D73M * 0 0 ACTAAGCCTAAGCCTAAGCCTAAGCCAATTATCGATTTCTGAAAAAATTATCGAATTTTCTAGAAATTTTGCAAATTTTTTCATAAAATTATCGATTTTA #############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
| {
"pile_set_name": "Github"
} |
{
"name": "JBKenBurnsView",
"version": "1.0",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"summary": "UIView that can generate a Ken Burns transition when given an array of images or paths.",
"frameworks": "QuartzCore",
"homepage": "https://github.com/jberlana/iOSKenBurns",
"authors": {
"Javier Berlana": "[email protected]"
},
"social_media_url": "http://twitter.com/jberlana",
"source": {
"git": "https://github.com/jberlana/iOSKenBurns.git",
"tag": "1.0"
},
"platforms": {
"ios": "6.0"
},
"source_files": "KenBurns/*.{h,m}",
"requires_arc": true
}
| {
"pile_set_name": "Github"
} |
var copyArray = require('./_copyArray'),
isIndex = require('./_isIndex');
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeMin = Math.min;
/**
* Reorder `array` according to the specified indexes where the element at
* the first index is assigned as the first element, the element at
* the second index is assigned as the second element, and so on.
*
* @private
* @param {Array} array The array to reorder.
* @param {Array} indexes The arranged array indexes.
* @returns {Array} Returns `array`.
*/
function reorder(array, indexes) {
var arrLength = array.length,
length = nativeMin(indexes.length, arrLength),
oldArray = copyArray(array);
while (length--) {
var index = indexes[length];
array[length] = isIndex(index, arrLength) ? oldArray[index] : undefined;
}
return array;
}
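// Example (illustrative only):
//
//     reorder(['a', 'b', 'c'], [2, 0, 1]);
//     // => ['c', 'a', 'b']  (mutates and returns the same array)
//
// The element at index 2 becomes the first element, the element at index 0
// the second, and the element at index 1 the third.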
module.exports = reorder;
| {
"pile_set_name": "Github"
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: Apache Traffic Server 6.2\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2016-01-02 21:32+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
#: ../../developer-guide/testing-with-vagrant/index.en.rst:23
msgid "Using Vagrant to Test |TS|"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:25
msgid ""
"The |ATS| project's official repository includes a Vagrantfile, intended to "
"ease the process of creating environments suitable for building and testing "
"|TS|, where all the necessary dependencies are installed automatically for "
"a variety of operating systems and common distribution releases."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:32
msgid ""
"Vagrant is a tool for building complete development environments. With an "
"easy-to-use workflow and focus on automation, Vagrant lowers development "
"environment setup time, increases development/production parity, and makes "
"the \"works on my machine\" excuse a relic of the past."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:43
msgid "`VagrantUp website <https://www.vagrantup.com/about.html>`_"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:39
msgid ""
"Vagrant can be used in combination with any of the popular configurtion "
"management and automation tools, such as `Chef <https://www.chef.io/chef/"
">`_, `Puppet <https://puppetlabs.com/>`_, `Ansible <http://www.ansible.com/"
"home>`_, and more. The Vagrantfile included in the |TS| repository happens "
"to make use of Puppet."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:46
msgid "Installing Vagrant and Dependencies"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:49
msgid "VirtualBox"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:51
msgid ""
"The virtualization software `VirtualBox <https://www.virtualbox.org/>`_ is "
"required to create and run the virtual machines created by the included "
"project Vagrantfile."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:55
msgid ""
"VirtualBox can be obtained by free from the official website, and many "
"distributions provide their own packages as well. No special configuration "
"of the software is required."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:60
msgid "Vagrant"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:62
msgid ""
"A fairly recent version of `Vagrant <https://www.vagrantup.com/downloads."
"html>`_ is necessary to use the included Vagrantfile. While older versions "
"of Vagrant could be installed through the Ruby Gems facility, modern "
"versions are only provided as distribution specific packages."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:68
msgid "NFS Server"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:70
msgid ""
"The project Vagrantfile uses the NFS shared folders support of VirtualBox "
"to mount the same directory in which the Vagrantfile is located on your "
"host machine as a network directory inside the virtual machine. For this to "
"work, your host machine must have an NFS server installed and running, and "
"the user under which you run the vagrant commands must have sudo "
"permissions to modify the NFS exports configuration and restart the NFS "
"service."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:77
msgid ""
"The virtual machine created by Vagrant will still function without a "
"working NFS server on your host machine, but you will not be able to access "
"the shared folder which includes the entire |TS| source tree. You may opt "
"to modify the Vagrantfile to use a method other than NFS, as per the "
"`Vagrant documentation <https://docs.vagrantup.com/v2/synced-folders/"
"basic_usage.html>`_."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:84
msgid "Managing Virtual Machines"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:87
msgid "Listing Available Machines"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:89
msgid ""
"The included Vagrantfile defines many variations of operating systems, "
"releases, and architectures. To see a complete list of the virtual machine "
"options available to you, run the command ``vagrant status`` from within "
"the same directory as the Vagrantfile. The command may take a few moments "
"to run as the configurations defined in the Vagrantfile are evaluated, and "
"calls are made to the underlying VirtualBox utilities to check for the "
"existence and operational state of each possibility. You should expect to "
"see output along the lines of::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:127
msgid "Creating and Destroying"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:129
msgid ""
"Creation and destruction of virtual machines with Vagrant is very simple. "
"To bring a new virtual machine into existence, run the following command "
"from the same directory in which the Vagrantfile is located::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:135
msgid ""
"Where ``<name>`` should be the specific operating system release you wish "
"to use for the virtual machine. For example, to test |TS| in a CentOS 6.4 "
"environment, you would run::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:141
msgid ""
"Running the ``vagrant up`` command for a virtual machine which does not "
"exist yet (or has previously been deleted) will create a brand new virtual "
"machine, using the appropriate image (called a *basebox* in Vagrant "
"parlance), as well as provision the machine according to any configuration "
"management rules specified in the Vagrantfile."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:147
msgid ""
"Similarly, you may destroy the virtual machine when you are finished by "
"running the command::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:152
msgid ""
"Or if you wish to only stop the virtual machine temporarily without "
"deleting it, you may run::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:157
msgid ""
"A halted virtual machine is started back up with the same ``vagrant up`` "
"command shown earlier. The difference is that Vagrant will recognize the "
"box already exists and do nothing more than start the vm process and "
"configure the virtual networking interfaces on your host. Any configuration "
"management hooks in the Vagrantfile will be skipped."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:164
msgid "Logging In"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:166
msgid ""
"Logging into a virtual machine created with Vagrant may be accomplished in "
"a couple different ways. The easiest is to let Vagrant itself figure out "
"where the machine is and how to properly authenticate you to it::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:172
msgid ""
"Using that command from within the same directory as the Vagrantfile allows "
"you to skip figuring out what virtual network interface has been attached "
"to the machine, what local port may be assigned to handle SSH forwarding, "
"and so on. As long as the virtual machine was already running, you will be "
"quickly dropped into a local shell in the virtual machine as the "
"``vagrant`` user."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:180
msgid ""
"Vagrant by default uses a widely-shared private RSA key on newly created "
"virtual machines (that are built on public basebox images). The default "
"user on these baseboxes is also configured for password-less sudo "
"permissions. This is very clearly insecure, but Vagrant is designed for "
"local testing and development, not production (or other public) uses, so "
"the project has made the philosophical trade-off in favor of ease of use."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:187
msgid ""
"Alternatively, you may SSH directly to the virtual machine. Because the "
"virtual machines are configured to use only the private virtual network "
"layer provided by VirtualBox, you cannot directly. Instead, VirtualBox has "
"created a local port mapping automatically which should be used. There is "
"no fixed, pre-determined port mapping that will be universally valid, as "
"Vagrant and VirtualBox may be used together to run an arbitrary number of "
"virtual machines simultaneously, each provisioned in any order, and defined "
"by any number and variety of Vagrantfiles."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:196
msgid ""
"The correct way to determine what port Vagrant and VirtualBox have used to "
"map to a given virtual machine is to run the following command from within "
"the same directory as your Vagrantfile::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:202
msgid ""
"That will output a configuration block, suitable for inclusion in your "
"local ``~/.ssh/config`` file. Note specifically, in addition to the port, "
"the path to the private key you will need to use as your identity when "
"attempting to log into the virtual machine."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:208
msgid "Shared Host Folders"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:210
msgid ""
"VirtualBox provides a facility for mounting directories from your host "
"machine as filesystems inside the virtual machines. The |TS| Vagrantfile "
"makes use of this feature to mount its own source tree in a predictable "
"location in the virtual environment."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:215
msgid ""
"Multiple methods are available for this, including NFS, CIFS, and simulated "
"block devices. The |TS| project opts to use NFS for its simplicity, speed, "
"support for features such as symlinks, and wide interoperability across the "
"various guest operating systems included in the Vagrantfile. Within the "
"included Vagrantfile, you can see the following line::"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:223
msgid ""
"This directs VirtualBox to mount the directory in which the |TS| "
"Vagrantfile resides as an NFS mount inside the virtual machine at the path "
"``/opt/src/trafficserver.git``. Additional host directories may be mounted "
"in the same manner should the need arise."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:229
msgid "Forwarding Custom Ports"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:241
msgid "Building |TS| Inside Vagrant"
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:243
msgid ""
"Producing |TS| builds from within the Vagrant managed virtual machines is "
"effectively no different than in any other environment. The same directory "
"in which the Vagrantfile exists will be mounted via NFS inside the virtual "
"machine at the path ``/opt/src/trafficserver.git``."
msgstr ""
#: ../../developer-guide/testing-with-vagrant/index.en.rst:250
msgid ""
"If you have run ``autoconf`` or ``configure`` from outside the virtual "
"machine environment against the same Git working copy as is mounted inside "
"the virtual machine, you will encounter failures should you attempt to run "
"any of the Make targets inside the VM. Any build related commands should be "
"run inside of the virtual machine. Additionally, if you are running more "
"than one virtual machine simultaneously, remember that they are each using "
"the same NFS export on your host machine."
msgstr ""
| {
"pile_set_name": "Github"
} |
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gonum
// Iladlc scans a matrix for its last non-zero column. Returns -1 if the matrix
// is all zeros.
//
// Iladlc is an internal routine. It is exported for testing purposes.
func (Implementation) Iladlc(m, n int, a []float64, lda int) int {
switch {
case m < 0:
panic(mLT0)
case n < 0:
panic(nLT0)
case lda < max(1, n):
panic(badLdA)
}
if n == 0 || m == 0 {
return -1
}
if len(a) < (m-1)*lda+n {
panic(shortA)
}
// Test common case where corner is non-zero.
if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 {
return n - 1
}
// Scan each row tracking the highest column seen.
highest := -1
for i := 0; i < m; i++ {
for j := n - 1; j >= 0; j-- {
if a[i*lda+j] != 0 {
highest = max(highest, j)
break
}
}
}
return highest
}
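// Example (a hypothetical call, not part of the Gonum test suite): for the
// 2x3 row-major matrix
//
//	[0 1 0]
//	[0 0 0]
//
// Iladlc(2, 3, []float64{0, 1, 0, 0, 0, 0}, 3) returns 1, the index of the
// last column containing a non-zero element.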
| {
"pile_set_name": "Github"
} |
/*
* Driver for the SWIM3 (Super Woz Integrated Machine 3)
* floppy controller found on Power Macintoshes.
*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* TODO:
* handle 2 drives
* handle GCR disks
*/
#undef DEBUG
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#define MAX_FLOPPIES 2
static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];
enum swim_state {
idle,
locating,
seeking,
settling,
do_transfer,
jogging,
available,
revalidating,
ejecting
};
#define REG(x) unsigned char x; char x ## _pad[15];
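/*
 * REG(data) expands to "unsigned char data; char data_pad[15];", so each
 * register in the struct below occupies 16 bytes, placing successive
 * registers on 16-byte boundaries to match the chip's register spacing
 * in the address map.
 */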
/*
* The names for these registers mostly represent speculation on my part.
* It will be interesting to see how close they are to the names Apple uses.
*/
struct swim3 {
REG(data);
REG(timer); /* counts down at 1MHz */
REG(error);
REG(mode);
REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */
REG(setup);
REG(control); /* writing bits clears them */
REG(status); /* writing bits sets them in control */
REG(intr);
REG(nseek); /* # tracks to seek */
REG(ctrack); /* current track number */
REG(csect); /* current sector number */
REG(gap3); /* size of gap 3 in track format */
REG(sector); /* sector # to read or write */
REG(nsect); /* # sectors to read or write */
REG(intr_enable);
};
#define control_bic control
#define control_bis status
/* Bits in select register */
#define CA_MASK 7
#define LSTRB 8
/* Bits in control register */
#define DO_SEEK 0x80
#define FORMAT 0x40
#define SELECT 0x20
#define WRITE_SECTORS 0x10
#define DO_ACTION 0x08
#define DRIVE2_ENABLE 0x04
#define DRIVE_ENABLE 0x02
#define INTR_ENABLE 0x01
/* Bits in status register */
#define FIFO_1BYTE 0x80
#define FIFO_2BYTE 0x40
#define ERROR 0x20
#define DATA 0x08
#define RDDATA 0x04
#define INTR_PENDING 0x02
#define MARK_BYTE 0x01
/* Bits in intr and intr_enable registers */
#define ERROR_INTR 0x20
#define DATA_CHANGED 0x10
#define TRANSFER_DONE 0x08
#define SEEN_SECTOR 0x04
#define SEEK_DONE 0x02
#define TIMER_DONE 0x01
/* Bits in error register */
#define ERR_DATA_CRC 0x80
#define ERR_ADDR_CRC 0x40
#define ERR_OVERRUN 0x04
#define ERR_UNDERRUN 0x01
/* Bits in setup register */
#define S_SW_RESET 0x80
#define S_GCR_WRITE 0x40
#define S_IBM_DRIVE 0x20
#define S_TEST_MODE 0x10
#define S_FCLK_DIV2 0x08
#define S_GCR 0x04
#define S_COPY_PROT 0x02
#define S_INV_WDATA 0x01
/* Select values for swim3_action */
#define SEEK_POSITIVE 0
#define SEEK_NEGATIVE 4
#define STEP 1
#define MOTOR_ON 2
#define MOTOR_OFF 6
#define INDEX 3
#define EJECT 7
#define SETMFM 9
#define SETGCR 13
/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR 0
#define STEPPING 1
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
#define WRITE_PROT 9
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
#define GCR_MODE 13
#define SEEK_COMPLETE 14
#define TWOMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
#define GCR_SYNC_EXC 0x3f
#define GCR_SYNC_CONV 0x80
#define GCR_FIRST_MARK 0xd5
#define GCR_SECOND_MARK 0xaa
#define GCR_ADDR_MARK "\xd5\xaa\x00"
#define GCR_DATA_MARK "\xd5\xaa\x0b"
#define GCR_SLIP_BYTE "\x27\xaa"
#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f"
#define DATA_99 "\x99\x99"
#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN 12
struct floppy_state {
enum swim_state state;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
int dma_intr; /* interrupt number for DMA channel */
int cur_cyl; /* cylinder head is on, or -1 */
int cur_sector; /* last sector we saw go past */
int req_cyl; /* the cylinder for the current r/w request */
int head; /* head number ditto */
int req_sector; /* sector number ditto */
int scount; /* # sectors we're transferring at present */
int retries;
int settle_time;
int secpercyl; /* disk geometry information */
int secpertrack;
int total_secs;
int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
struct dbdma_cmd *dma_cmd;
int ref_count;
int expect_cyl;
struct timer_list timeout;
int timeout_pending;
int ejected;
wait_queue_head_t wait;
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#ifdef DEBUG
#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...) do { } while(0)
#endif
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
static unsigned short write_preamble[] = {
0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
0, 0, 0, 0, 0, 0, /* sync field */
0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */
0x990f /* no escape for 512 bytes */
};
static unsigned short write_postamble[] = {
0x9904, /* insert CRC */
0x4e4e, 0x4e4e,
0x9908, /* stop writing */
0, 0, 0, 0, 0, 0
};
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
static void settle_timeout(unsigned long data);
static void xfer_timeout(unsigned long data);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
static void floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
int rc;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
rc = __blk_end_request(req, err, nr_bytes);
if (rc)
return true;
fs->cur_req = NULL;
return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
out_8(&sw->select, RELAX);
if (sel & 8)
out_8(&sw->control_bis, SELECT);
else
out_8(&sw->control_bic, SELECT);
out_8(&sw->select, sel & CA_MASK);
}
static void swim3_action(struct floppy_state *fs, int action)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, action);
udelay(1);
out_8(&sw->select, sw->select | LSTRB);
udelay(2);
out_8(&sw->select, sw->select & ~LSTRB);
udelay(1);
}
static int swim3_readbit(struct floppy_state *fs, int bit)
{
struct swim3 __iomem *sw = fs->swim3;
int stat;
swim3_select(fs, bit);
udelay(1);
stat = in_8(&sw->status);
return (stat & DATA) == 0;
}
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
swim3_dbg("start request, initial state=%d\n", fs->state);
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
if (!fs->cur_req) {
fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
swim3_dbg(" fetched request %p\n", fs->cur_req);
if (!fs->cur_req)
break;
}
req = fs->cur_req;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
#if 0 /* This is really too verbose */
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req),
bio_data(req->bio));
swim3_dbg(" current_nr_sectors=%u\n",
blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
(long)blk_rq_pos(req), (long)fs->total_secs);
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
if (fs->ejected) {
swim3_dbg("%s", " disk ejected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
}
/* Do not remove the cast. blk_rq_pos(req) is now a
* sector_t and can be 64 bits, but it will never go
* past 32 bits for this driver anyway, so we can
* safely cast it down and not have to do a 64/32
* division
*/
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fs->state = do_transfer;
fs->retries = 0;
act(fs);
}
}
static void do_fd_request(struct request_queue * q)
{
start_request(q->queuedata);
}
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
static inline void scan_track(struct floppy_state *fs)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, READ_DATA_0);
in_8(&sw->intr); /* clear SEEN_SECTOR bit */
in_8(&sw->error);
out_8(&sw->intr_enable, SEEN_SECTOR);
out_8(&sw->control_bis, DO_ACTION);
/* enable intr when track found */
set_timeout(fs, HZ, scan_timeout); /* enable timeout */
}
static inline void seek_track(struct floppy_state *fs, int n)
{
struct swim3 __iomem *sw = fs->swim3;
if (n >= 0) {
swim3_action(fs, SEEK_POSITIVE);
sw->nseek = n;
} else {
swim3_action(fs, SEEK_NEGATIVE);
sw->nseek = -n;
}
fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
swim3_select(fs, STEP);
in_8(&sw->error);
/* enable intr when seek finished */
out_8(&sw->intr_enable, SEEK_DONE);
out_8(&sw->control_bis, DO_SEEK);
set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */
fs->settle_time = 0;
}
static inline void init_dma(struct dbdma_cmd *cp, int cmd,
void *buf, int count)
{
cp->req_count = cpu_to_le16(count);
cp->command = cpu_to_le16(cmd);
cp->phy_addr = cpu_to_le32(virt_to_bus(buf));
cp->xfer_status = 0;
}
static inline void setup_transfer(struct floppy_state *fs)
{
int n;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
struct request *req = fs->cur_req;
if (blk_rq_cur_sectors(req) <= 0) {
swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
if (n > blk_rq_cur_sectors(req))
n = blk_rq_cur_sectors(req);
}
swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
fs->req_sector, fs->secpertrack, fs->head, n);
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
/* enable intr when transfer complete */
out_8(&sw->intr_enable, TRANSFER_DONE);
out_8(&sw->control_bis, DO_ACTION);
set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */
}
static void act(struct floppy_state *fs)
{
for (;;) {
swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
fs->state, fs->req_cyl, fs->cur_cyl);
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
else
fs->state = seeking;
break;
}
scan_track(fs);
return;
case seeking:
if (fs->cur_cyl < 0) {
fs->expect_cyl = -1;
fs->state = locating;
break;
}
if (fs->req_cyl == fs->cur_cyl) {
swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
seek_track(fs, fs->req_cyl - fs->cur_cyl);
return;
case settling:
/* check for SEEK_COMPLETE after 30ms */
fs->settle_time = (HZ + 32) / 33;
set_timeout(fs, fs->settle_time, settle_timeout);
return;
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
fs->req_cyl, fs->cur_cyl);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
return;
}
fs->state = seeking;
break;
}
setup_transfer(fs);
return;
case jogging:
seek_track(fs, -5);
return;
default:
swim3_err("Unknown state %d\n", fs->state);
return;
}
}
}
static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* scan timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* seek timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* settle timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
goto unlock;
}
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
int n;
swim3_dbg("* xfer timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
udelay(1);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
swim3_err("Timeout %sing sector %ld\n",
(rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
struct floppy_state *fs = (struct floppy_state *) dev_id;
struct swim3 __iomem *sw = fs->swim3;
int intr, err, n;
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
unsigned long flags;
struct request *req = fs->cur_req;
swim3_dbg("* interrupt, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
break;
}
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
swim3_err("Expected cyl %d, got %d\n",
fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
break;
case seeking:
case jogging:
if (sw->nseek == 0) {
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (fs->state == seeking)
++fs->retries;
fs->state = settling;
act(fs);
}
break;
case settling:
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
act(fs);
break;
case do_transfer:
if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
break;
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
* On writing, the swim3 sometimes doesn't use
* up all the bytes of the postamble, so we can still
* see DMA active here. That doesn't matter as long
* as all the sector data has been transferred.
*/
if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
/* wait a little while for DMA to complete */
for (n = 0; n < 100; ++n) {
if (cp->xfer_status != 0)
break;
udelay(1);
barrier();
}
}
/* turn off DMA */
out_le32(&dr->control, (RUN | PAUSE) << 16);
stat = le16_to_cpu(cp->xfer_status);
resid = le16_to_cpu(cp->res_count);
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
swim3_err("Error %sing block %ld (err=%x)\n",
rq_data_dir(req) == WRITE? "writ": "read",
(long)blk_rq_pos(req), err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* must have been an error */
swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
break;
}
fs->retries = 0;
if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
if (++fs->head > 1) {
fs->head = 0;
++fs->req_cyl;
}
}
act(fs);
} else
fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
break;
default:
swim3_err("Don't know what to do in state %d\n", fs->state);
}
spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/
/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
swim3_dbg("%s", "-> grab drive\n");
spin_lock_irqsave(&swim3_lock, flags);
if (fs->state != idle && fs->state != available) {
++fs->wanted;
/* this will enable irqs in order to sleep */
if (!interruptible)
wait_event_lock_irq(fs->wait,
fs->state == available,
swim3_lock);
else if (wait_event_interruptible_lock_irq(fs->wait,
fs->state == available,
swim3_lock)) {
--fs->wanted;
spin_unlock_irqrestore(&swim3_lock, flags);
return -EINTR;
}
--fs->wanted;
}
fs->state = state;
spin_unlock_irqrestore(&swim3_lock, flags);
return 0;
}
static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
{
int err, n;
err = grab_drive(fs, ejecting, 1);
if (err)
return err;
swim3_action(fs, EJECT);
for (n = 20; n > 0; --n) {
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
if (swim3_readbit(fs, DISK_IN) == 0)
break;
}
swim3_select(fs, RELAX);
udelay(150);
fs->ejected = 1;
release_drive(fs);
return err;
}
/* 1.44MB 3.5": 2880 sectors, 18 sect/track, 2 heads, 80 tracks; then the
 * stretch, gap, rate, spec1 and fmt_gap fields of struct floppy_struct. */
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
int err;
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
switch (cmd) {
case FDEJECT:
if (fs->ref_count != 1)
return -EBUSY;
err = fd_eject(fs);
return err;
case FDGETPRM:
if (copy_to_user((void __user *) param, &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
return 0;
}
return -ENOTTY;
}
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&swim3_mutex);
return ret;
}
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
int n, err = 0;
if (fs->ref_count == 0) {
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
udelay(10);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
swim3_action(fs, MOTOR_ON);
fs->write_prot = -1;
fs->cur_cyl = -1;
for (n = 0; n < 2 * HZ; ++n) {
if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0))
err = -ENXIO;
swim3_action(fs, SETMFM);
swim3_select(fs, RELAX);
} else if (fs->ref_count == -1 || mode & FMODE_EXCL)
return -EBUSY;
if (err == 0 && (mode & FMODE_NDELAY) == 0
&& (mode & (FMODE_READ|FMODE_WRITE))) {
check_disk_change(bdev);
if (fs->ejected)
err = -ENXIO;
}
if (err == 0 && (mode & FMODE_WRITE)) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot)
err = -EROFS;
}
if (err) {
if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
swim3_select(fs, RELAX);
}
return err;
}
if (mode & FMODE_EXCL)
fs->ref_count = -1;
else
++fs->ref_count;
return 0;
}
static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_open(bdev, mode);
mutex_unlock(&swim3_mutex);
return ret;
}
static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
}
mutex_unlock(&swim3_mutex);
}
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct floppy_state *fs = disk->private_data;
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int floppy_revalidate(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw;
int ret, n;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
sw = fs->swim3;
grab_drive(fs, revalidating, 0);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE);
swim3_action(fs, MOTOR_ON); /* necessary? */
fs->write_prot = -1;
fs->cur_cyl = -1;
mdelay(1);
for (n = HZ; n > 0; --n) {
if (swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current))
break;
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0;
if (ret)
swim3_action(fs, MOTOR_OFF);
else {
fs->ejected = 0;
swim3_action(fs, SETMFM);
}
swim3_select(fs, RELAX);
release_drive(fs);
return ret;
}
static const struct block_device_operations floppy_fops = {
.open = floppy_unlocked_open,
.release = floppy_release,
.ioctl = floppy_ioctl,
.check_events = floppy_check_events,
.revalidate_disk = floppy_revalidate,
};
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
struct swim3 __iomem *sw;
if (!fs)
return;
sw = fs->swim3;
if (mb_state != MB_FD)
return;
/* Clear state */
out_8(&sw->intr_enable, 0);
in_8(&sw->intr);
in_8(&sw->error);
}
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
/* Do this first for message macros */
memset(fs, 0, sizeof(*fs));
fs->mdev = mdev;
fs->index = index;
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
if (macio_irq_count(mdev) < 1) {
swim3_err("%s", "No interrupt in device-tree\n");
return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
dev_set_drvdata(&mdev->ofdev.dev, fs);
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
}
fs->swim3_intr = macio_irq(mdev, 0);
fs->dma_intr = macio_irq(mdev, 1);
fs->cur_cyl = -1;
fs->cur_sector = -1;
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);
if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
swim3_mb_event(mdev, MB_FD);
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
}
init_timer(&fs->timeout);
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
out_unmap:
iounmap(fs->dma);
iounmap(fs->swim3);
out_release:
macio_release_resource(mdev, 0);
macio_release_resource(mdev, 1);
return rc;
}
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
struct gendisk *disk;
int index, rc;
index = floppy_count++;
if (index >= MAX_FLOPPIES)
return -ENXIO;
/* Add the drive */
rc = swim3_add_device(mdev, index);
if (rc)
return rc;
/* Now register that disk. Same comment about failure handling */
disk = disks[index] = alloc_disk(1);
if (disk == NULL)
return -ENOMEM;
disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
if (disk->queue == NULL) {
put_disk(disk);
return -ENOMEM;
}
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
disk->queue->queuedata = &floppy_states[index];
if (index == 0) {
/* If registration fails, there isn't much we can do as the driver is
 * still too dumb to remove the device; just bail out.
 */
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
}
disk->major = FLOPPY_MAJOR;
disk->first_minor = index;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
return 0;
}
static const struct of_device_id swim3_match[] =
{
{
.name = "swim3",
},
{
.compatible = "ohare-swim3"
},
{
.compatible = "swim3"
},
{ /* end of list */ }
};
static struct macio_driver swim3_driver =
{
.driver = {
.name = "swim3",
.of_match_table = swim3_match,
},
.probe = swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
.mediabay_event = swim3_mb_event,
#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
#endif
};
int swim3_init(void)
{
macio_register_driver(&swim3_driver);
return 0;
}
module_init(swim3_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| {
"pile_set_name": "Github"
} |
#ifndef __OBJECT
#define __OBJECT
typedef struct M_Object_ * M_Object;
struct M_Object_ {
m_bit* data;
Type type_ref;
Vector vtable;
volatile size_t ref;
};
ANN void instantiate_object(const VM_Shred, const Type);
ANN void free_object(MemPool p, const M_Object);
ANEW M_Object new_object(MemPool, const VM_Shred, const Type);
ANEW M_Object new_M_UGen(const struct Gwion_*);
ANN void fork_clean(const VM_Shred, const Vector);
ANN ANEW M_Object new_array(MemPool, const Type t, const m_uint length);
ANEW M_Object new_string(MemPool, const VM_Shred, const m_str);
ANEW M_Object new_string2(const struct Gwion_*, const VM_Shred, const m_str);
ANEW M_Object new_shred(const VM_Shred);
ANN void fork_launch(const M_Object, const m_uint);
ANN void __release(const M_Object, const VM_Shred);
ANN void exception(const VM_Shred, const m_str);
ANN void broadcast(const M_Object);
#define STRING(o) (*(m_str*) ((M_Object)o)->data)
#define ME(o) (*(VM_Shred*) ((M_Object)o)->data)
#define EV_SHREDS(o) (*(Vector*) ((M_Object)o)->data)
#define UGEN(o) (*(UGen*) ((M_Object)o)->data)
#define ARRAY(o) (*(M_Vector*) ((M_Object)o)->data)
#define IO_FILE(o) (*(FILE**) (((M_Object)o)->data + SZ_INT))
#define Except(s, c) { exception(s, c); return; }
static inline void _release(const restrict M_Object obj, const restrict VM_Shred shred) {
if(!--obj->ref)__release(obj, shred);
}
static inline void release(const restrict M_Object obj, const restrict VM_Shred shred) {
if(obj)_release(obj, shred);
}
typedef void (f_release)(const VM_Shred shred, const Type t NUSED, const m_bit* ptr);
#define RELEASE_FUNC(a) void (a)(const VM_Shred shred, const Type t NUSED, const m_bit* ptr)
static inline RELEASE_FUNC(object_release) { release(*(M_Object*)ptr, shred); }
RELEASE_FUNC(struct_release);
#endif
| {
"pile_set_name": "Github"
} |
#ifndef __SIMD_DEF_X86_X64_H__
#define __SIMD_DEF_X86_X64_H__
#include <intrin.h>
#ifdef _MSC_VER
#ifndef _mm_srli_pi64
#define _mm_srli_pi64 _mm_srli_si64
#endif
#ifndef _mm_slli_pi64
#define _mm_slli_pi64 _mm_slli_si64
#endif
#pragma warning(push)
#pragma warning(disable : 4799) // C4799: MMX code without _mm_empty (EMMS); deliberately suppressed here.
#ifndef _mm_cvtsi64_m64
__inline __m64 _mm_cvtsi64_m64( __int64 v ) { __m64 ret; ret.m64_i64 = v; return ret; }
#endif
#ifndef _mm_cvtm64_si64
__inline __int64 _mm_cvtm64_si64( __m64 v ) { return v.m64_i64; }
#endif
#pragma warning(pop)
#endif
#ifdef _MSC_VER // visual c++
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
# define ALIGN32_BEG __declspec(align(32))
# define ALIGN32_END
#else // gcc or icc
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
# define ALIGN32_BEG
# define ALIGN32_END __attribute__((aligned(32)))
#endif
// Note: plain "::Name" (no "##") is used below; pasting "##" against "::" is
// accepted by MSVC but is not a valid token paste in standard C/C++.
#define _PS_CONST128(Name, Val) \
const ALIGN16_BEG float WeightValuesSSE::Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PI32_CONST128(Name, Val) \
const ALIGN16_BEG tjs_uint32 WeightValuesSSE::Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PS_CONST256(Name, Val) \
const ALIGN32_BEG float WeightValuesAVX::Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PI32_CONST256(Name, Val) \
const ALIGN32_BEG tjs_uint32 WeightValuesAVX::Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PS_CONST_TYPE256(Name, Type, Val) \
static const ALIGN32_BEG Type m256_ps_##Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }
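/*
 * Expansion sketch (assumes a WeightValuesSSE class declaring a matching
 * static member): _PS_CONST128(one, 1.0f) expands to
 *
 *   const ALIGN16_BEG float WeightValuesSSE::one[4] ALIGN16_END =
 *       { 1.0f, 1.0f, 1.0f, 1.0f };
 *
 * i.e. a 16-byte-aligned broadcast of the scalar, ready for _mm_load_ps.
 */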
#endif // __SIMD_DEF_X86_X64_H__
| {
"pile_set_name": "Github"
} |
.switch {
--switch-checked-background-color: #0654ba;
--switch-disabled-background-color: #ccc;
--switch-unchecked-background-color: #767676;
--switch-foreground-color: #fff;
}
.switch {
-webkit-box-sizing: border-box;
box-sizing: border-box;
height: 40px;
position: relative;
vertical-align: middle;
}
div.switch {
display: -webkit-box;
display: flex;
}
span.switch {
display: -webkit-inline-box;
display: inline-flex;
}
span.switch__button {
background-color: #767676;
background-color: var(--switch-unchecked-background-color, #767676);
align-self: center;
border-radius: 400px;
color: transparent;
display: inline-block;
height: 24px;
position: relative;
text-indent: 100%;
-webkit-transition: left 0.15s ease-out 0s;
transition: left 0.15s ease-out 0s;
width: 40px;
}
span.switch__button::after {
background-color: #fff;
background-color: var(--switch-foreground-color, #fff);
border-radius: 50%;
content: "";
display: block;
height: 18px;
left: 3px;
position: absolute;
top: 3px;
-webkit-transform: translate3d(0, 0, 0);
transform: translate3d(0, 0, 0);
-webkit-transition: left 0.15s ease-out 0s;
transition: left 0.15s ease-out 0s;
width: 18px;
}
input.switch__control,
span.switch__control {
height: 24px;
left: 0;
margin: 0;
padding: 0;
position: absolute;
top: 8px;
width: 40px;
z-index: 1;
}
input.switch__control {
opacity: 0;
}
input.switch__control:focus + span.switch__button {
outline: 1px dotted #767676;
}
input.switch__control[disabled] + span.switch__button {
background-color: #ccc;
background-color: var(--switch-disabled-background-color, #ccc);
}
input.switch__control:checked + span.switch__button::after {
left: 19px;
}
span.switch__control[aria-disabled="true"] + span.switch__button {
background-color: #ccc;
background-color: var(--switch-disabled-background-color, #ccc);
}
span.switch__control[aria-checked="true"] + span.switch__button::after {
left: 19px;
}
input.switch__control:not([disabled]):checked + span.switch__button,
span.switch__control:not([aria-disabled="true"])[aria-checked="true"] + span.switch__button {
background-color: #0654ba;
background-color: var(--switch-checked-background-color, #0654ba);
}
@media screen and (-ms-high-contrast: active) {
input.switch__control {
opacity: 1;
}
}
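/* Theming sketch: the .switch--brand class below is hypothetical, not defined
   in this file; it only shows that the custom properties declared at the top
   can be overridden per container.

   .switch--brand {
     --switch-checked-background-color: #2e7d32;
   }

   The rules above assume markup of an input.switch__control (or an ARIA
   span.switch__control) followed by a span.switch__button inside a .switch
   element. */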
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:orientation="vertical">
<Button
android:id="@+id/btn_shot"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="截图" />
<ImageView
android:id="@+id/img_shot"
android:layout_width="match_parent"
android:layout_height="200dp"
android:background="@mipmap/bg_shottest"/>
</LinearLayout> | {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<!--
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can
obtain one at http://mozilla.org/MPL/2.0/. OpenMRS is also distributed under
the terms of the Healthcare Disclaimer located at http://openmrs.org/license.
Copyright (C) OpenMRS Inc. OpenMRS is a registered trademark and the OpenMRS
graphic logo is a trademark of OpenMRS Inc.
-->
<!DOCTYPE hibernate-mapping PUBLIC
"-//Hibernate/Hibernate Mapping DTD 3.0//EN"
"http://www.hibernate.org/dtd/hibernate-mapping-3.0.dtd">
<hibernate-mapping>
<class name="org.openmrs.ConceptSource" table="concept_reference_source">
<id name="conceptSourceId" type="java.lang.Integer" column="concept_source_id" unsaved-value="0">
<generator class="native">
<param name="sequence">concept_reference_source_concept_source_id_seq</param>
</generator>
</id>
<property name="uuid" type="java.lang.String"
column="uuid" length="38" unique="true" />
<property name="name" type="java.lang.String"
column="name" length="50" not-null="true" />
<property name="description" type="java.lang.String"
column="description" length="1024" not-null="true" />
<property name="hl7Code" type="java.lang.String"
column="hl7_code" length="50" />
<property name="uniqueId" type="java.lang.String"
column="unique_id" length="250" unique="true" />
<property name="dateCreated" type="java.util.Date"
column="date_created" not-null="true" length="19" />
<property name="retired" type="boolean" column="retired" not-null="true"/>
<property name="dateRetired" type="java.util.Date"
column="date_retired" length="19" />
<property name="retireReason" type="java.lang.String"
column="retire_reason" length="255" />
<!-- Associations -->
<!-- bi-directional many-to-one association to User -->
<many-to-one name="creator" class="org.openmrs.User"
not-null="true">
<column name="creator" />
</many-to-one>
<many-to-one name="retiredBy" class="org.openmrs.User"
column="retired_by" />
<many-to-one name="changedBy" class="org.openmrs.User" column="changed_by"/>
<property name="dateChanged" type="java.util.Date"
column="date_changed" length="19"/>
</class>
</hibernate-mapping>
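<!-- Illustrative note (the caller code is an assumption, not part of this
     mapping): with this file on the classpath, saving an
     org.openmrs.ConceptSource through a Hibernate Session writes a row to
     concept_reference_source, with concept_source_id assigned by the native
     generator declared above. -->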
| {
"pile_set_name": "Github"
} |
gh.gench.edu.cn
webplus.gench.edu.cn
jdxy.gench.edu.cn
i.gench.edu.cn
yx.gench.edu.cn
gzxy.gench.edu.cn
smc.gench.edu.cn
xg.gench.edu.cn
tsxy.gench.edu.cn
wmdw1516.gench.edu.cn
jwxt.gench.edu.cn
youth.gench.edu.cn
career.gench.edu.cn
sxy.gench.edu.cn
ygty.gench.edu.cn
xxhbgs.gench.edu.cn
xsc.gench.edu.cn
bgs.gench.edu.cn
library.gench.edu.cn
wmdw.gench.edu.cn
develop.gench.edu.cn
jewelry.gench.edu.cn
zsb.gench.edu.cn
my.gench.edu.cn
xwcb.gench.edu.cn
kczx.gench.edu.cn
tsjs.gench.edu.cn
wgyxy.gench.edu.cn
news.gench.edu.cn
hr.gench.edu.cn
kyperson.gench.edu.cn
love.gench.edu.cn
iidc.gench.edu.cn
ids1.gench.edu.cn
jxjy.gench.edu.cn
art.gench.edu.cn
lab.gench.edu.cn
dwgk.gench.edu.cn
cwgzcx.gench.edu.cn
www.gench.edu.cn
xxjs.gench.edu.cn
| {
"pile_set_name": "Github"
} |
package caliban.client
import caliban.client.CalibanClientError.DecodingError
import caliban.client.FieldBuilder._
import caliban.client.SelectionBuilder._
import caliban.client.Operations._
import caliban.client.Value._
object Client {
sealed trait Origin extends scala.Product with scala.Serializable
object Origin {
case object BELT extends Origin
case object EARTH extends Origin
case object MARS extends Origin
implicit val decoder: ScalarDecoder[Origin] = {
case StringValue("BELT") => Right(Origin.BELT)
case StringValue("EARTH") => Right(Origin.EARTH)
case StringValue("MARS") => Right(Origin.MARS)
case other => Left(DecodingError(s"Can't build Origin from input $other"))
}
implicit val encoder: ArgEncoder[Origin] = new ArgEncoder[Origin] {
override def encode(value: Origin): Value = value match {
case Origin.BELT => EnumValue("BELT")
case Origin.EARTH => EnumValue("EARTH")
case Origin.MARS => EnumValue("MARS")
}
override def typeName: String = "Origin"
}
}
type Engineer
object Engineer {
def shipName: SelectionBuilder[Engineer, String] = Field("shipName", Scalar())
}
type Character
object Character {
def name: SelectionBuilder[Character, String] = Field("name", Scalar())
def nicknames: SelectionBuilder[Character, List[String]] = Field("nicknames", ListOf(Scalar()))
def origin: SelectionBuilder[Character, Origin] = Field("origin", Scalar())
def role[A](
onCaptain: SelectionBuilder[Captain, A],
onEngineer: SelectionBuilder[Engineer, A],
onMechanic: SelectionBuilder[Mechanic, A],
onPilot: SelectionBuilder[Pilot, A]
): SelectionBuilder[Character, Option[A]] =
Field(
"role",
OptionOf(
ChoiceOf(
Map(
"Captain" -> Obj(onCaptain),
"Engineer" -> Obj(onEngineer),
"Mechanic" -> Obj(onMechanic),
"Pilot" -> Obj(onPilot)
)
)
)
)
}
type Pilot
object Pilot {
def shipName: SelectionBuilder[Pilot, String] = Field("shipName", Scalar())
}
type Mechanic
object Mechanic {
def shipName: SelectionBuilder[Mechanic, String] = Field("shipName", Scalar())
}
type Captain
object Captain {
def shipName: SelectionBuilder[Captain, String] = Field("shipName", Scalar())
}
type Queries = RootQuery
object Queries {
def characters[A](
origin: Option[Origin] = None
)(innerSelection: SelectionBuilder[Character, A]): SelectionBuilder[RootQuery, List[A]] =
Field("characters", ListOf(Obj(innerSelection)), arguments = List(Argument("origin", origin)))
def character[A](
name: String
)(innerSelection: SelectionBuilder[Character, A]): SelectionBuilder[RootQuery, Option[A]] =
Field("character", OptionOf(Obj(innerSelection)), arguments = List(Argument("name", name)))
}
type Mutations = RootMutation
object Mutations {
def deleteCharacter(name: String): SelectionBuilder[RootMutation, Boolean] =
Field("deleteCharacter", Scalar(), arguments = List(Argument("name", name)))
}
}
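// Usage sketch (illustrative; the val below is an assumption, the builders
// are the generated ones above):
//
//   import Client._
//   val query =
//     Queries.characters(Some(Origin.MARS))(Character.name ~ Character.origin)
//
// `~` zips two selections on the same type into a tuple, which is how
// caliban-client composes fields; `query` can then be turned into an HTTP
// request with toRequest.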
| {
"pile_set_name": "Github"
} |
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.3.0 |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class polyBoundaryMesh;
location "constant/polyMesh";
object boundary;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
4
(
frontAndBack
{
type wall;
inGroups 1(wall);
nFaces 1050;
startFace 228225;
}
topAndBottom
{
type wall;
inGroups 1(wall);
nFaces 10500;
startFace 229275;
}
hot
{
type wall;
inGroups 1(wall);
nFaces 2250;
startFace 239775;
}
cold
{
type wall;
inGroups 1(wall);
nFaces 2250;
startFace 242025;
}
)
// ************************************************************************* //
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x"
},
{
"idiom" : "universal",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
/*
*
* Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.speedment.runtime.config.mutator;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import com.speedment.runtime.config.ForeignKeyColumn;
import com.speedment.runtime.config.mutator.trait.HasIdMutatorMixin;
import com.speedment.runtime.config.mutator.trait.HasNameMutatorMixin;
import com.speedment.runtime.config.mutator.trait.HasOrdinalPositionMutatorMixin;
import org.junit.jupiter.api.Test;
import java.util.HashMap;
final class ForeignKeyColumnMutatorTest implements
HasIdMutatorMixin<ForeignKeyColumn, ForeignKeyColumnMutator<ForeignKeyColumn>>,
HasNameMutatorMixin<ForeignKeyColumn, ForeignKeyColumnMutator<ForeignKeyColumn>>,
HasOrdinalPositionMutatorMixin<ForeignKeyColumn, ForeignKeyColumnMutator<ForeignKeyColumn>> {
@Override
@SuppressWarnings("unchecked")
public ForeignKeyColumnMutator<ForeignKeyColumn> getMutatorInstance() {
return (ForeignKeyColumnMutator<ForeignKeyColumn>) ForeignKeyColumn.create(null, new HashMap<>()).mutator();
}
@Test
void setForeignTableName() {
assertDoesNotThrow(() -> getMutatorInstance().setForeignTableName("table"));
}
@Test
void setForeignColumnName() {
assertDoesNotThrow(() -> getMutatorInstance().setForeignColumnName("column"));
}
@Test
void setForeignDatabaseName() {
assertDoesNotThrow(() -> getMutatorInstance().setForeignDatabaseName("database"));
}
@Test
void setForeignSchemaName() {
assertDoesNotThrow(() -> getMutatorInstance().setForeignSchemaName("table"));
}
}
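// Note (an assumption about the mixin traits, which are defined elsewhere):
// each Has*MutatorMixin contributes default @Test methods that exercise the
// corresponding trait setter against getMutatorInstance(), so this class only
// adds the ForeignKeyColumn-specific setters covered above.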
| {
"pile_set_name": "Github"
} |
Flask-OAuthlib==0.9.5
Flask==1.1.2
backoff==1.8.1
boto3==1.10.4
boto==2.49.0
click==6.7
furl==1.0.2
gevent==1.2.2
jq==0.1.6
json_delta>=2.0
kubernetes==3.0.0
requests==2.22.0
stups-tokens>=1.1.19
wal_e==1.1.0
werkzeug==0.16.1
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleDocumentTypes</key>
<array>
<dict>
<key>CFBundleTypeExtensions</key>
<array>
<string>txt</string>
</array>
<key>CFBundleTypeMIMETypes</key>
<array>
<string>text/plain</string>
<string>application/x-latex</string>
</array>
<key>CFBundleTypeName</key>
<string>Plain Text File</string>
<key>CFBundleTypeRole</key>
<string>Editor</string>
<key>LSItemContentTypes</key>
<array>
<string>public.plain-text</string>
<string>public.html</string>
<string>public.text</string>
<string>public.data</string>
<string>public.content</string>
</array>
<key>LSTypeIsPackage</key>
<integer>0</integer>
<key>NSDocumentClass</key>
<string>Noto.Document</string>
<key>NSIsRelatedItemType</key>
<true/>
</dict>
</array>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleHelpBookFolder</key>
<string>Noto.help</string>
<key>CFBundleHelpBookName</key>
<string>com.brunophilipe.Noto.help</string>
<key>CFBundleIconFile</key>
<string></string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.2.1</string>
<key>CFBundleVersion</key>
<string>830</string>
<key>ITSAppUsesNonExemptEncryption</key>
<false/>
<key>LSApplicationCategoryType</key>
<string>public.app-category.utilities</string>
<key>LSMinimumSystemVersion</key>
<string>$(MACOSX_DEPLOYMENT_TARGET)</string>
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2017 Bruno Philipe. All rights reserved.</string>
<key>NSMainNibFile</key>
<string>MainMenu</string>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
<key>NSUbiquitousContainers</key>
<dict>
<key>iCloud.com.brunophilipe.Noto</key>
<dict>
<key>NSUbiquitousContainerIsDocumentScopePublic</key>
<true/>
<key>NSUbiquitousContainerName</key>
<string>Noto</string>
<key>NSUbiquitousContainerSupportedFolderLevels</key>
<string>Any</string>
</dict>
</dict>
</dict>
</plist>
| {
"pile_set_name": "Github"
} |
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/videointelligence/v1p1beta1/video_intelligence.proto
package videointelligence
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
duration "github.com/golang/protobuf/ptypes/duration"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
longrunning "google.golang.org/genproto/googleapis/longrunning"
status "google.golang.org/genproto/googleapis/rpc/status"
math "math"
)
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Video annotation feature.
type Feature int32
const (
// Unspecified.
Feature_FEATURE_UNSPECIFIED Feature = 0
// Label detection. Detect objects, such as dog or flower.
Feature_LABEL_DETECTION Feature = 1
// Shot change detection.
Feature_SHOT_CHANGE_DETECTION Feature = 2
// Explicit content detection.
Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
// Speech transcription.
Feature_SPEECH_TRANSCRIPTION Feature = 6
)
var Feature_name = map[int32]string{
0: "FEATURE_UNSPECIFIED",
1: "LABEL_DETECTION",
2: "SHOT_CHANGE_DETECTION",
3: "EXPLICIT_CONTENT_DETECTION",
6: "SPEECH_TRANSCRIPTION",
}
var Feature_value = map[string]int32{
"FEATURE_UNSPECIFIED": 0,
"LABEL_DETECTION": 1,
"SHOT_CHANGE_DETECTION": 2,
"EXPLICIT_CONTENT_DETECTION": 3,
"SPEECH_TRANSCRIPTION": 6,
}
func (x Feature) String() string {
return proto.EnumName(Feature_name, int32(x))
}
func (Feature) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{0}
}
// Label detection mode.
type LabelDetectionMode int32
const (
// Unspecified.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
// Detect shot-level labels.
LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
// Detect frame-level labels.
LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
// Detect both shot-level and frame-level labels.
LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)
var LabelDetectionMode_name = map[int32]string{
0: "LABEL_DETECTION_MODE_UNSPECIFIED",
1: "SHOT_MODE",
2: "FRAME_MODE",
3: "SHOT_AND_FRAME_MODE",
}
var LabelDetectionMode_value = map[string]int32{
"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
"SHOT_MODE": 1,
"FRAME_MODE": 2,
"SHOT_AND_FRAME_MODE": 3,
}
func (x LabelDetectionMode) String() string {
return proto.EnumName(LabelDetectionMode_name, int32(x))
}
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{1}
}
// Bucketized representation of likelihood.
type Likelihood int32
const (
// Unspecified likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
// Very unlikely.
Likelihood_VERY_UNLIKELY Likelihood = 1
// Unlikely.
Likelihood_UNLIKELY Likelihood = 2
// Possible.
Likelihood_POSSIBLE Likelihood = 3
// Likely.
Likelihood_LIKELY Likelihood = 4
// Very likely.
Likelihood_VERY_LIKELY Likelihood = 5
)
var Likelihood_name = map[int32]string{
0: "LIKELIHOOD_UNSPECIFIED",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
var Likelihood_value = map[string]int32{
"LIKELIHOOD_UNSPECIFIED": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
func (x Likelihood) String() string {
return proto.EnumName(Likelihood_name, int32(x))
}
func (Likelihood) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{2}
}
// Video annotation request.
type AnnotateVideoRequest struct {
// Input video location. Currently, only
// [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
// [Request URIs](/storage/docs/reference-uris).
// A video URI may include wildcards in `object-id`, and thus identify
// multiple videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
// in the request as `input_content`. If set, `input_content` should be unset.
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// The video data bytes.
// If unset, the input video(s) should be specified via `input_uri`.
// If set, `input_uri` should be unset.
InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
// Requested video annotation features.
Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1p1beta1.Feature" json:"features,omitempty"`
// Additional video context and/or feature-specific parameters.
VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
// Optional location where the output (in JSON format) should be stored.
// Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
// [Request URIs](/storage/docs/reference-uris).
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Optional cloud region where annotation should take place. Supported cloud
// regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
// is specified, a region will be determined based on video file location.
LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AnnotateVideoRequest) Reset() { *m = AnnotateVideoRequest{} }
func (m *AnnotateVideoRequest) String() string { return proto.CompactTextString(m) }
func (*AnnotateVideoRequest) ProtoMessage() {}
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{0}
}
func (m *AnnotateVideoRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AnnotateVideoRequest.Unmarshal(m, b)
}
func (m *AnnotateVideoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AnnotateVideoRequest.Marshal(b, m, deterministic)
}
func (m *AnnotateVideoRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AnnotateVideoRequest.Merge(m, src)
}
func (m *AnnotateVideoRequest) XXX_Size() int {
return xxx_messageInfo_AnnotateVideoRequest.Size(m)
}
func (m *AnnotateVideoRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AnnotateVideoRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AnnotateVideoRequest proto.InternalMessageInfo
func (m *AnnotateVideoRequest) GetInputUri() string {
if m != nil {
return m.InputUri
}
return ""
}
func (m *AnnotateVideoRequest) GetInputContent() []byte {
if m != nil {
return m.InputContent
}
return nil
}
func (m *AnnotateVideoRequest) GetFeatures() []Feature {
if m != nil {
return m.Features
}
return nil
}
func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext {
if m != nil {
return m.VideoContext
}
return nil
}
func (m *AnnotateVideoRequest) GetOutputUri() string {
if m != nil {
return m.OutputUri
}
return ""
}
func (m *AnnotateVideoRequest) GetLocationId() string {
if m != nil {
return m.LocationId
}
return ""
}
// Video context and/or feature-specific parameters.
type VideoContext struct {
// Video segments to annotate. The segments may overlap and are not required
// to be contiguous or span the whole video. If unspecified, each video is
// treated as a single segment.
Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
// Config for LABEL_DETECTION.
LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
// Config for SHOT_CHANGE_DETECTION.
ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3" json:"shot_change_detection_config,omitempty"`
// Config for EXPLICIT_CONTENT_DETECTION.
ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3" json:"explicit_content_detection_config,omitempty"`
// Config for SPEECH_TRANSCRIPTION.
SpeechTranscriptionConfig *SpeechTranscriptionConfig `protobuf:"bytes,6,opt,name=speech_transcription_config,json=speechTranscriptionConfig,proto3" json:"speech_transcription_config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VideoContext) Reset() { *m = VideoContext{} }
func (m *VideoContext) String() string { return proto.CompactTextString(m) }
func (*VideoContext) ProtoMessage() {}
func (*VideoContext) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{1}
}
func (m *VideoContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VideoContext.Unmarshal(m, b)
}
func (m *VideoContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VideoContext.Marshal(b, m, deterministic)
}
func (m *VideoContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_VideoContext.Merge(m, src)
}
func (m *VideoContext) XXX_Size() int {
return xxx_messageInfo_VideoContext.Size(m)
}
func (m *VideoContext) XXX_DiscardUnknown() {
xxx_messageInfo_VideoContext.DiscardUnknown(m)
}
var xxx_messageInfo_VideoContext proto.InternalMessageInfo
func (m *VideoContext) GetSegments() []*VideoSegment {
if m != nil {
return m.Segments
}
return nil
}
func (m *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig {
if m != nil {
return m.LabelDetectionConfig
}
return nil
}
func (m *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig {
if m != nil {
return m.ShotChangeDetectionConfig
}
return nil
}
func (m *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig {
if m != nil {
return m.ExplicitContentDetectionConfig
}
return nil
}
func (m *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig {
if m != nil {
return m.SpeechTranscriptionConfig
}
return nil
}
// Config for LABEL_DETECTION.
type LabelDetectionConfig struct {
// What labels should be detected with LABEL_DETECTION, in addition to
// video-level labels or segment-level labels.
// If unspecified, defaults to `SHOT_MODE`.
LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,proto3,enum=google.cloud.videointelligence.v1p1beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"`
// Whether the video has been shot from a stationary (i.e. non-moving) camera.
// When set to true, might improve detection accuracy for moving objects.
// Should be used with `SHOT_AND_FRAME_MODE` enabled.
StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
// Model to use for label detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelDetectionConfig) Reset() { *m = LabelDetectionConfig{} }
func (m *LabelDetectionConfig) String() string { return proto.CompactTextString(m) }
func (*LabelDetectionConfig) ProtoMessage() {}
func (*LabelDetectionConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{2}
}
func (m *LabelDetectionConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelDetectionConfig.Unmarshal(m, b)
}
func (m *LabelDetectionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelDetectionConfig.Marshal(b, m, deterministic)
}
func (m *LabelDetectionConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelDetectionConfig.Merge(m, src)
}
func (m *LabelDetectionConfig) XXX_Size() int {
return xxx_messageInfo_LabelDetectionConfig.Size(m)
}
func (m *LabelDetectionConfig) XXX_DiscardUnknown() {
xxx_messageInfo_LabelDetectionConfig.DiscardUnknown(m)
}
var xxx_messageInfo_LabelDetectionConfig proto.InternalMessageInfo
func (m *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode {
if m != nil {
return m.LabelDetectionMode
}
return LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED
}
func (m *LabelDetectionConfig) GetStationaryCamera() bool {
if m != nil {
return m.StationaryCamera
}
return false
}
func (m *LabelDetectionConfig) GetModel() string {
if m != nil {
return m.Model
}
return ""
}
// Config for SHOT_CHANGE_DETECTION.
type ShotChangeDetectionConfig struct {
// Model to use for shot change detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ShotChangeDetectionConfig) Reset() { *m = ShotChangeDetectionConfig{} }
func (m *ShotChangeDetectionConfig) String() string { return proto.CompactTextString(m) }
func (*ShotChangeDetectionConfig) ProtoMessage() {}
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{3}
}
func (m *ShotChangeDetectionConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShotChangeDetectionConfig.Unmarshal(m, b)
}
func (m *ShotChangeDetectionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ShotChangeDetectionConfig.Marshal(b, m, deterministic)
}
func (m *ShotChangeDetectionConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_ShotChangeDetectionConfig.Merge(m, src)
}
func (m *ShotChangeDetectionConfig) XXX_Size() int {
return xxx_messageInfo_ShotChangeDetectionConfig.Size(m)
}
func (m *ShotChangeDetectionConfig) XXX_DiscardUnknown() {
xxx_messageInfo_ShotChangeDetectionConfig.DiscardUnknown(m)
}
var xxx_messageInfo_ShotChangeDetectionConfig proto.InternalMessageInfo
func (m *ShotChangeDetectionConfig) GetModel() string {
if m != nil {
return m.Model
}
return ""
}
// Config for EXPLICIT_CONTENT_DETECTION.
type ExplicitContentDetectionConfig struct {
// Model to use for explicit content detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExplicitContentDetectionConfig) Reset() { *m = ExplicitContentDetectionConfig{} }
func (m *ExplicitContentDetectionConfig) String() string { return proto.CompactTextString(m) }
func (*ExplicitContentDetectionConfig) ProtoMessage() {}
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{4}
}
func (m *ExplicitContentDetectionConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExplicitContentDetectionConfig.Unmarshal(m, b)
}
func (m *ExplicitContentDetectionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExplicitContentDetectionConfig.Marshal(b, m, deterministic)
}
func (m *ExplicitContentDetectionConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExplicitContentDetectionConfig.Merge(m, src)
}
func (m *ExplicitContentDetectionConfig) XXX_Size() int {
return xxx_messageInfo_ExplicitContentDetectionConfig.Size(m)
}
func (m *ExplicitContentDetectionConfig) XXX_DiscardUnknown() {
xxx_messageInfo_ExplicitContentDetectionConfig.DiscardUnknown(m)
}
var xxx_messageInfo_ExplicitContentDetectionConfig proto.InternalMessageInfo
func (m *ExplicitContentDetectionConfig) GetModel() string {
if m != nil {
return m.Model
}
return ""
}
// Video segment.
type VideoSegment struct {
// Time-offset, relative to the beginning of the video,
// corresponding to the start of the segment (inclusive).
StartTimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the end of the segment (inclusive).
EndTimeOffset *duration.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VideoSegment) Reset() { *m = VideoSegment{} }
func (m *VideoSegment) String() string { return proto.CompactTextString(m) }
func (*VideoSegment) ProtoMessage() {}
func (*VideoSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{5}
}
func (m *VideoSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VideoSegment.Unmarshal(m, b)
}
func (m *VideoSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VideoSegment.Marshal(b, m, deterministic)
}
func (m *VideoSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_VideoSegment.Merge(m, src)
}
func (m *VideoSegment) XXX_Size() int {
return xxx_messageInfo_VideoSegment.Size(m)
}
func (m *VideoSegment) XXX_DiscardUnknown() {
xxx_messageInfo_VideoSegment.DiscardUnknown(m)
}
var xxx_messageInfo_VideoSegment proto.InternalMessageInfo
func (m *VideoSegment) GetStartTimeOffset() *duration.Duration {
if m != nil {
return m.StartTimeOffset
}
return nil
}
func (m *VideoSegment) GetEndTimeOffset() *duration.Duration {
if m != nil {
return m.EndTimeOffset
}
return nil
}
// Video segment level annotation results for label detection.
type LabelSegment struct {
// Video segment where a label was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelSegment) Reset() { *m = LabelSegment{} }
func (m *LabelSegment) String() string { return proto.CompactTextString(m) }
func (*LabelSegment) ProtoMessage() {}
func (*LabelSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{6}
}
func (m *LabelSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelSegment.Unmarshal(m, b)
}
func (m *LabelSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelSegment.Marshal(b, m, deterministic)
}
func (m *LabelSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelSegment.Merge(m, src)
}
func (m *LabelSegment) XXX_Size() int {
return xxx_messageInfo_LabelSegment.Size(m)
}
func (m *LabelSegment) XXX_DiscardUnknown() {
xxx_messageInfo_LabelSegment.DiscardUnknown(m)
}
var xxx_messageInfo_LabelSegment proto.InternalMessageInfo
func (m *LabelSegment) GetSegment() *VideoSegment {
if m != nil {
return m.Segment
}
return nil
}
func (m *LabelSegment) GetConfidence() float32 {
if m != nil {
return m.Confidence
}
return 0
}
// Video frame level annotation results for label detection.
type LabelFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelFrame) Reset() { *m = LabelFrame{} }
func (m *LabelFrame) String() string { return proto.CompactTextString(m) }
func (*LabelFrame) ProtoMessage() {}
func (*LabelFrame) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{7}
}
func (m *LabelFrame) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelFrame.Unmarshal(m, b)
}
func (m *LabelFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelFrame.Marshal(b, m, deterministic)
}
func (m *LabelFrame) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelFrame.Merge(m, src)
}
func (m *LabelFrame) XXX_Size() int {
return xxx_messageInfo_LabelFrame.Size(m)
}
func (m *LabelFrame) XXX_DiscardUnknown() {
xxx_messageInfo_LabelFrame.DiscardUnknown(m)
}
var xxx_messageInfo_LabelFrame proto.InternalMessageInfo
func (m *LabelFrame) GetTimeOffset() *duration.Duration {
if m != nil {
return m.TimeOffset
}
return nil
}
func (m *LabelFrame) GetConfidence() float32 {
if m != nil {
return m.Confidence
}
return 0
}
// Detected entity from video analysis.
type Entity struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Textual description, e.g. `Fixed-gear bicycle`.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Language code for `description` in BCP-47 format.
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{8}
}
func (m *Entity) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Entity.Unmarshal(m, b)
}
func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Entity.Marshal(b, m, deterministic)
}
func (m *Entity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Entity.Merge(m, src)
}
func (m *Entity) XXX_Size() int {
return xxx_messageInfo_Entity.Size(m)
}
func (m *Entity) XXX_DiscardUnknown() {
xxx_messageInfo_Entity.DiscardUnknown(m)
}
var xxx_messageInfo_Entity proto.InternalMessageInfo
func (m *Entity) GetEntityId() string {
if m != nil {
return m.EntityId
}
return ""
}
func (m *Entity) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Entity) GetLanguageCode() string {
if m != nil {
return m.LanguageCode
}
return ""
}
// Label annotation.
type LabelAnnotation struct {
// Detected entity.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Common categories for the detected entity.
// E.g. when the label is `Terrier`, the category is likely `dog`. In some
// cases there might be more than one category, e.g. `Terrier` could also be
// a `pet`.
CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
// All video segments where a label was detected.
Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a label was detected.
Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelAnnotation) Reset() { *m = LabelAnnotation{} }
func (m *LabelAnnotation) String() string { return proto.CompactTextString(m) }
func (*LabelAnnotation) ProtoMessage() {}
func (*LabelAnnotation) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{9}
}
func (m *LabelAnnotation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelAnnotation.Unmarshal(m, b)
}
func (m *LabelAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelAnnotation.Marshal(b, m, deterministic)
}
func (m *LabelAnnotation) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelAnnotation.Merge(m, src)
}
func (m *LabelAnnotation) XXX_Size() int {
return xxx_messageInfo_LabelAnnotation.Size(m)
}
func (m *LabelAnnotation) XXX_DiscardUnknown() {
xxx_messageInfo_LabelAnnotation.DiscardUnknown(m)
}
var xxx_messageInfo_LabelAnnotation proto.InternalMessageInfo
func (m *LabelAnnotation) GetEntity() *Entity {
if m != nil {
return m.Entity
}
return nil
}
func (m *LabelAnnotation) GetCategoryEntities() []*Entity {
if m != nil {
return m.CategoryEntities
}
return nil
}
func (m *LabelAnnotation) GetSegments() []*LabelSegment {
if m != nil {
return m.Segments
}
return nil
}
func (m *LabelAnnotation) GetFrames() []*LabelFrame {
if m != nil {
return m.Frames
}
return nil
}
// Video frame level annotation results for explicit content.
type ExplicitContentFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Likelihood of the pornography content.
PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,proto3,enum=google.cloud.videointelligence.v1p1beta1.Likelihood" json:"pornography_likelihood,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExplicitContentFrame) Reset() { *m = ExplicitContentFrame{} }
func (m *ExplicitContentFrame) String() string { return proto.CompactTextString(m) }
func (*ExplicitContentFrame) ProtoMessage() {}
func (*ExplicitContentFrame) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{10}
}
func (m *ExplicitContentFrame) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExplicitContentFrame.Unmarshal(m, b)
}
func (m *ExplicitContentFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExplicitContentFrame.Marshal(b, m, deterministic)
}
func (m *ExplicitContentFrame) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExplicitContentFrame.Merge(m, src)
}
func (m *ExplicitContentFrame) XXX_Size() int {
return xxx_messageInfo_ExplicitContentFrame.Size(m)
}
func (m *ExplicitContentFrame) XXX_DiscardUnknown() {
xxx_messageInfo_ExplicitContentFrame.DiscardUnknown(m)
}
var xxx_messageInfo_ExplicitContentFrame proto.InternalMessageInfo
func (m *ExplicitContentFrame) GetTimeOffset() *duration.Duration {
if m != nil {
return m.TimeOffset
}
return nil
}
func (m *ExplicitContentFrame) GetPornographyLikelihood() Likelihood {
if m != nil {
return m.PornographyLikelihood
}
return Likelihood_LIKELIHOOD_UNSPECIFIED
}
// Explicit content annotation (based on per-frame visual signals only).
// If no explicit content has been detected in a frame, no annotations are
// present for that frame.
type ExplicitContentAnnotation struct {
// All video frames where explicit content was detected.
Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExplicitContentAnnotation) Reset() { *m = ExplicitContentAnnotation{} }
func (m *ExplicitContentAnnotation) String() string { return proto.CompactTextString(m) }
func (*ExplicitContentAnnotation) ProtoMessage() {}
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{11}
}
func (m *ExplicitContentAnnotation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExplicitContentAnnotation.Unmarshal(m, b)
}
func (m *ExplicitContentAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExplicitContentAnnotation.Marshal(b, m, deterministic)
}
func (m *ExplicitContentAnnotation) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExplicitContentAnnotation.Merge(m, src)
}
func (m *ExplicitContentAnnotation) XXX_Size() int {
return xxx_messageInfo_ExplicitContentAnnotation.Size(m)
}
func (m *ExplicitContentAnnotation) XXX_DiscardUnknown() {
xxx_messageInfo_ExplicitContentAnnotation.DiscardUnknown(m)
}
var xxx_messageInfo_ExplicitContentAnnotation proto.InternalMessageInfo
func (m *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame {
if m != nil {
return m.Frames
}
return nil
}
// Annotation results for a single video.
type VideoAnnotationResults struct {
// Output only. Video file location in
// [Google Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Label annotations on video level or user specified segment level.
// There is exactly one element for each unique label.
SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations,proto3" json:"segment_label_annotations,omitempty"`
// Label annotations on shot level.
// There is exactly one element for each unique label.
ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
// Label annotations on frame level.
// There is exactly one element for each unique label.
FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
// Shot annotations. Each shot is represented as a video segment.
ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
// Explicit content annotation.
ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
// Speech transcription.
SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
// Output only. If set, indicates an error. Note that for a single
// `AnnotateVideoRequest` some videos may succeed and some may fail.
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VideoAnnotationResults) Reset() { *m = VideoAnnotationResults{} }
func (m *VideoAnnotationResults) String() string { return proto.CompactTextString(m) }
func (*VideoAnnotationResults) ProtoMessage() {}
func (*VideoAnnotationResults) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{12}
}
func (m *VideoAnnotationResults) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VideoAnnotationResults.Unmarshal(m, b)
}
func (m *VideoAnnotationResults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VideoAnnotationResults.Marshal(b, m, deterministic)
}
func (m *VideoAnnotationResults) XXX_Merge(src proto.Message) {
xxx_messageInfo_VideoAnnotationResults.Merge(m, src)
}
func (m *VideoAnnotationResults) XXX_Size() int {
return xxx_messageInfo_VideoAnnotationResults.Size(m)
}
func (m *VideoAnnotationResults) XXX_DiscardUnknown() {
xxx_messageInfo_VideoAnnotationResults.DiscardUnknown(m)
}
var xxx_messageInfo_VideoAnnotationResults proto.InternalMessageInfo
func (m *VideoAnnotationResults) GetInputUri() string {
if m != nil {
return m.InputUri
}
return ""
}
func (m *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation {
if m != nil {
return m.SegmentLabelAnnotations
}
return nil
}
func (m *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation {
if m != nil {
return m.ShotLabelAnnotations
}
return nil
}
func (m *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation {
if m != nil {
return m.FrameLabelAnnotations
}
return nil
}
func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment {
if m != nil {
return m.ShotAnnotations
}
return nil
}
func (m *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation {
if m != nil {
return m.ExplicitAnnotation
}
return nil
}
func (m *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription {
if m != nil {
return m.SpeechTranscriptions
}
return nil
}
func (m *VideoAnnotationResults) GetError() *status.Status {
if m != nil {
return m.Error
}
return nil
}
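// Illustrative read of a single video's results (a sketch, not part of the
// generated API; `res` is a placeholder *VideoAnnotationResults, e.g. taken
// from AnnotateVideoResponse.GetAnnotationResults()):
//
//	for _, ann := range res.GetSegmentLabelAnnotations() {
//		log.Printf("label: %s", ann.GetEntity().GetDescription())
//	}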
// Video annotation response. Included in the `response`
// field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
type AnnotateVideoResponse struct {
// Annotation results for all videos specified in `AnnotateVideoRequest`.
AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AnnotateVideoResponse) Reset() { *m = AnnotateVideoResponse{} }
func (m *AnnotateVideoResponse) String() string { return proto.CompactTextString(m) }
func (*AnnotateVideoResponse) ProtoMessage() {}
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{13}
}
func (m *AnnotateVideoResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AnnotateVideoResponse.Unmarshal(m, b)
}
func (m *AnnotateVideoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AnnotateVideoResponse.Marshal(b, m, deterministic)
}
func (m *AnnotateVideoResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_AnnotateVideoResponse.Merge(m, src)
}
func (m *AnnotateVideoResponse) XXX_Size() int {
return xxx_messageInfo_AnnotateVideoResponse.Size(m)
}
func (m *AnnotateVideoResponse) XXX_DiscardUnknown() {
xxx_messageInfo_AnnotateVideoResponse.DiscardUnknown(m)
}
var xxx_messageInfo_AnnotateVideoResponse proto.InternalMessageInfo
func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults {
if m != nil {
return m.AnnotationResults
}
return nil
}
// Annotation progress for a single video.
type VideoAnnotationProgress struct {
// Output only. Video file location in
// [Google Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Output only. Approximate percentage processed thus far. Guaranteed to be
// 100 when fully processed.
ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Output only. Time when the request was received.
StartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Output only. Time of the most recent update.
UpdateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VideoAnnotationProgress) Reset() { *m = VideoAnnotationProgress{} }
func (m *VideoAnnotationProgress) String() string { return proto.CompactTextString(m) }
func (*VideoAnnotationProgress) ProtoMessage() {}
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{14}
}
func (m *VideoAnnotationProgress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VideoAnnotationProgress.Unmarshal(m, b)
}
func (m *VideoAnnotationProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VideoAnnotationProgress.Marshal(b, m, deterministic)
}
func (m *VideoAnnotationProgress) XXX_Merge(src proto.Message) {
xxx_messageInfo_VideoAnnotationProgress.Merge(m, src)
}
func (m *VideoAnnotationProgress) XXX_Size() int {
return xxx_messageInfo_VideoAnnotationProgress.Size(m)
}
func (m *VideoAnnotationProgress) XXX_DiscardUnknown() {
xxx_messageInfo_VideoAnnotationProgress.DiscardUnknown(m)
}
var xxx_messageInfo_VideoAnnotationProgress proto.InternalMessageInfo
func (m *VideoAnnotationProgress) GetInputUri() string {
if m != nil {
return m.InputUri
}
return ""
}
func (m *VideoAnnotationProgress) GetProgressPercent() int32 {
if m != nil {
return m.ProgressPercent
}
return 0
}
func (m *VideoAnnotationProgress) GetStartTime() *timestamp.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *VideoAnnotationProgress) GetUpdateTime() *timestamp.Timestamp {
if m != nil {
return m.UpdateTime
}
return nil
}
// Video annotation progress. Included in the `metadata`
// field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
type AnnotateVideoProgress struct {
// Progress metadata for all videos specified in `AnnotateVideoRequest`.
AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AnnotateVideoProgress) Reset() { *m = AnnotateVideoProgress{} }
func (m *AnnotateVideoProgress) String() string { return proto.CompactTextString(m) }
func (*AnnotateVideoProgress) ProtoMessage() {}
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{15}
}
func (m *AnnotateVideoProgress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AnnotateVideoProgress.Unmarshal(m, b)
}
func (m *AnnotateVideoProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AnnotateVideoProgress.Marshal(b, m, deterministic)
}
func (m *AnnotateVideoProgress) XXX_Merge(src proto.Message) {
xxx_messageInfo_AnnotateVideoProgress.Merge(m, src)
}
func (m *AnnotateVideoProgress) XXX_Size() int {
return xxx_messageInfo_AnnotateVideoProgress.Size(m)
}
func (m *AnnotateVideoProgress) XXX_DiscardUnknown() {
xxx_messageInfo_AnnotateVideoProgress.DiscardUnknown(m)
}
var xxx_messageInfo_AnnotateVideoProgress proto.InternalMessageInfo
func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress {
if m != nil {
return m.AnnotationProgress
}
return nil
}
// Config for SPEECH_TRANSCRIPTION.
type SpeechTranscriptionConfig struct {
// *Required* The language of the supplied audio as a
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
// Example: "en-US".
// See [Language Support](https://cloud.google.com/speech/docs/languages)
// for a list of the currently supported language codes.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// *Optional* Maximum number of recognition hypotheses to be returned.
// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
// within each `SpeechRecognitionResult`. The server may return fewer than
// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
// return a maximum of one. If omitted, a maximum of one will be returned.
MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
// *Optional* If set to `true`, the server will attempt to filter out
// profanities, replacing all but the initial character in each filtered word
// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
// won't be filtered out.
FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
// *Optional* A means to provide context to assist the speech recognition.
SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
// *Optional* If 'true', adds punctuation to recognition result hypotheses.
// This feature is only available in select languages. Setting this for
// requests in other languages has no effect at all. The default 'false' value
// does not add punctuation to result hypotheses. NOTE: "This is currently
// offered as an experimental service, complimentary to all users. In the
// future this may be exclusively available as a premium feature."
EnableAutomaticPunctuation bool `protobuf:"varint,5,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
// *Optional* For file formats such as MXF or MKV that support multiple audio
// tracks, specify up to two tracks. Default: track 0.
AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SpeechTranscriptionConfig) Reset() { *m = SpeechTranscriptionConfig{} }
func (m *SpeechTranscriptionConfig) String() string { return proto.CompactTextString(m) }
func (*SpeechTranscriptionConfig) ProtoMessage() {}
func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{16}
}
func (m *SpeechTranscriptionConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SpeechTranscriptionConfig.Unmarshal(m, b)
}
func (m *SpeechTranscriptionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SpeechTranscriptionConfig.Marshal(b, m, deterministic)
}
func (m *SpeechTranscriptionConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_SpeechTranscriptionConfig.Merge(m, src)
}
func (m *SpeechTranscriptionConfig) XXX_Size() int {
return xxx_messageInfo_SpeechTranscriptionConfig.Size(m)
}
func (m *SpeechTranscriptionConfig) XXX_DiscardUnknown() {
xxx_messageInfo_SpeechTranscriptionConfig.DiscardUnknown(m)
}
var xxx_messageInfo_SpeechTranscriptionConfig proto.InternalMessageInfo
func (m *SpeechTranscriptionConfig) GetLanguageCode() string {
if m != nil {
return m.LanguageCode
}
return ""
}
func (m *SpeechTranscriptionConfig) GetMaxAlternatives() int32 {
if m != nil {
return m.MaxAlternatives
}
return 0
}
func (m *SpeechTranscriptionConfig) GetFilterProfanity() bool {
if m != nil {
return m.FilterProfanity
}
return false
}
func (m *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext {
if m != nil {
return m.SpeechContexts
}
return nil
}
func (m *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool {
if m != nil {
return m.EnableAutomaticPunctuation
}
return false
}
func (m *SpeechTranscriptionConfig) GetAudioTracks() []int32 {
if m != nil {
return m.AudioTracks
}
return nil
}
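// Illustrative construction of a transcription config (a sketch; the field
// values below are placeholders, not defaults):
//
//	cfg := &SpeechTranscriptionConfig{
//		LanguageCode:    "en-US",
//		MaxAlternatives: 2,
//		FilterProfanity: true,
//		AudioTracks:     []int32{0},
//	}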
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
// *Optional* A list of strings containing word and phrase "hints" so that
// the speech recognizer is more likely to recognize them. This can be used
// to improve the accuracy for specific words and phrases, for example, if
// specific commands are typically spoken by the user. This can also be used
// to add additional words to the vocabulary of the recognizer. See
// [usage limits](https://cloud.google.com/speech/limits#content).
Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SpeechContext) Reset() { *m = SpeechContext{} }
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
func (*SpeechContext) ProtoMessage() {}
func (*SpeechContext) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{17}
}
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}
func (m *SpeechContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_SpeechContext.Merge(m, src)
}
func (m *SpeechContext) XXX_Size() int {
return xxx_messageInfo_SpeechContext.Size(m)
}
func (m *SpeechContext) XXX_DiscardUnknown() {
xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
func (m *SpeechContext) GetPhrases() []string {
if m != nil {
return m.Phrases
}
return nil
}
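// Illustrative hint list (a sketch; the phrases are placeholders):
//
//	hints := &SpeechContext{Phrases: []string{"weather forecast", "set a timer"}}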
// A speech recognition result corresponding to a portion of the audio.
type SpeechTranscription struct {
// Output only. May contain one or more recognition hypotheses (up to the
// maximum specified in `max_alternatives`).
// These alternatives are ordered in terms of accuracy, with the top (first)
// alternative being the most probable, as ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SpeechTranscription) Reset() { *m = SpeechTranscription{} }
func (m *SpeechTranscription) String() string { return proto.CompactTextString(m) }
func (*SpeechTranscription) ProtoMessage() {}
func (*SpeechTranscription) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{18}
}
func (m *SpeechTranscription) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SpeechTranscription.Unmarshal(m, b)
}
func (m *SpeechTranscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SpeechTranscription.Marshal(b, m, deterministic)
}
func (m *SpeechTranscription) XXX_Merge(src proto.Message) {
xxx_messageInfo_SpeechTranscription.Merge(m, src)
}
func (m *SpeechTranscription) XXX_Size() int {
return xxx_messageInfo_SpeechTranscription.Size(m)
}
func (m *SpeechTranscription) XXX_DiscardUnknown() {
xxx_messageInfo_SpeechTranscription.DiscardUnknown(m)
}
var xxx_messageInfo_SpeechTranscription proto.InternalMessageInfo
func (m *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative {
if m != nil {
return m.Alternatives
}
return nil
}
// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
// Output only. Transcript text representing the words that the user spoke.
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is typically provided only for the top hypothesis, and
// only for `is_final=true` results. Clients should not rely on the
// `confidence` field as it is not guaranteed to be accurate or consistent.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A list of word-specific information for each recognized word.
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} }
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionAlternative) ProtoMessage() {}
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{19}
}
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}
func (m *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
xxx_messageInfo_SpeechRecognitionAlternative.Merge(m, src)
}
func (m *SpeechRecognitionAlternative) XXX_Size() int {
return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
func (m *SpeechRecognitionAlternative) GetTranscript() string {
if m != nil {
return m.Transcript
}
return ""
}
func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
if m != nil {
return m.Confidence
}
return 0
}
func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
if m != nil {
return m.Words
}
return nil
}
// Word-specific information for recognized words. Word information is only
// included in the response when certain request parameters are set, such
// as `enable_word_time_offsets`.
type WordInfo struct {
// Output only. Time offset relative to the beginning of the audio, and
// corresponding to the start of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Output only. Time offset relative to the beginning of the audio, and
// corresponding to the end of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// Output only. The word corresponding to this set of information.
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WordInfo) Reset() { *m = WordInfo{} }
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
func (*WordInfo) ProtoMessage() {}
func (*WordInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_9e6ec0147460ac77, []int{20}
}
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}
func (m *WordInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_WordInfo.Merge(m, src)
}
func (m *WordInfo) XXX_Size() int {
return xxx_messageInfo_WordInfo.Size(m)
}
func (m *WordInfo) XXX_DiscardUnknown() {
xxx_messageInfo_WordInfo.DiscardUnknown(m)
}
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
func (m *WordInfo) GetStartTime() *duration.Duration {
if m != nil {
return m.StartTime
}
return nil
}
func (m *WordInfo) GetEndTime() *duration.Duration {
if m != nil {
return m.EndTime
}
return nil
}
func (m *WordInfo) GetWord() string {
if m != nil {
return m.Word
}
return ""
}
func init() {
proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.Feature", Feature_name, Feature_value)
proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.LabelDetectionMode", LabelDetectionMode_name, LabelDetectionMode_value)
proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.Likelihood", Likelihood_name, Likelihood_value)
proto.RegisterType((*AnnotateVideoRequest)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest")
proto.RegisterType((*VideoContext)(nil), "google.cloud.videointelligence.v1p1beta1.VideoContext")
proto.RegisterType((*LabelDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig")
proto.RegisterType((*ShotChangeDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig")
proto.RegisterType((*ExplicitContentDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig")
proto.RegisterType((*VideoSegment)(nil), "google.cloud.videointelligence.v1p1beta1.VideoSegment")
proto.RegisterType((*LabelSegment)(nil), "google.cloud.videointelligence.v1p1beta1.LabelSegment")
proto.RegisterType((*LabelFrame)(nil), "google.cloud.videointelligence.v1p1beta1.LabelFrame")
proto.RegisterType((*Entity)(nil), "google.cloud.videointelligence.v1p1beta1.Entity")
proto.RegisterType((*LabelAnnotation)(nil), "google.cloud.videointelligence.v1p1beta1.LabelAnnotation")
proto.RegisterType((*ExplicitContentFrame)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame")
proto.RegisterType((*ExplicitContentAnnotation)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation")
proto.RegisterType((*VideoAnnotationResults)(nil), "google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults")
proto.RegisterType((*AnnotateVideoResponse)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse")
proto.RegisterType((*VideoAnnotationProgress)(nil), "google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress")
proto.RegisterType((*AnnotateVideoProgress)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress")
proto.RegisterType((*SpeechTranscriptionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig")
proto.RegisterType((*SpeechContext)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechContext")
proto.RegisterType((*SpeechTranscription)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechTranscription")
proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative")
proto.RegisterType((*WordInfo)(nil), "google.cloud.videointelligence.v1p1beta1.WordInfo")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VideoIntelligenceServiceClient interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}
type videoIntelligenceServiceClient struct {
cc *grpc.ClientConn
}
func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient {
return &videoIntelligenceServiceClient{cc}
}
func (c *videoIntelligenceServiceClient) AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
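// Illustrative client call (a sketch; `conn` is an assumed dialed
// *grpc.ClientConn, `ctx` an assumed context.Context, and the request fields
// shown are placeholders):
//
//	client := NewVideoIntelligenceServiceClient(conn)
//	op, err := client.AnnotateVideo(ctx, &AnnotateVideoRequest{
//		InputUri: "gs://bucket/video.mp4",
//		Features: []Feature{Feature_LABEL_DETECTION},
//	})
//	// On success, poll `op` via the google.longrunning.Operations API:
//	// its metadata carries AnnotateVideoProgress, its response
//	// AnnotateVideoResponse.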
// VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service.
type VideoIntelligenceServiceServer interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
}
func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer) {
s.RegisterService(&_VideoIntelligenceService_serviceDesc, srv)
}
func _VideoIntelligenceService_AnnotateVideo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AnnotateVideoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, req.(*AnnotateVideoRequest))
}
return interceptor(ctx, in, info, handler)
}
var _VideoIntelligenceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService",
HandlerType: (*VideoIntelligenceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "AnnotateVideo",
Handler: _VideoIntelligenceService_AnnotateVideo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/videointelligence/v1p1beta1/video_intelligence.proto",
}
func init() {
proto.RegisterFile("google/cloud/videointelligence/v1p1beta1/video_intelligence.proto", fileDescriptor_9e6ec0147460ac77)
}
var fileDescriptor_9e6ec0147460ac77 = []byte{
// 1807 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xcd, 0x6f, 0x23, 0x49,
0x15, 0xa7, 0xed, 0xc4, 0x89, 0x9f, 0x9d, 0xd8, 0x53, 0x71, 0x12, 0x27, 0x3b, 0x93, 0xc9, 0xf6,
0x82, 0x94, 0x1d, 0x90, 0x4d, 0xc2, 0x6a, 0x61, 0x67, 0x61, 0xb5, 0x8e, 0xd3, 0xd9, 0x58, 0x64,
0x62, 0xab, 0xec, 0x09, 0x0c, 0x0c, 0x6a, 0x3a, 0xdd, 0xe5, 0x4e, 0x33, 0xed, 0xae, 0xde, 0xee,
0xea, 0x30, 0x73, 0x5b, 0x3e, 0x24, 0x90, 0x40, 0x5c, 0x46, 0x48, 0xfc, 0x09, 0x88, 0x23, 0xff,
0x00, 0x17, 0x2e, 0x70, 0xe5, 0x84, 0x04, 0x37, 0xfe, 0x0b, 0x2e, 0xab, 0xae, 0xaa, 0xb6, 0xdb,
0x76, 0xbe, 0x9c, 0xdc, 0xdc, 0xef, 0xe3, 0xf7, 0x3e, 0xea, 0xd5, 0x7b, 0xaf, 0x0c, 0x0d, 0x9b,
0x52, 0xdb, 0x25, 0x75, 0xd3, 0xa5, 0x91, 0x55, 0xbf, 0x70, 0x2c, 0x42, 0x1d, 0x8f, 0x11, 0xd7,
0x75, 0x6c, 0xe2, 0x99, 0xa4, 0x7e, 0xb1, 0xeb, 0xef, 0x9e, 0x11, 0x66, 0xec, 0x0a, 0x9e, 0x9e,
0x66, 0xd6, 0xfc, 0x80, 0x32, 0x8a, 0x76, 0x04, 0x44, 0x8d, 0x43, 0xd4, 0xa6, 0x20, 0x6a, 0x43,
0x88, 0xcd, 0x87, 0xd2, 0x98, 0xe1, 0x3b, 0x75, 0xc3, 0xf3, 0x28, 0x33, 0x98, 0x43, 0xbd, 0x50,
0xe0, 0x6c, 0xbe, 0x27, 0xb9, 0x2e, 0xf5, 0xec, 0x20, 0xf2, 0x3c, 0xc7, 0xb3, 0xeb, 0xd4, 0x27,
0xc1, 0x98, 0xd0, 0x96, 0x14, 0xe2, 0x5f, 0x67, 0x51, 0xbf, 0x6e, 0x45, 0x42, 0x40, 0xf2, 0x1f,
0x4f, 0xf2, 0x99, 0x33, 0x20, 0x21, 0x33, 0x06, 0xbe, 0x14, 0x58, 0x97, 0x02, 0x81, 0x6f, 0xd6,
0x43, 0x66, 0xb0, 0x48, 0x22, 0xab, 0x7f, 0xcf, 0x40, 0xa5, 0x21, 0x9c, 0x22, 0xa7, 0x71, 0x10,
0x98, 0x7c, 0x1e, 0x91, 0x90, 0xa1, 0x77, 0x20, 0xef, 0x78, 0x7e, 0xc4, 0xf4, 0x28, 0x70, 0xaa,
0xca, 0xb6, 0xb2, 0x93, 0xc7, 0x8b, 0x9c, 0xf0, 0x3c, 0x70, 0xd0, 0x7b, 0xb0, 0x24, 0x98, 0x26,
0xf5, 0x18, 0xf1, 0x58, 0x35, 0xb7, 0xad, 0xec, 0x14, 0x71, 0x91, 0x13, 0x9b, 0x82, 0x86, 0x9e,
0xc1, 0x62, 0x9f, 0x18, 0x2c, 0x0a, 0x48, 0x58, 0xcd, 0x6c, 0x67, 0x77, 0x96, 0xf7, 0x76, 0x6b,
0xb7, 0x4d, 0x5a, 0xed, 0x50, 0x68, 0xe2, 0x21, 0x04, 0xfa, 0x31, 0x2c, 0x89, 0xc3, 0xe0, 0x36,
0x5f, 0xb3, 0x6a, 0x76, 0x5b, 0xd9, 0x29, 0xec, 0x7d, 0x78, 0x7b, 0x4c, 0x1e, 0x5f, 0x53, 0x68,
0xe3, 0xe2, 0x45, 0xea, 0x0b, 0x3d, 0x02, 0xa0, 0x11, 0x4b, 0xc2, 0x9d, 0xe3, 0xe1, 0xe6, 0x05,
0x25, 0x8e, 0xf7, 0x31, 0x14, 0x5c, 0x6a, 0xf2, 0x8c, 0xeb, 0x8e, 0x55, 0x9d, 0xe7, 0x7c, 0x48,
0x48, 0x2d, 0x4b, 0xfd, 0xff, 0x1c, 0x14, 0xd3, 0xf0, 0x08, 0xc3, 0x62, 0x48, 0xec, 0x01, 0xf1,
0x58, 0x58, 0x55, 0xb6, 0xb3, 0x77, 0x70, 0xb4, 0x2b, 0xd4, 0xf1, 0x10, 0x07, 0x31, 0x58, 0x73,
0x8d, 0x33, 0xe2, 0xea, 0x16, 0x61, 0xc4, 0xe4, 0xce, 0x98, 0xd4, 0xeb, 0x3b, 0x76, 0x35, 0xc3,
0x53, 0xf1, 0xc9, 0xed, 0x2d, 0x1c, 0xc7, 0x38, 0x07, 0x09, 0x4c, 0x93, 0xa3, 0xe0, 0x8a, 0x7b,
0x09, 0x15, 0xfd, 0x5a, 0x81, 0x87, 0xe1, 0x39, 0x65, 0xba, 0x79, 0x6e, 0x78, 0x36, 0x99, 0x36,
0x2e, 0xce, 0xa1, 0x79, 0x7b, 0xe3, 0xdd, 0x73, 0xca, 0x9a, 0x1c, 0x6c, 0xd2, 0x83, 0x8d, 0xf0,
0x2a, 0x16, 0x7a, 0xab, 0xc0, 0xbb, 0xe4, 0xb5, 0xef, 0x3a, 0xa6, 0x33, 0x2c, 0xbb, 0x69, 0x5f,
0xe6, 0xb8, 0x2f, 0x47, 0xb7, 0xf7, 0x45, 0x93, 0x90, 0xb2, 0x68, 0x27, 0x1d, 0xda, 0x22, 0xd7,
0xf2, 0xd1, 0xaf, 0x14, 0x78, 0x27, 0xf4, 0x09, 0x31, 0xcf, 0x75, 0x16, 0x18, 0x5e, 0x68, 0x06,
0x8e, 0x9f, 0xf6, 0x27, 0x37, 0x73, 0x6e, 0x38, 0x58, 0x2f, 0x8d, 0x35, 0xcc, 0xcd, 0x55, 0x2c,
0xf5, 0x1f, 0x0a, 0x54, 0x2e, 0x3b, 0x51, 0xe4, 0x41, 0x65, 0xb2, 0x62, 0x06, 0xd4, 0x22, 0xfc,
0x3e, 0x2f, 0xef, 0x7d, 0xf7, 0xae, 0xf5, 0xf2, 0x8c, 0x5a, 0x04, 0x23, 0x77, 0x8a, 0x86, 0xbe,
0x0e, 0x0f, 0x42, 0xd1, 0xde, 0x8c, 0xe0, 0x8d, 0x6e, 0x1a, 0x03, 0x12, 0x18, 0xbc, 0x38, 0x17,
0x71, 0x79, 0xc4, 0x68, 0x72, 0x3a, 0xaa, 0xc0, 0x7c, 0xec, 0x8c, 0xcb, 0x0b, 0x28, 0x8f, 0xc5,
0x87, 0xba, 0x0b, 0x1b, 0x57, 0xd6, 0xc7, 0x48, 0x45, 0x49, 0xab, 0x7c, 0x08, 0x5b, 0xd7, 0x1f,
0xe3, 0x15, 0x7a, 0x7f, 0x52, 0xe4, 0xa5, 0x95, 0x57, 0x0d, 0x69, 0xdc, 0xfd, 0x80, 0xe9, 0x71,
0xfb, 0xd4, 0x69, 0xbf, 0x1f, 0x12, 0xc6, 0x55, 0x0a, 0x7b, 0x1b, 0x49, 0xae, 0x92, 0x16, 0x5b,
0x3b, 0x90, 0x2d, 0x18, 0x97, 0xb8, 0x4e, 0xcf, 0x19, 0x90, 0x36, 0xd7, 0x40, 0x0d, 0x28, 0x11,
0xcf, 0x1a, 0x03, 0xc9, 0xdc, 0x04, 0xb2, 0x44, 0x3c, 0x6b, 0x04, 0xa1, 0x7e, 0xa1, 0x40, 0x91,
0xe7, 0x3c, 0x71, 0xad, 0x03, 0x0b, 0xb2, 0x0f, 0x48, 0x87, 0xee, 0xda, 0x4e, 0x12, 0x18, 0xb4,
0x05, 0xc0, 0x8b, 0xd4, 0x8a, 0xa5, 0xb9, 0x83, 0x19, 0x9c, 0xa2, 0xa8, 0xe7, 0x00, 0xdc, 0x83,
0xc3, 0xc0, 0x18, 0x10, 0xf4, 0x14, 0x0a, 0x33, 0x25, 0x05, 0xd8, 0x28, 0x1f, 0x37, 0x59, 0x72,
0x21, 0xa7, 0x79, 0xcc, 0x61, 0x6f, 0xe2, 0xa1, 0x43, 0xf8, 0xaf, 0xb8, 0xcb, 0xca, 0xa1, 0x23,
0x08, 0x2d, 0x0b, 0x6d, 0x43, 0xc1, 0x22, 0xc3, 0xd2, 0xe7, 0x38, 0x79, 0x9c, 0x26, 0xc5, 0x63,
0xc9, 0x35, 0x3c, 0x3b, 0x32, 0x6c, 0xa2, 0x9b, 0x71, 0x9d, 0x8b, 0xca, 0x2a, 0x26, 0xc4, 0x26,
0xb5, 0x88, 0xfa, 0x9f, 0x0c, 0x94, 0x78, 0x60, 0x8d, 0xe1, 0x2c, 0x46, 0x47, 0x90, 0x13, 0x66,
0x64, 0x60, 0xdf, 0x9c, 0xa1, 0x81, 0x70, 0x3d, 0x2c, 0xf5, 0xd1, 0x4f, 0xe0, 0x81, 0x69, 0x30,
0x62, 0xd3, 0xe0, 0x8d, 0xce, 0x49, 0x8e, 0x9c, 0x7e, 0x77, 0x01, 0x2d, 0x27, 0x50, 0x9a, 0x44,
0x1a, 0x1b, 0x2b, 0xd9, 0x59, 0xc7, 0x4a, 0xba, 0xa0, 0x52, 0x63, 0xe5, 0x18, 0x72, 0xfd, 0xf8,
0x8c, 0xc3, 0xea, 0x1c, 0x47, 0xfc, 0x60, 0x46, 0x44, 0x5e, 0x20, 0x58, 0x62, 0xa8, 0x7f, 0x53,
0xa0, 0x32, 0x71, 0x1b, 0xef, 0x5f, 0x41, 0xaf, 0x60, 0xcd, 0xa7, 0x81, 0x47, 0xed, 0xc0, 0xf0,
0xcf, 0xdf, 0xe8, 0xae, 0xf3, 0x8a, 0xb8, 0xce, 0x39, 0xa5, 0x16, 0xaf, 0x82, 0xe5, 0x99, 0x5c,
0x1e, 0xea, 0xe2, 0xd5, 0x14, 0xe6, 0x88, 0xac, 0x86, 0xb0, 0x31, 0x11, 0x40, 0xaa, 0x52, 0x4e,
0x87, 0xc9, 0x12, 0x53, 0xfd, 0x93, 0x3b, 0x8f, 0x9a, 0xf1, 0xb4, 0xfd, 0x26, 0x07, 0x6b, 0xfc,
0x9e, 0x8e, 0x6c, 0x61, 0x12, 0x46, 0x2e, 0x0b, 0xaf, 0xdf, 0xc4, 0x22, 0xd8, 0x90, 0x07, 0xa9,
0x8b, 0x4e, 0x9f, 0xda, 0x30, 0x65, 0xdd, 0x7d, 0x34, 0xe3, 0x79, 0xa6, 0x3c, 0x58, 0x97, 0xd8,
0x13, 0xf4, 0x10, 0x51, 0x58, 0xe3, 0x3b, 0xc1, 0xb4, 0xcd, 0xec, 0x7d, 0x6d, 0x56, 0x62, 0xe0,
0x29, 0x83, 0x9f, 0xc3, 0x3a, 0xcf, 0xd4, 0x25, 0x16, 0xe7, 0xee, 0x6b, 0x71, 0x95, 0x23, 0x4f,
0x99, 0x34, 0xa0, 0xcc, 0x63, 0x4c, 0xdb, 0xca, 0xdd, 0x6b, 0x95, 0x2b, 0xc5, 0x78, 0x69, 0x13,
0x0c, 0x56, 0x86, 0x3b, 0xcd, 0xc8, 0x4c, 0x75, 0x61, 0xd6, 0xad, 0xe1, 0xca, 0x7a, 0xc5, 0x28,
0xc1, 0x4f, 0xd5, 0x70, 0x00, 0xab, 0x97, 0xed, 0x2c, 0x61, 0xb5, 0xc0, 0xa3, 0xfb, 0xde, 0xbd,
0xb6, 0x15, 0x5c, 0xb9, 0x64, 0x4f, 0x09, 0xd1, 0x0e, 0xcc, 0x93, 0x20, 0xa0, 0x41, 0x35, 0xcf,
0x63, 0x43, 0x89, 0x8d, 0xc0, 0x37, 0x6b, 0x5d, 0xfe, 0x20, 0xc1, 0x42, 0x40, 0xfd, 0xad, 0x02,
0xab, 0x13, 0x2f, 0x92, 0xd0, 0xa7, 0x5e, 0x48, 0x10, 0x05, 0x34, 0x4a, 0x92, 0x1e, 0x88, 0xeb,
0x21, 0xef, 0xe1, 0xa7, 0x33, 0x1e, 0xc9, 0xd4, 0x35, 0xc3, 0x0f, 0x8c, 0x49, 0x92, 0xfa, 0x6f,
0x05, 0xd6, 0x27, 0xa4, 0x3b, 0x01, 0xb5, 0x03, 0x12, 0xde, 0x70, 0x2b, 0xdf, 0x87, 0xb2, 0x2f,
0x05, 0x75, 0x9f, 0x04, 0x66, 0x3c, 0xb6, 0xe3, 0x4e, 0x35, 0x8f, 0x4b, 0x09, 0xbd, 0x23, 0xc8,
0xe8, 0x23, 0x80, 0xd1, 0xce, 0x21, 0x77, 0xe9, 0xcd, 0xa9, 0xae, 0xd8, 0x4b, 0xde, 0x73, 0x38,
0x3f, 0xdc, 0x36, 0xd0, 0xc7, 0x50, 0x88, 0x7c, 0xcb, 0x60, 0x44, 0xe8, 0xce, 0xdd, 0xa8, 0x0b,
0x42, 0x3c, 0x26, 0xa8, 0xbf, 0x9b, 0x4c, 0xf3, 0x30, 0xb2, 0x00, 0x56, 0x52, 0x69, 0x4e, 0xfc,
0x95, 0x79, 0x6e, 0xdc, 0x39, 0xcf, 0x09, 0x3e, 0x4e, 0x1d, 0x62, 0x42, 0x53, 0xff, 0x9b, 0x81,
0x8d, 0x2b, 0x57, 0xdf, 0xe9, 0xb9, 0xae, 0x4c, 0xcf, 0xf5, 0x38, 0xe7, 0x03, 0xe3, 0xb5, 0x6e,
0xb8, 0x8c, 0x04, 0x9e, 0xc1, 0x9c, 0x0b, 0x3e, 0x78, 0x79, 0xce, 0x07, 0xc6, 0xeb, 0x46, 0x8a,
0x1c, 0x8b, 0xf6, 0x9d, 0x98, 0x10, 0x47, 0xd7, 0x37, 0xbc, 0x78, 0xf0, 0x67, 0xf9, 0x96, 0x5a,
0x12, 0xf4, 0x4e, 0x42, 0x46, 0x3f, 0x85, 0x92, 0xbc, 0x2b, 0xf2, 0xd9, 0x99, 0xf4, 0x9b, 0x6f,
0xcf, 0x7a, 0x4b, 0x92, 0x87, 0xe7, 0x72, 0x98, 0xfe, 0x0c, 0xd1, 0xa7, 0xf0, 0x90, 0x78, 0xc6,
0x99, 0x4b, 0x74, 0x23, 0x62, 0x74, 0x60, 0x30, 0xc7, 0xd4, 0xfd, 0xc8, 0x33, 0x59, 0x24, 0x9a,
0xc1, 0x3c, 0x77, 0x6c, 0x53, 0xc8, 0x34, 0x12, 0x91, 0xce, 0x48, 0x02, 0xbd, 0x0b, 0x45, 0x23,
0xb2, 0x1c, 0x1a, 0x5f, 0x67, 0xf3, 0x95, 0x68, 0x52, 0xf3, 0xb8, 0xc0, 0x69, 0x3d, 0x4e, 0x52,
0xdf, 0x87, 0xa5, 0x31, 0x2f, 0x50, 0x15, 0x16, 0xfc, 0xf3, 0xc0, 0x08, 0xe5, 0x20, 0xcb, 0xe3,
0xe4, 0x53, 0xfd, 0x85, 0x02, 0x2b, 0x97, 0x1c, 0x05, 0xfa, 0x19, 0x14, 0xc7, 0x72, 0x2b, 0xea,
0xe1, 0x70, 0xd6, 0x34, 0x60, 0x62, 0x52, 0xdb, 0x73, 0x62, 0xc8, 0xd4, 0x99, 0xe0, 0x31, 0x6c,
0xf5, 0xcf, 0x0a, 0x3c, 0xbc, 0x4e, 0x3c, 0x5e, 0x29, 0x47, 0xbd, 0x4b, 0x96, 0x43, 0x8a, 0x72,
0xd3, 0xca, 0x89, 0x8e, 0x60, 0xfe, 0xe7, 0x34, 0xb0, 0x92, 0x71, 0xb5, 0x77, 0xfb, 0x28, 0x7e,
0x40, 0x03, 0xab, 0xe5, 0xf5, 0x29, 0x16, 0x00, 0xea, 0x1f, 0x14, 0x58, 0x4c, 0x68, 0xe8, 0x3b,
0x63, 0x97, 0xf9, 0xc6, 0x15, 0x27, 0x75, 0x97, 0x3f, 0x80, 0xc5, 0xe4, 0xcd, 0x70, 0xf3, 0x63,
0x61, 0x41, 0x3e, 0x16, 0x10, 0x82, 0xb9, 0xd8, 0x0b, 0xb9, 0xe7, 0xf2, 0xdf, 0x4f, 0x7e, 0xaf,
0xc0, 0x82, 0xfc, 0xf7, 0x04, 0xad, 0xc3, 0xca, 0xa1, 0xd6, 0xe8, 0x3d, 0xc7, 0x9a, 0xfe, 0xfc,
0xa4, 0xdb, 0xd1, 0x9a, 0xad, 0xc3, 0x96, 0x76, 0x50, 0xfe, 0x0a, 0x5a, 0x81, 0xd2, 0x71, 0x63,
0x5f, 0x3b, 0xd6, 0x0f, 0xb4, 0x9e, 0xd6, 0xec, 0xb5, 0xda, 0x27, 0x65, 0x05, 0x6d, 0xc0, 0x6a,
0xf7, 0xa8, 0xdd, 0xd3, 0x9b, 0x47, 0x8d, 0x93, 0xcf, 0xb4, 0x14, 0x2b, 0x83, 0xb6, 0x60, 0x53,
0xfb, 0x61, 0xe7, 0xb8, 0xd5, 0x6c, 0xf5, 0xf4, 0x66, 0xfb, 0xa4, 0xa7, 0x9d, 0xf4, 0x52, 0xfc,
0x2c, 0xaa, 0x42, 0xa5, 0xdb, 0xd1, 0xb4, 0xe6, 0x91, 0xde, 0xc3, 0x8d, 0x93, 0x6e, 0x13, 0xb7,
0x3a, 0x9c, 0x93, 0x7b, 0x12, 0x00, 0x9a, 0x7e, 0x3c, 0xa2, 0xaf, 0xc2, 0xf6, 0x84, 0x7d, 0xfd,
0x59, 0xfb, 0x60, 0xd2, 0xcb, 0x25, 0xc8, 0x73, 0x87, 0x62, 0x56, 0x59, 0x41, 0xcb, 0x00, 0x87,
0xb8, 0xf1, 0x4c, 0x13, 0xdf, 0x99, 0x38, 0x3a, 0xce, 0x6e, 0x9c, 0x1c, 0xe8, 0x29, 0x46, 0xf6,
0x09, 0x03, 0x18, 0xed, 0x73, 0x68, 0x13, 0xd6, 0x8e, 0x5b, 0xdf, 0xd7, 0x8e, 0x5b, 0x47, 0xed,
0xf6, 0xc1, 0x84, 0x85, 0x07, 0xb0, 0x74, 0xaa, 0xe1, 0x17, 0xfa, 0xf3, 0x13, 0x2e, 0xf2, 0xa2,
0xac, 0xa0, 0x22, 0x2c, 0x0e, 0xbf, 0x32, 0xf1, 0x57, 0xa7, 0xdd, 0xed, 0xb6, 0xf6, 0x8f, 0xb5,
0x72, 0x16, 0x01, 0xe4, 0x24, 0x67, 0x0e, 0x95, 0xa0, 0xc0, 0x55, 0x25, 0x61, 0x7e, 0xef, 0xaf,
0x0a, 0x54, 0x79, 0xcf, 0x6b, 0xa5, 0x2a, 0xa7, 0x4b, 0x82, 0x0b, 0xc7, 0x24, 0xe8, 0x8f, 0x0a,
0x2c, 0x8d, 0xb5, 0x5b, 0x34, 0xc3, 0xe6, 0x78, 0xd9, 0x1f, 0x74, 0x9b, 0x8f, 0x12, 0xfd, 0xd4,
0x3f, 0x87, 0xb5, 0x76, 0xf2, 0xcf, 0xa1, 0xfa, 0xb5, 0x5f, 0xfe, 0xeb, 0x7f, 0x6f, 0x33, 0x8f,
0xd5, 0xcd, 0xc9, 0x3f, 0x33, 0xc3, 0xa7, 0xb2, 0xff, 0x92, 0xa7, 0xca, 0x93, 0xfd, 0x2f, 0x32,
0xf0, 0x0d, 0x93, 0x0e, 0x6e, 0xed, 0xcb, 0xfe, 0xa3, 0xab, 0x42, 0xec, 0xc4, 0x65, 0xda, 0x51,
0x7e, 0xf4, 0x42, 0x42, 0xd9, 0x34, 0x6e, 0xcf, 0x35, 0x1a, 0xd8, 0x75, 0x9b, 0x78, 0xbc, 0x88,
0xeb, 0x82, 0x65, 0xf8, 0x4e, 0x78, 0xf3, 0x5f, 0xaf, 0x1f, 0x4f, 0xf1, 0xfe, 0x92, 0xd9, 0xf9,
0x4c, 0x60, 0x37, 0xb9, 0x9b, 0x53, 0x9e, 0xd4, 0x4e, 0x77, 0x3b, 0xbb, 0xfb, 0xb1, 0xf2, 0x3f,
0x13, 0xd1, 0x97, 0x5c, 0xf4, 0xe5, 0x94, 0xe8, 0xcb, 0xd3, 0xc4, 0xce, 0x59, 0x8e, 0xfb, 0xf6,
0xad, 0x2f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x82, 0x99, 0xe4, 0x9b, 0x15, 0x16, 0x00, 0x00,
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
__declspec(dllexport) void some_function() {
}
int main() {
return 0;
}
| {
"pile_set_name": "Github"
} |
/*!
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
import * as path from 'path'
import { CloudFormationTemplateRegistry } from '../../shared/cloudformation/templateRegistry'
import { CloudFormationTemplateRegistryManager } from '../../shared/cloudformation/templateRegistryManager'
import { mkdir, rmrf } from '../../shared/filesystem'
import { makeSampleSamTemplateYaml, strToYamlFile } from '../../test/shared/cloudformation/cloudformationTestUtils'
import { getTestWorkspaceFolder } from '../integrationTestsUtilities'
/**
* Note: these tests are pretty shallow right now. They do not test the following:
* * Adding/removing workspace folders
*/
describe('CloudFormation Template Registry Manager', async () => {
let registry: CloudFormationTemplateRegistry
let manager: CloudFormationTemplateRegistryManager
let workspaceDir: string
let testDir: string
let testDirNested: string
let dir: number = 0
before(async () => {
workspaceDir = getTestWorkspaceFolder()
})
beforeEach(async () => {
testDir = path.join(workspaceDir, dir.toString())
testDirNested = path.join(testDir, 'nested')
await mkdir(testDirNested, { recursive: true })
registry = new CloudFormationTemplateRegistry()
manager = new CloudFormationTemplateRegistryManager(registry)
})
afterEach(async () => {
manager.dispose()
await rmrf(testDir)
dir++
})
it('adds initial template files with yaml and yml extensions at various nesting levels', async () => {
await strToYamlFile(makeSampleSamTemplateYaml(true), path.join(testDir, 'test.yaml'))
await strToYamlFile(makeSampleSamTemplateYaml(false), path.join(testDirNested, 'test.yml'))
await manager.addTemplateGlob('**/test.{yaml,yml}')
await registryHasTargetNumberOfFiles(registry, 2)
})
it('adds dynamically-added template files with yaml and yml extensions at various nesting levels', async () => {
await manager.addTemplateGlob('**/test.{yaml,yml}')
await strToYamlFile(makeSampleSamTemplateYaml(false), path.join(testDir, 'test.yml'))
await strToYamlFile(makeSampleSamTemplateYaml(true), path.join(testDirNested, 'test.yaml'))
await registryHasTargetNumberOfFiles(registry, 2)
})
it('can handle changed files', async () => {
const filepath = path.join(testDir, 'changeMe.yml')
await strToYamlFile(makeSampleSamTemplateYaml(false), filepath)
await manager.addTemplateGlob('**/changeMe.yml')
await registryHasTargetNumberOfFiles(registry, 1)
await queryRegistryForFileWithGlobalsKeyStatus(registry, filepath, false)
await strToYamlFile(makeSampleSamTemplateYaml(true), filepath)
await queryRegistryForFileWithGlobalsKeyStatus(registry, filepath, true)
})
it('can handle deleted files', async () => {
await manager.addTemplateGlob('**/deleteMe.yml')
// Specifically creating the file after the watcher is added
// Otherwise, it seems the file is deleted before the file watcher realizes the file exists
// This way, we know that a file watcher detects the create event on this file and thus is tracking it
const filepath = path.join(testDir, 'deleteMe.yml')
await strToYamlFile(makeSampleSamTemplateYaml(false), filepath)
await registryHasTargetNumberOfFiles(registry, 1)
await rmrf(filepath)
await registryHasTargetNumberOfFiles(registry, 0)
})
})
async function registryHasTargetNumberOfFiles(registry: CloudFormationTemplateRegistry, target: number) {
while (registry.registeredTemplates.length !== target) {
await new Promise(resolve => setTimeout(resolve, 20))
}
}
async function queryRegistryForFileWithGlobalsKeyStatus(
registry: CloudFormationTemplateRegistry,
filepath: string,
hasGlobals: boolean
) {
let foundMatch = false
while (!foundMatch) {
await new Promise(resolve => setTimeout(resolve, 20))
const obj = registry.getRegisteredTemplate(filepath)
if (obj) {
foundMatch = Object.keys(obj.template).includes('Globals') === hasGlobals
}
}
}
| {
"pile_set_name": "Github"
} |
<!-- Do not edit this file. It is automatically generated by API Documenter. -->
[Home](./index.md) > [sip.js](./sip.js.md) > [PublisherOptions](./sip.js.publisheroptions.md) > [unpublishOnClose](./sip.js.publisheroptions.unpublishonclose.md)
## PublisherOptions.unpublishOnClose property
If set to true, the UA will gracefully unpublish for the event when the UA closes.
<b>Signature:</b>
```typescript
unpublishOnClose?: boolean;
```
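<b>Example</b> (an illustrative sketch — the `Publisher` constructor arguments shown here are assumptions, not documented on this page):

```typescript
const publisher = new Publisher(userAgent, targetURI, "presence", {
  unpublishOnClose: true // gracefully unpublish the event when the UA closes
});
```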
| {
"pile_set_name": "Github"
} |
#include "proto.h"
const struct compressor compressors[] = {
// NOTE: the "flags" field for each compressor will be set to the chosen/current method number minus the base
// number for that particular compressor. That means that each compressor will use a zero-based flags value.
{.methods = 72, .name = "singlepass", .function = &try_compress_single_pass}, // 0-71
{.methods = 2, .name = "null", .function = &store_uncompressed}, // 72-73
{.methods = 6, .name = "repetitions", .function = &try_compress_repetitions}, // 74-79
{.methods = 16, .name = "multipass", .function = &try_compress_multi_pass}, // 80-95
{0} // end of the list
};
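/* Example mapping implied by the ranges above: global method number 75 falls in
   the "repetitions" range (74-79), so that compressor is invoked with
   flags = 75 - 74 = 1. */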
const unsigned char bit_flipping_table[] = {
// For each byte, the table contains that same byte with its bits flipped around (for instance,
// 0x58 (01011000 binary) becomes 0x1a (00011010 binary)). This is faster than flipping bits
// manually at runtime.
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
};
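/* Example lookup: bit_flipping_table[0x58] == 0x1a, i.e. 01011000 reversed is
   00011010 -- a single array access instead of eight shift/mask steps per byte. */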
char option_name_buffer[] = "-?"; // used to extract the name of a short option (separated from its argument)
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.vm.compiler.collections;
import java.util.Iterator;
import java.util.Objects;
import java.util.function.BiFunction;
/**
* Implementation of a map with a memory-efficient structure that always preserves insertion order
* when iterating over keys. Particularly efficient when the number of entries is 0, smaller than
* or equal to {@link #INITIAL_CAPACITY}, or smaller than 256.
*
* The key/value pairs are kept in an expanding flat object array with keys at even indices and
* values at odd indices. If the map has {@link #HASH_THRESHOLD} or fewer entries, there
* is no additional hash data structure and comparisons are done via linear checking of the
* key/value pairs. For the case where the equality check is particularly cheap (e.g., just an
* object identity comparison), this limit below which the map is without an actual hash table is
* higher and configured at {@link #HASH_THRESHOLD_IDENTITY_COMPARE}.
*
* When the hash table needs to be constructed, the field {@link #hashArray} becomes a new hash
* array where an entry of 0 means no hit and otherwise denotes the entry number in the
* {@link #entries} array. The hash array is interpreted as an actual byte array if the indices fit
* within 8 bit, or as an array of short values if the indices fit within 16 bit, or as an array of
* integer values in other cases.
*
* Hash collisions are handled by chaining a linked list of {@link CollisionLink} objects that take
* the place of the values in the {@link #entries} array.
*
* Removing entries will put {@code null} into the {@link #entries} array. If the occupation of the
* map falls below a specific threshold, the map will be compressed via the
* {@link #maybeCompress(int)} method.
*/
final class EconomicMapImpl<K, V> implements EconomicMap<K, V>, EconomicSet<K> {
/**
* Initial number of key/value pair entries that is allocated in the first entries array.
*/
private static final int INITIAL_CAPACITY = 4;
/**
* Maximum number of entries that are moved linearly forward if a key is removed.
*/
private static final int COMPRESS_IMMEDIATE_CAPACITY = 8;
/**
* Minimum number of key/value pair entries added when the entries array is increased in size.
*/
private static final int MIN_CAPACITY_INCREASE = 8;
/**
* Number of entries above which a hash table is created.
*/
private static final int HASH_THRESHOLD = 4;
/**
* Number of entries above which a hash table is created when equality can be checked with
* object identity.
*/
private static final int HASH_THRESHOLD_IDENTITY_COMPARE = 8;
/**
* Maximum number of entries allowed in the map.
*/
private static final int MAX_ELEMENT_COUNT = Integer.MAX_VALUE >> 1;
/**
* Number of entries above which more than 1 byte is necessary for the hash index.
*/
private static final int LARGE_HASH_THRESHOLD = ((1 << Byte.SIZE) << 1);
/**
* Number of entries above which more than 2 bytes are necessary for the hash index.
*/
private static final int VERY_LARGE_HASH_THRESHOLD = (LARGE_HASH_THRESHOLD << Byte.SIZE);
/**
* Total number of entries (actual entries plus deleted entries).
*/
private int totalEntries;
/**
* Number of deleted entries.
*/
private int deletedEntries;
/**
* Entries array with even indices storing keys and odd indices storing values.
*/
private Object[] entries;
/**
* Hash array that is interpreted either as byte or short or int array depending on number of
* map entries.
*/
private byte[] hashArray;
/**
* The strategy used for comparing keys or {@code null} for denoting special strategy
* {@link Equivalence#IDENTITY}.
*/
private final Equivalence strategy;
/**
* Intercept method for debugging purposes.
*/
private static <K, V> EconomicMapImpl<K, V> intercept(EconomicMapImpl<K, V> map) {
return map;
}
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, isSet));
}
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, int initialCapacity, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, initialCapacity, isSet));
}
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicMap<K, V> other, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, other, isSet));
}
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicSet<K> other, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, other, isSet));
}
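// Illustrative use via the factories above (a sketch; put() comes from the
// implemented EconomicMap interface):
//   EconomicMapImpl<String, Integer> map = EconomicMapImpl.create(Equivalence.DEFAULT, false);
//   map.put("answer", 42);
//   Integer v = map.get("answer"); // 42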
private EconomicMapImpl(Equivalence strategy, boolean isSet) {
if (strategy == Equivalence.IDENTITY) {
this.strategy = null;
} else {
this.strategy = strategy;
}
this.isSet = isSet;
}
private EconomicMapImpl(Equivalence strategy, int initialCapacity, boolean isSet) {
this(strategy, isSet);
init(initialCapacity);
}
private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicMap<K, V> other, boolean isSet) {
this(strategy, isSet);
if (!initFrom(other)) {
init(other.size());
putAll(other);
}
}
private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicSet<K> other, boolean isSet) {
this(strategy, isSet);
if (!initFrom(other)) {
init(other.size());
addAll(other);
}
}
@SuppressWarnings("unchecked")
private boolean initFrom(Object o) {
if (o instanceof EconomicMapImpl) {
EconomicMapImpl<K, V> otherMap = (EconomicMapImpl<K, V>) o;
// We are only allowed to directly copy if the strategies of the two maps are the same.
if (strategy == otherMap.strategy) {
totalEntries = otherMap.totalEntries;
deletedEntries = otherMap.deletedEntries;
if (otherMap.entries != null) {
entries = otherMap.entries.clone();
}
if (otherMap.hashArray != null) {
hashArray = otherMap.hashArray.clone();
}
return true;
}
}
return false;
}
private void init(int size) {
if (size > INITIAL_CAPACITY) {
entries = new Object[size << 1];
}
}
/**
* Links the collisions. Needs to be an immutable class to allow an efficient shallow copy from
* another map on construction.
*/
private static final class CollisionLink {
CollisionLink(Object value, int next) {
this.value = value;
this.next = next;
}
final Object value;
/**
* Index of the next entry in the collision link chain. (Unlike the hash array, which
* stores indices offset by one so that zero can mean "empty", this is a plain
* zero-based index.)
*/
final int next;
}
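// Illustration (added commentary): for a collision chain A -> B, the value slot of
// entry A holds CollisionLink(valueOfA, indexOfB); getValue() and setValue() below
// transparently unwrap and rewrap the link, so callers only ever observe plain values.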
@SuppressWarnings("unchecked")
@Override
public V get(K key) {
Objects.requireNonNull(key);
int index = find(key);
if (index != -1) {
return (V) getValue(index);
}
return null;
}
private int find(K key) {
if (hasHashArray()) {
return findHash(key);
} else {
return findLinear(key);
}
}
private int findLinear(K key) {
for (int i = 0; i < totalEntries; i++) {
Object entryKey = entries[i << 1];
if (entryKey != null && compareKeys(key, entryKey)) {
return i;
}
}
return -1;
}
private boolean compareKeys(Object key, Object entryKey) {
if (key == entryKey) {
return true;
}
if (strategy != null && strategy != Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE) {
if (strategy == Equivalence.DEFAULT) {
return key.equals(entryKey);
} else {
return strategy.equals(key, entryKey);
}
}
return false;
}
private int findHash(K key) {
int index = getHashArray(getHashIndex(key)) - 1;
if (index != -1) {
Object entryKey = getKey(index);
if (compareKeys(key, entryKey)) {
return index;
} else {
Object entryValue = getRawValue(index);
if (entryValue instanceof CollisionLink) {
return findWithCollision(key, (CollisionLink) entryValue);
}
}
}
return -1;
}
private int findWithCollision(K key, CollisionLink initialEntryValue) {
int index;
Object entryKey;
CollisionLink entryValue = initialEntryValue;
while (true) {
CollisionLink collisionLink = entryValue;
index = collisionLink.next;
entryKey = getKey(index);
if (compareKeys(key, entryKey)) {
return index;
} else {
Object value = getRawValue(index);
if (value instanceof CollisionLink) {
entryValue = (CollisionLink) getRawValue(index);
} else {
return -1;
}
}
}
}
private int getHashArray(int index) {
if (entries.length < LARGE_HASH_THRESHOLD) {
return (hashArray[index] & 0xFF);
} else if (entries.length < VERY_LARGE_HASH_THRESHOLD) {
int adjustedIndex = index << 1;
return (hashArray[adjustedIndex] & 0xFF) | ((hashArray[adjustedIndex + 1] & 0xFF) << 8);
} else {
int adjustedIndex = index << 2;
return (hashArray[adjustedIndex] & 0xFF) | ((hashArray[adjustedIndex + 1] & 0xFF) << 8) | ((hashArray[adjustedIndex + 2] & 0xFF) << 16) | ((hashArray[adjustedIndex + 3] & 0xFF) << 24);
}
}
private void setHashArray(int index, int value) {
if (entries.length < LARGE_HASH_THRESHOLD) {
hashArray[index] = (byte) value;
} else if (entries.length < VERY_LARGE_HASH_THRESHOLD) {
int adjustedIndex = index << 1;
hashArray[adjustedIndex] = (byte) value;
hashArray[adjustedIndex + 1] = (byte) (value >> 8);
} else {
int adjustedIndex = index << 2;
hashArray[adjustedIndex] = (byte) value;
hashArray[adjustedIndex + 1] = (byte) (value >> 8);
hashArray[adjustedIndex + 2] = (byte) (value >> 16);
hashArray[adjustedIndex + 3] = (byte) (value >> 24);
}
}
private int findAndRemoveHash(Object key) {
int hashIndex = getHashIndex(key);
int index = getHashArray(hashIndex) - 1;
if (index != -1) {
Object entryKey = getKey(index);
if (compareKeys(key, entryKey)) {
Object value = getRawValue(index);
int nextIndex = -1;
if (value instanceof CollisionLink) {
CollisionLink collisionLink = (CollisionLink) value;
nextIndex = collisionLink.next;
}
setHashArray(hashIndex, nextIndex + 1);
return index;
} else {
Object entryValue = getRawValue(index);
if (entryValue instanceof CollisionLink) {
return findAndRemoveWithCollision(key, (CollisionLink) entryValue, index);
}
}
}
return -1;
}
private int findAndRemoveWithCollision(Object key, CollisionLink initialEntryValue, int initialIndexValue) {
int index;
Object entryKey;
CollisionLink entryValue = initialEntryValue;
int lastIndex = initialIndexValue;
while (true) {
CollisionLink collisionLink = entryValue;
index = collisionLink.next;
entryKey = getKey(index);
if (compareKeys(key, entryKey)) {
Object value = getRawValue(index);
if (value instanceof CollisionLink) {
CollisionLink thisCollisionLink = (CollisionLink) value;
setRawValue(lastIndex, new CollisionLink(collisionLink.value, thisCollisionLink.next));
} else {
setRawValue(lastIndex, collisionLink.value);
}
return index;
} else {
Object value = getRawValue(index);
if (value instanceof CollisionLink) {
entryValue = (CollisionLink) getRawValue(index);
lastIndex = index;
} else {
return -1;
}
}
}
}
private int getHashIndex(Object key) {
int hash;
if (strategy != null && strategy != Equivalence.DEFAULT) {
if (strategy == Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE) {
hash = System.identityHashCode(key);
} else {
hash = strategy.hashCode(key);
}
} else {
hash = key.hashCode();
}
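// Spread the high bits into the low bits (the same trick as java.util.HashMap)
// so that the power-of-two mask below uses more of the hash.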
hash = hash ^ (hash >>> 16);
return hash & (getHashTableSize() - 1);
}
@SuppressWarnings("unchecked")
@Override
public V put(K key, V value) {
if (key == null) {
throw new UnsupportedOperationException("null not supported as key!");
}
int index = find(key);
if (index != -1) {
Object oldValue = getValue(index);
setValue(index, value);
return (V) oldValue;
}
int nextEntryIndex = totalEntries;
if (entries == null) {
entries = new Object[INITIAL_CAPACITY << 1];
} else if (entries.length == nextEntryIndex << 1) {
grow();
assert entries.length > totalEntries << 1;
// Can change if grow is actually compressing.
nextEntryIndex = totalEntries;
}
setKey(nextEntryIndex, key);
setValue(nextEntryIndex, value);
totalEntries++;
if (hasHashArray()) {
// Rehash on collision if the hash table is more than two-thirds full.
boolean rehashOnCollision = (getHashTableSize() < (size() + (size() >> 1)));
putHashEntry(key, nextEntryIndex, rehashOnCollision);
} else if (totalEntries > getHashThreshold()) {
createHash();
}
return null;
}
/**
* Number of entries above which a hash table should be constructed.
*/
private int getHashThreshold() {
if (strategy == null || strategy == Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE) {
return HASH_THRESHOLD_IDENTITY_COMPARE;
} else {
return HASH_THRESHOLD;
}
}
private void grow() {
int entriesLength = entries.length;
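// entries stores key/value pairs, so entriesLength >> 1 is the current capacity;
// grow it by roughly 50%, but by at least MIN_CAPACITY_INCREASE slots.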
int newSize = (entriesLength >> 1) + Math.max(MIN_CAPACITY_INCREASE, entriesLength >> 2);
if (newSize > MAX_ELEMENT_COUNT) {
throw new UnsupportedOperationException("map grown too large!");
}
Object[] newEntries = new Object[newSize << 1];
System.arraycopy(entries, 0, newEntries, 0, entriesLength);
entries = newEntries;
if ((entriesLength < LARGE_HASH_THRESHOLD && newEntries.length >= LARGE_HASH_THRESHOLD) ||
(entriesLength < VERY_LARGE_HASH_THRESHOLD && newEntries.length >= VERY_LARGE_HASH_THRESHOLD)) {
// Rehash in order to change number of bits reserved for hash indices.
createHash();
}
}
/**
* Compresses the map if there is a large number of deleted entries and returns the translated
* new next index.
*/
private int maybeCompress(int nextIndex) {
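// Compress once tombstones make up at least three quarters of the entries
// ((1/2 + 1/4) * totalEntries), unless the map is still at its initial capacity.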
if (entries.length != INITIAL_CAPACITY << 1 && deletedEntries >= (totalEntries >> 1) + (totalEntries >> 2)) {
return compressLarge(nextIndex);
}
return nextIndex;
}
/**
* Compresses the map and returns the translated new next index.
*/
private int compressLarge(int nextIndex) {
int size = INITIAL_CAPACITY;
int remaining = totalEntries - deletedEntries;
while (size <= remaining) {
size += Math.max(MIN_CAPACITY_INCREASE, size >> 1);
}
Object[] newEntries = new Object[size << 1];
int z = 0;
int newNextIndex = remaining;
for (int i = 0; i < totalEntries; ++i) {
Object key = getKey(i);
if (i == nextIndex) {
newNextIndex = z;
}
if (key != null) {
newEntries[z << 1] = key;
newEntries[(z << 1) + 1] = getValue(i);
z++;
}
}
this.entries = newEntries;
totalEntries = z;
deletedEntries = 0;
if (z <= getHashThreshold()) {
this.hashArray = null;
} else {
createHash();
}
return newNextIndex;
}
private int getHashTableSize() {
if (entries.length < LARGE_HASH_THRESHOLD) {
return hashArray.length;
} else if (entries.length < VERY_LARGE_HASH_THRESHOLD) {
return hashArray.length >> 1;
} else {
return hashArray.length >> 2;
}
}
private void createHash() {
int entryCount = size();
// Calculate the smallest power of two that is greater than the number of entries.
int size = getHashThreshold();
while (size <= entryCount) {
size <<= 1;
}
// Give extra size to avoid collisions.
size <<= 1;
if (this.entries.length >= VERY_LARGE_HASH_THRESHOLD) {
// Every entry has 4 bytes.
size <<= 2;
} else if (this.entries.length >= LARGE_HASH_THRESHOLD) {
// Every entry has 2 bytes.
size <<= 1;
} else {
// Entries are very small => give extra size to further reduce collisions.
size <<= 1;
}
hashArray = new byte[size];
for (int i = 0; i < totalEntries; i++) {
Object entryKey = getKey(i);
if (entryKey != null) {
putHashEntry(entryKey, i, false);
}
}
}
private void putHashEntry(Object key, int entryIndex, boolean rehashOnCollision) {
int hashIndex = getHashIndex(key);
int oldIndex = getHashArray(hashIndex) - 1;
if (oldIndex != -1 && rehashOnCollision) {
this.createHash();
return;
}
setHashArray(hashIndex, entryIndex + 1);
Object value = getRawValue(entryIndex);
if (oldIndex != -1) {
assert entryIndex != oldIndex : "this cannot happend and would create an endless collision link cycle";
if (value instanceof CollisionLink) {
CollisionLink collisionLink = (CollisionLink) value;
setRawValue(entryIndex, new CollisionLink(collisionLink.value, oldIndex));
} else {
setRawValue(entryIndex, new CollisionLink(getRawValue(entryIndex), oldIndex));
}
} else {
if (value instanceof CollisionLink) {
CollisionLink collisionLink = (CollisionLink) value;
setRawValue(entryIndex, collisionLink.value);
}
}
}
@Override
public int size() {
return totalEntries - deletedEntries;
}
@Override
public boolean containsKey(K key) {
return find(key) != -1;
}
@Override
public void clear() {
entries = null;
hashArray = null;
totalEntries = deletedEntries = 0;
}
private boolean hasHashArray() {
return hashArray != null;
}
@SuppressWarnings("unchecked")
@Override
public V removeKey(K key) {
if (key == null) {
throw new UnsupportedOperationException("null not supported as key!");
}
int index;
if (hasHashArray()) {
index = this.findAndRemoveHash(key);
} else {
index = this.findLinear(key);
}
if (index != -1) {
Object value = getValue(index);
remove(index);
return (V) value;
}
return null;
}
/**
* Removes the element at the given index and returns the index of the next element. This can
* be a different value if map compression was triggered.
*/
private int remove(int indexToRemove) {
int index = indexToRemove;
int entriesAfterIndex = totalEntries - index - 1;
int result = index + 1;
// Without hash array, compress immediately.
if (entriesAfterIndex <= COMPRESS_IMMEDIATE_CAPACITY && !hasHashArray()) {
while (index < totalEntries - 1) {
setKey(index, getKey(index + 1));
setRawValue(index, getRawValue(index + 1));
index++;
}
result--;
}
setKey(index, null);
setRawValue(index, null);
if (index == totalEntries - 1) {
// Make sure last element is always non-null.
totalEntries--;
while (index > 0 && getKey(index - 1) == null) {
totalEntries--;
deletedEntries--;
index--;
}
} else {
deletedEntries++;
result = maybeCompress(result);
}
return result;
}
private abstract class SparseMapIterator<E> implements Iterator<E> {
protected int current;
@Override
public boolean hasNext() {
return current < totalEntries;
}
@Override
public void remove() {
if (hasHashArray()) {
EconomicMapImpl.this.findAndRemoveHash(getKey(current - 1));
}
current = EconomicMapImpl.this.remove(current - 1);
}
}
@Override
public Iterable<V> getValues() {
return new Iterable<V>() {
@Override
public Iterator<V> iterator() {
return new SparseMapIterator<V>() {
@SuppressWarnings("unchecked")
@Override
public V next() {
Object result;
while (true) {
result = getValue(current);
if (result == null && getKey(current) == null) {
// values can be null, double-check if key is also null
current++;
} else {
current++;
break;
}
}
return (V) result;
}
};
}
};
}
@Override
public Iterable<K> getKeys() {
return this;
}
@Override
public boolean isEmpty() {
return this.size() == 0;
}
@Override
public MapCursor<K, V> getEntries() {
return new MapCursor<K, V>() {
int current = -1;
@Override
public boolean advance() {
current++;
if (current >= totalEntries) {
return false;
} else {
while (EconomicMapImpl.this.getKey(current) == null) {
// Skip over null entries
current++;
}
return true;
}
}
@SuppressWarnings("unchecked")
@Override
public K getKey() {
return (K) EconomicMapImpl.this.getKey(current);
}
@SuppressWarnings("unchecked")
@Override
public V getValue() {
return (V) EconomicMapImpl.this.getValue(current);
}
@Override
public void remove() {
if (hasHashArray()) {
EconomicMapImpl.this.findAndRemoveHash(EconomicMapImpl.this.getKey(current));
}
current = EconomicMapImpl.this.remove(current) - 1;
}
};
}
@SuppressWarnings("unchecked")
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
for (int i = 0; i < totalEntries; i++) {
Object entryKey = getKey(i);
if (entryKey != null) {
Object newValue = function.apply((K) entryKey, (V) getValue(i));
setValue(i, newValue);
}
}
}
private Object getKey(int index) {
return entries[index << 1];
}
private void setKey(int index, Object newValue) {
entries[index << 1] = newValue;
}
private void setValue(int index, Object newValue) {
Object oldValue = getRawValue(index);
if (oldValue instanceof CollisionLink) {
CollisionLink collisionLink = (CollisionLink) oldValue;
setRawValue(index, new CollisionLink(newValue, collisionLink.next));
} else {
setRawValue(index, newValue);
}
}
private void setRawValue(int index, Object newValue) {
entries[(index << 1) + 1] = newValue;
}
private Object getRawValue(int index) {
return entries[(index << 1) + 1];
}
private Object getValue(int index) {
Object object = getRawValue(index);
if (object instanceof CollisionLink) {
return ((CollisionLink) object).value;
}
return object;
}
private final boolean isSet;
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(isSet ? "set(size=" : "map(size=").append(size()).append(", {");
String sep = "";
MapCursor<K, V> cursor = getEntries();
while (cursor.advance()) {
builder.append(sep);
if (isSet) {
builder.append(cursor.getKey());
} else {
builder.append("(").append(cursor.getKey()).append(",").append(cursor.getValue()).append(")");
}
sep = ",";
}
builder.append("})");
return builder.toString();
}
@Override
public Iterator<K> iterator() {
return new SparseMapIterator<K>() {
@SuppressWarnings("unchecked")
@Override
public K next() {
Object result;
while ((result = getKey(current++)) == null) {
// skip null entries
}
return (K) result;
}
};
}
@Override
public boolean contains(K element) {
return containsKey(element);
}
@SuppressWarnings("unchecked")
@Override
public boolean add(K element) {
return put(element, (V) element) == null;
}
@Override
public void remove(K element) {
removeKey(element);
}
}
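// Minimal usage sketch (added for illustration; assumes this class is visible to
// the caller — in the actual sources it is package-private and normally reached
// through the public EconomicMap.create(...) factories).
class EconomicMapImplDemo {
public static void main(String[] args) {
EconomicMapImpl<String, Integer> map = EconomicMapImpl.create(Equivalence.DEFAULT, false);
map.put("one", 1);
map.put("two", 2);
assert map.get("one") == 1;
assert map.removeKey("two") == 2;
assert map.size() == 1;
// Once the entry count exceeds getHashThreshold(), lookups switch from a
// linear scan of the entries array to the byte-packed hash index.
for (int i = 0; i < 100; i++) {
map.put("key" + i, i);
}
assert map.get("key42") == 42;
}
}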
## `php:7.2-alpine`
```console
$ docker pull php@sha256:93bb06764c7200c073b7e97c2e095ffb6657c382a0e2b82a663cb5a496cea636
```
- Manifest MIME: `application/vnd.docker.distribution.manifest.list.v2+json`
- Platforms:
- linux; amd64
- linux; arm variant v6
- linux; arm variant v7
- linux; arm64 variant v8
- linux; 386
- linux; ppc64le
- linux; s390x
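To pull one of the per-platform images directly, use the digest from its section below; alternatively (an illustrative aside, not part of the generated listing) Docker 19.03+ can resolve the manifest list itself via `--platform`:

```console
$ docker pull --platform linux/arm64 php:7.2-alpine
```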
### `php:7.2-alpine` - linux; amd64
```console
$ docker pull php@sha256:be606b14a4f254d5aea88b74588cba8a5891d7cec7e85eeb2ebc325bb95c2802
```
- Docker Version: 18.09.7
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **31.0 MB (31036537 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:aa4372becffecbca8345127e7ea8e860b705103a83c793b2be4c66464c713926`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:19:46 GMT
ADD file:c92c248239f8c7b9b3c067650954815f391b7bcb09023f984972c082ace2a8d0 in /
# Fri, 29 May 2020 21:19:46 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:51:57 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:51:58 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:51:59 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:51:59 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:52:00 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:52:00 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:52:00 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:52:00 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 21:44:07 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 23:25:02 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 23:25:03 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 23:25:03 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 23:25:07 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 23:25:07 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 06:31:54 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 06:31:56 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 06:31:57 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 06:31:58 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 06:31:58 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:df20fa9351a15782c64e6dddb2d4a6f50bf6d3688060a34c4014b0d9a752eb4c`
Last Modified: Fri, 29 May 2020 21:20:06 GMT
Size: 2.8 MB (2797541 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:b358d6dbbdff5c10cbe23c608b7ff9c6d1dd13331dd7dc7644b727ca5ea8e742`
Last Modified: Thu, 11 Jun 2020 22:14:55 GMT
Size: 1.3 MB (1340817 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:0232d962484c6b9a1caa33a12f1beed4cb20996085056b4fb1591a9fd1d8c89f`
Last Modified: Thu, 11 Jun 2020 22:14:54 GMT
Size: 1.2 KB (1231 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:0c1d3ac04d2af7a9f0eee25eccc72e586400051054ffb4aff7cdf2f4ebc993e8`
Last Modified: Thu, 11 Jun 2020 22:14:54 GMT
Size: 222.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:a0d194555a5a8e99a5d5bc56b049875ab716850d9ba675cdc60f7d1de567443c`
Last Modified: Fri, 07 Aug 2020 00:31:50 GMT
Size: 12.3 MB (12330058 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:6d8e4bd43a5c5e775e94805c2c81663e264e8862086ddcab78da3380fe483d93`
Last Modified: Fri, 07 Aug 2020 00:31:50 GMT
Size: 497.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:5884f2708ebe2b2eb1a25869280a460f494ad8fd15ce9ea5169cb0cfe313f7eb`
Last Modified: Tue, 01 Sep 2020 07:26:53 GMT
Size: 14.5 MB (14547106 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:ba3b2e4d48b7c17d3ea21373b1ea52b768a6faac852b3ccc377319bf5123b81f`
Last Modified: Tue, 01 Sep 2020 07:26:47 GMT
Size: 2.3 KB (2275 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:a374ce27ec33226aef476b8a45ac6224064ceaa668044c43d11a748506bd0ff9`
Last Modified: Tue, 01 Sep 2020 07:26:47 GMT
Size: 16.8 KB (16790 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; arm variant v6
```console
$ docker pull php@sha256:970ea72760a1d5cbe87bc181ea322338724637f04c1370e7b4ecd728da4f5166
```
- Docker Version: 19.03.12
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **29.8 MB (29809096 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:d2513b6ee78e5bbadde86dccd0ee8e4dbdec00f4be63302a7fadd4c86d770dbe`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:50:55 GMT
ADD file:f46e997a56849423db17e5fc9f0249ab6c73b155245927dba5fcb9dfd65f622f in /
# Fri, 29 May 2020 21:50:56 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:14:47 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:14:50 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:14:52 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:14:53 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:14:55 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:14:55 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:14:57 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:14:57 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 19:13:05 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 20:11:27 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 20:11:38 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 20:11:45 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 20:12:19 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 20:12:34 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 02:40:35 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 02:41:47 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 02:43:22 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 02:43:37 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 02:43:58 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:b4b72e716706d29f5d2351709c20bf737b94f876a5472a43ff1b6e203c65d27f`
Last Modified: Fri, 29 May 2020 21:51:30 GMT
Size: 2.6 MB (2603286 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:72788d65e292354c2ce4b67441d334d0ba534db5ce71d5b8bf78011a256b566f`
Last Modified: Thu, 11 Jun 2020 19:29:37 GMT
Size: 1.3 MB (1310273 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:10e32d62de43da999aca07cf15fdbe1cd6b03aebc88c15409c3463daadf13e01`
Last Modified: Thu, 11 Jun 2020 19:29:36 GMT
Size: 1.3 KB (1261 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:6701e05673b745147608cd1b6eb47a3fbc35bcfd8a65bc536e5b9f919b3d0e77`
Last Modified: Thu, 11 Jun 2020 19:29:36 GMT
Size: 268.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:661d8e725765ca0dd01087a0d6f5351273e07130d7836eb2caf64ebf9a1ed0f6`
Last Modified: Thu, 06 Aug 2020 21:10:24 GMT
Size: 12.3 MB (12330084 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:493b922b46d7536b7f21e0887ed2d8d3c8a4af1075a5eecda3c282fe99064f26`
Last Modified: Thu, 06 Aug 2020 21:10:19 GMT
Size: 502.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:51c9b6362c27f864095f3008bc587117541d3cf02796de32ea7c9bd9d5168472`
Last Modified: Tue, 01 Sep 2020 03:35:30 GMT
Size: 13.5 MB (13544358 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:8574d1bc526753663a3d54b19403b32caf3b62c915b34b04ca57624d386368c8`
Last Modified: Tue, 01 Sep 2020 03:35:27 GMT
Size: 2.3 KB (2276 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:f31439f16d0e1aa117df64a424659e36325fb78318c1ec855bddd14faf7e7690`
Last Modified: Tue, 01 Sep 2020 03:35:27 GMT
Size: 16.8 KB (16788 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; arm variant v7
```console
$ docker pull php@sha256:d7757d5e0461776bb61e1bb27cda602e24c4b4fd812143c02359a4b8a6b50181
```
- Docker Version: 19.03.12
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **28.6 MB (28640630 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:fd87081718ccf3707614db867e2ffd5d5b67d30d23a11abfa5139b71e50fef19`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:02:07 GMT
ADD file:e97bf0d217846312b19a9f7264604851aedd125c23b4d291eed4c69b880dce26 in /
# Fri, 29 May 2020 21:02:08 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:34:59 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:35:04 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:35:07 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:35:08 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:35:12 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:35:13 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:35:14 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:35:15 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 19:56:42 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Fri, 07 Aug 2020 00:08:05 GMT
ENV PHP_VERSION=7.2.33
# Fri, 07 Aug 2020 00:08:21 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Fri, 07 Aug 2020 00:08:35 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Fri, 07 Aug 2020 00:09:33 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Fri, 07 Aug 2020 00:09:55 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 10:23:04 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 10:23:06 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 10:23:09 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 10:23:10 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 10:23:11 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:52278dd8e57993669c5b72a9620e89bebdc098f2af2379caaa8945f7403f77a2`
Last Modified: Fri, 29 May 2020 21:02:38 GMT
Size: 2.4 MB (2406763 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:9d5b4230297ea0028dae43acf7e134a55e11ebc15c53a61a2c0c72935dd0ed35`
Last Modified: Thu, 11 Jun 2020 20:13:04 GMT
Size: 1.2 MB (1214376 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:779feafcddcba2692e3d187a609b90060e7deea16e8b629aada3a236e99c40f7`
Last Modified: Thu, 11 Jun 2020 20:13:04 GMT
Size: 1.3 KB (1258 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:f23b20c0367792ccffce00af872fa2ea0300330509fb8c15fe5c08905d7db739`
Last Modified: Thu, 11 Jun 2020 20:13:03 GMT
Size: 266.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:dc56b0163ddf3f9a79a8d9438db9fcc3b9ecea500b2d0515c1e1270acde37569`
Last Modified: Fri, 07 Aug 2020 01:14:11 GMT
Size: 12.3 MB (12330079 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:f17f1fcd3ffde8b4c285cdf4f54a76e211868486ce057def40e3d98387492c8a`
Last Modified: Fri, 07 Aug 2020 01:14:10 GMT
Size: 500.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:963a7c3f7c20bfb12c641af1d8406ab81b9c67e421ec63cb088859af99cc62c0`
Last Modified: Tue, 01 Sep 2020 10:50:49 GMT
Size: 12.7 MB (12668338 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:2abaab8a489aed67735278b276c83db4171612dd2aaec95deda69b972374e09b`
Last Modified: Tue, 01 Sep 2020 10:50:45 GMT
Size: 2.3 KB (2275 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:aae6cb52bc213c697da365e51df8efea21e88ec09418bb9dff1a2906c3a2ddcd`
Last Modified: Tue, 01 Sep 2020 10:50:45 GMT
Size: 16.8 KB (16775 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; arm64 variant v8
```console
$ docker pull php@sha256:0a59fe404ba1a79035748eff4ebd86c74d71b24eb249d5a30bb228401379f306
```
- Docker Version: 18.09.7
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **30.7 MB (30709763 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:564ec95f19ee7b8371bd298d4970612378cea20b05fccc88f41b9acb854e6f52`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:43:19 GMT
ADD file:7574aee4e37a85460ab889212d52912723a9b30dda1c060548f0deb4a05fc398 in /
# Fri, 29 May 2020 21:43:20 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:40:23 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:40:27 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:40:31 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:40:32 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:40:34 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:40:35 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:40:35 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:40:36 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 20:13:51 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 22:17:05 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 22:17:16 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 22:17:26 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 22:18:00 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 22:18:01 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 06:39:06 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 06:39:08 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 06:39:11 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 06:39:12 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 06:39:13 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:b538f80385f9b48122e3da068c932a96ea5018afa3c7be79da00437414bd18cd`
Last Modified: Fri, 29 May 2020 21:43:57 GMT
Size: 2.7 MB (2707964 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:dee09877319de284b8141c396485111447d75eb8cf26c68819f92f85a4de5649`
Last Modified: Thu, 11 Jun 2020 20:30:06 GMT
Size: 1.3 MB (1342990 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:7bad472a11fde1c83808bc84cc9c77da0743b9974bb8c51ab25ed0608cbb4558`
Last Modified: Thu, 11 Jun 2020 20:30:06 GMT
Size: 1.3 KB (1258 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:72fca52dcdf28b691bbc24b2e2aff931667b865a9188287ef18af016712d3a0f`
Last Modified: Thu, 11 Jun 2020 20:30:05 GMT
Size: 268.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:27b4ff5f5658c9b1bb9868deefaeb4a75b94411a391b3492f70ed90da873d869`
Last Modified: Thu, 06 Aug 2020 23:39:49 GMT
Size: 12.3 MB (12330098 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:6c0f130b42e25fec8ebdfc9fab550deacb9504fed60f4d22d4d4b4ea3f3fbd59`
Last Modified: Thu, 06 Aug 2020 23:39:49 GMT
Size: 498.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:22a2a363ba2d9bd749199cf1875154681725a925bbe05548a07270350fb88671`
Last Modified: Tue, 01 Sep 2020 07:14:22 GMT
Size: 14.3 MB (14307612 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:31c200fcb4b5f1e577e2926373b788b6178f2459cef9f895806382b0a16e4059`
Last Modified: Tue, 01 Sep 2020 07:14:18 GMT
Size: 2.3 KB (2273 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:f3f6daa271b536afa8b5bc393a658c7429dde8cfc9dfe92a7b94cbdcde24565a`
Last Modified: Tue, 01 Sep 2020 07:14:18 GMT
Size: 16.8 KB (16802 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; 386
```console
$ docker pull php@sha256:6567683e01542ab4be0e1ceca3c008d7c9104a670c035ca09f5764c8e89dd048
```
- Docker Version: 19.03.12
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **31.5 MB (31520504 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:f24b4056b3d3d4c610c18bb1ee23dfdabce35ac0a9a04fc1bb1530c257a7d609`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:38:33 GMT
ADD file:5624441d97aca5eeb82a582941efc3586397098b8391227a9040ebe434cc1d6b in /
# Fri, 29 May 2020 21:38:33 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:53:02 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:53:04 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:53:06 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:53:06 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:53:08 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:53:08 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:53:09 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:53:09 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 22:00:30 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 23:47:47 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 23:47:48 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 23:47:48 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 23:47:54 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 23:47:54 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 04:17:36 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 04:17:37 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 04:17:38 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 04:17:38 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 04:17:38 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:0625b4155e2a59f647ece47c0cd77ed3196b1f84454fa64ce80cad90e2b9b79e`
Last Modified: Fri, 29 May 2020 21:38:53 GMT
Size: 2.8 MB (2792298 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:380f9ca051d120d4ed72890904488f0f1e0fd0128cbd1c73adbff366e90bc2aa`
Last Modified: Thu, 11 Jun 2020 22:28:37 GMT
Size: 1.4 MB (1439837 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:613b9419908bb5beb7365d9d340ee3071aebf8c7919f5379a7b99221fd74c78f`
Last Modified: Thu, 11 Jun 2020 22:28:36 GMT
Size: 1.2 KB (1231 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:7fd1bd9f7e9212850ee7c4017e1d3e5ec33176a9113258688a743d871983ee8c`
Last Modified: Thu, 11 Jun 2020 22:28:37 GMT
Size: 222.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:b6a05f71f4aecb750487271b78002698a82a74a07dd955c16ed93d92ae84f537`
Last Modified: Fri, 07 Aug 2020 00:54:24 GMT
Size: 12.3 MB (12330071 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:d6ea2ff4eaecd652ddbfc8c3cfda0551419c186e164c6ab5a0a929408f7aca23`
Last Modified: Fri, 07 Aug 2020 00:54:21 GMT
Size: 499.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:c40a25a7b784349e7e24dfeb52343bcfd5343d273c0bdc6b08fdf447661778a8`
Last Modified: Tue, 01 Sep 2020 05:19:58 GMT
Size: 14.9 MB (14937274 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:10577fcef0d5f7f90eb847dc2d0d63fc87e35fce34a2b69cd81469773100dcde`
Last Modified: Tue, 01 Sep 2020 05:19:50 GMT
Size: 2.3 KB (2273 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:ced1c42eb8dbe88d15899ee3c7b81747fe0790c005b3b9846b4868e47cf7ffb4`
Last Modified: Tue, 01 Sep 2020 05:19:50 GMT
Size: 16.8 KB (16799 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; ppc64le
```console
$ docker pull php@sha256:349921106b2fb585a03f323550a99f52cba92aa74526aea6fdfccd389752bdc0
```
- Docker Version: 18.09.7
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **32.1 MB (32069497 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:addf3f332df1c65a574176160058dac4c4e85e8525ee4bc8c339cbdeceeaef14`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:23:03 GMT
ADD file:8194808a812370fd2202d80d1667f851bd9eac4c560d69d347fe1964f54343de in /
# Fri, 29 May 2020 21:23:06 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:45:11 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:45:20 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:45:29 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:45:31 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:45:39 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:45:42 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:45:44 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:45:46 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 20:37:22 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 21:50:50 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 21:50:54 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 21:50:57 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 21:51:19 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 21:51:22 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 06:23:00 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 06:23:03 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 06:23:13 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 06:23:17 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 06:23:19 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:5077f8601dceb5744d875d7740ebc203f674b108a0188f3a31e292b21a4bee64`
Last Modified: Fri, 29 May 2020 21:23:37 GMT
Size: 2.8 MB (2805199 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:46f98eb9049c4fcbaa1cf16ddb44aaf910a89a5471dc303198251fa25bc2a1ef`
Last Modified: Thu, 11 Jun 2020 20:57:07 GMT
Size: 1.4 MB (1383311 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:e50cdb8e03fc1b19217cd9f80b86142d090ad3e818795b339ea4d9ef9b74555f`
Last Modified: Thu, 11 Jun 2020 20:57:06 GMT
Size: 1.3 KB (1259 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:8b4466e085b12b265fb47397c7c105fd7d78a5883beef49ab9f386393c4c3dae`
Last Modified: Thu, 11 Jun 2020 20:57:06 GMT
Size: 268.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:dea8f78f02bef3eab582272f5d329d30e48428e831bb7f144683b09806ec9148`
Last Modified: Thu, 06 Aug 2020 22:49:24 GMT
Size: 12.3 MB (12330103 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:9a18c020199bbf9e7d688e42c104c4617c244dd4436fbb439b6f45905155d41d`
Last Modified: Thu, 06 Aug 2020 22:49:22 GMT
Size: 498.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:548e885249cc0620ce827b8762265ad07a66edefa75976a41e422222f30e1766`
Last Modified: Tue, 01 Sep 2020 07:15:32 GMT
Size: 15.5 MB (15529788 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:7ecd4f3ae52ea001625ce4c007a98fdf10545b7b18bc5af5e05147c069a4b557`
Last Modified: Tue, 01 Sep 2020 07:15:25 GMT
Size: 2.3 KB (2272 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:296265b1dd103fe6fb1aa9ede2101cbf06b130b36137f1d2e3f476b675b942d1`
Last Modified: Tue, 01 Sep 2020 07:15:25 GMT
Size: 16.8 KB (16799 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
### `php:7.2-alpine` - linux; s390x
```console
$ docker pull php@sha256:99c9946fea45e38dd7a15d16839740fec541a76fc91dc5e5b19ad654de75e445
```
- Docker Version: 18.09.7
- Manifest MIME: `application/vnd.docker.distribution.manifest.v2+json`
- Total Size: **30.2 MB (30228777 bytes)**
(compressed transfer size, not on-disk size)
- Image ID: `sha256:f604d61fb1696363d1427945541905c3c326a1271208024d9dcc03e5f36497fb`
- Entrypoint: `["docker-php-entrypoint"]`
- Default Command: `["php","-a"]`
```dockerfile
# Fri, 29 May 2020 21:41:39 GMT
ADD file:9799ce3b2f782a28e10b1846cd9b3db827fa99c9bc601feb268456195856814e in /
# Fri, 29 May 2020 21:41:39 GMT
CMD ["/bin/sh"]
# Thu, 11 Jun 2020 18:29:35 GMT
ENV PHPIZE_DEPS=autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
# Thu, 11 Jun 2020 18:29:37 GMT
RUN apk add --no-cache ca-certificates curl tar xz openssl
# Thu, 11 Jun 2020 18:29:38 GMT
RUN set -eux; addgroup -g 82 -S www-data; adduser -u 82 -D -S -G www-data www-data
# Thu, 11 Jun 2020 18:29:38 GMT
ENV PHP_INI_DIR=/usr/local/etc/php
# Thu, 11 Jun 2020 18:29:38 GMT
RUN set -eux; mkdir -p "$PHP_INI_DIR/conf.d"; [ ! -d /var/www/html ]; mkdir -p /var/www/html; chown www-data:www-data /var/www/html; chmod 777 /var/www/html
# Thu, 11 Jun 2020 18:29:39 GMT
ENV PHP_CFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:29:39 GMT
ENV PHP_CPPFLAGS=-fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
# Thu, 11 Jun 2020 18:29:39 GMT
ENV PHP_LDFLAGS=-Wl,-O1 -pie
# Thu, 11 Jun 2020 19:20:56 GMT
ENV GPG_KEYS=1729F83938DA44E27BA0F4D3DBDB397470D12172 B1B44D8F021E4E2D6021E995DC9FF8D3EE5AF27F
# Thu, 06 Aug 2020 19:27:53 GMT
ENV PHP_VERSION=7.2.33
# Thu, 06 Aug 2020 19:27:53 GMT
ENV PHP_URL=https://www.php.net/distributions/php-7.2.33.tar.xz PHP_ASC_URL=https://www.php.net/distributions/php-7.2.33.tar.xz.asc
# Thu, 06 Aug 2020 19:27:53 GMT
ENV PHP_SHA256=0f160a3483ffce36be5962fab7bcf09d605ee66c5707df83e4195cb796bbb03a PHP_MD5=
# Thu, 06 Aug 2020 19:27:57 GMT
RUN set -eux; apk add --no-cache --virtual .fetch-deps gnupg; mkdir -p /usr/src; cd /usr/src; curl -fsSL -o php.tar.xz "$PHP_URL"; if [ -n "$PHP_SHA256" ]; then echo "$PHP_SHA256 *php.tar.xz" | sha256sum -c -; fi; if [ -n "$PHP_MD5" ]; then echo "$PHP_MD5 *php.tar.xz" | md5sum -c -; fi; if [ -n "$PHP_ASC_URL" ]; then curl -fsSL -o php.tar.xz.asc "$PHP_ASC_URL"; export GNUPGHOME="$(mktemp -d)"; for key in $GPG_KEYS; do gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; done; gpg --batch --verify php.tar.xz.asc php.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME"; fi; apk del --no-network .fetch-deps
# Thu, 06 Aug 2020 19:27:57 GMT
COPY file:ce57c04b70896f77cc11eb2766417d8a1240fcffe5bba92179ec78c458844110 in /usr/local/bin/
# Tue, 01 Sep 2020 01:59:15 GMT
RUN set -eux; apk add --no-cache --virtual .build-deps $PHPIZE_DEPS argon2-dev coreutils curl-dev libedit-dev libsodium-dev libxml2-dev openssl-dev sqlite-dev ; export CFLAGS="$PHP_CFLAGS" CPPFLAGS="$PHP_CPPFLAGS" LDFLAGS="$PHP_LDFLAGS" ; docker-php-source extract; cd /usr/src/php; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --with-config-file-path="$PHP_INI_DIR" --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" --enable-option-checking=fatal --with-mhash --enable-ftp --enable-mbstring --enable-mysqlnd --with-password-argon2 --with-sodium=shared --with-pdo-sqlite=/usr --with-sqlite3=/usr --with-curl --with-libedit --with-openssl --with-zlib $(test "$gnuArch" = 's390x-linux-musl' && echo '--without-pcre-jit') ${PHP_EXTRA_CONFIGURE_ARGS:-} ; make -j "$(nproc)"; find -type f -name '*.a' -delete; make install; find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; make clean; cp -v php.ini-* "$PHP_INI_DIR/"; cd /; docker-php-source delete; runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local | tr ',' '\n' | sort -u | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' )"; apk add --no-cache $runDeps; apk del --no-network .build-deps; pecl update-channels; rm -rf /tmp/pear ~/.pearrc; php --version
# Tue, 01 Sep 2020 01:59:19 GMT
COPY multi:cfe027e655535d9b3eb4b44f84eafb2e1d257620ca628247fe5c1c4fb008a78a in /usr/local/bin/
# Tue, 01 Sep 2020 01:59:21 GMT
RUN docker-php-ext-enable sodium
# Tue, 01 Sep 2020 01:59:21 GMT
ENTRYPOINT ["docker-php-entrypoint"]
# Tue, 01 Sep 2020 01:59:22 GMT
CMD ["php" "-a"]
```
- Layers:
- `sha256:8fb3d41b2e9a59630b51745f257cd2561f96bcd15cf309fcc20120d5fcee8c5b`
Last Modified: Fri, 29 May 2020 21:42:03 GMT
Size: 2.6 MB (2566189 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:057c8475c8b5e21c245ff682a57adf8970d16ccbed87694b3ec200c83e17a8fb`
Last Modified: Thu, 11 Jun 2020 19:32:19 GMT
Size: 1.4 MB (1382745 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:ad2bbd1ef5644a3638bc6f073de941463cbfceb435ea6d11ce3c7fdb9f8d2539`
Last Modified: Thu, 11 Jun 2020 19:32:18 GMT
Size: 1.3 KB (1257 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:8b73c5118a1a42bce068c040302cb99d7a26ef9ab76544773e9ba70f28d274d3`
Last Modified: Thu, 11 Jun 2020 19:32:18 GMT
Size: 268.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:dc88ef4cd24c6ee7e2efa9df5f143d3bc4fca5f7f4d44f04f25aa13e4b5e3ad9`
Last Modified: Thu, 06 Aug 2020 19:51:58 GMT
Size: 12.3 MB (12330086 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:4cf6460deabdf1902ed4f21d3ac68c02f48173d2b65a286666a0267177145d42`
Last Modified: Thu, 06 Aug 2020 19:51:57 GMT
Size: 497.0 B
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:bf78a609527620da5819e831541dea940acb5de87e3f52d201da5cc8f76ef8c8`
Last Modified: Tue, 01 Sep 2020 02:30:34 GMT
Size: 13.9 MB (13928667 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:11f31a3f5f071b947dc538c53c571b262db766720658d2d3c5b367df1e06b497`
Last Modified: Tue, 01 Sep 2020 02:30:32 GMT
Size: 2.3 KB (2274 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
- `sha256:56161d9fc1cdabb4afe03b9f1e7d60f576f78be7ead934b7fe816fcbd5749a94`
Last Modified: Tue, 01 Sep 2020 02:30:32 GMT
Size: 16.8 KB (16794 bytes)
MIME: application/vnd.docker.image.rootfs.diff.tar.gzip
| {
"pile_set_name": "Github"
} |
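@* LinkShare samples: the one-argument overload shares the current page
   under the given title; the two-argument overload also supplies an
   explicit link-back URL (as the calls below suggest). *@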
@LinkShare.GetHtml("ASP.NET Web Pages Samples")
@LinkShare.GetHtml("ASP.NET Web Pages Samples", "http://www.asp.net") | {
"pile_set_name": "Github"
} |
#import <XCTest/XCTest.h>
#import <Nimble/Nimble.h>
#import "NimbleSpecHelper.h"
@interface ObjCAsyncTest : XCTestCase
@end
@implementation ObjCAsyncTest
- (void)testAsync {
__block id obj = @1;
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(0.2 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
obj = nil;
});
expect(obj).toEventually(beNil());
}
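// withTimeout(5) widens the polling window beyond the default
// 1 second so the 1.5-second dispatch below has time to fire.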
- (void)testAsyncWithCustomTimeout {
__block id obj = nil;
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(1.5 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
obj = @1;
});
expect(obj).withTimeout(5).toEventuallyNot(beNil());
}
- (void)testAsyncCallback {
waitUntil(^(void (^done)(void)){
done();
});
waitUntil(^(void (^done)(void)){
dispatch_async(dispatch_get_main_queue(), ^{
done();
});
});
expectFailureMessage(@"Waited more than 1.0 second", ^{
waitUntil(^(void (^done)(void)){ /* ... */ });
});
expectFailureMessage(@"Waited more than 0.01 seconds", ^{
waitUntilTimeout(0.01, ^(void (^done)(void)){
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
[NSThread sleepForTimeInterval:0.1];
done();
});
});
});
expectFailureMessage(@"expected to equal <goodbye>, got <hello>", ^{
waitUntil(^(void (^done)(void)){
[NSThread sleepForTimeInterval:0.1];
expect(@"hello").to(equal(@"goodbye"));
done();
});
});
}
@end
| {
"pile_set_name": "Github"
} |
// RUN: %clang_cc1 -E -verify %s
// expected-no-diagnostics
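// The unterminated quote characters below sit inside a skipped
// '#if 0' region, so the preprocessor must not diagnose them.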
#if 0
"
'
#endif
| {
"pile_set_name": "Github"
} |
C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++C
C C
C HIERARCHICAL CLUSTERING using (user-specified) criterion. C
C C
C Parameters: C
C C
C N the number of points being clustered C
C DISS(LEN) dissimilarities in lower half diagonal C
C storage; LEN = N.N-1/2, C
C IOPT clustering criterion to be used, C
C IA, IB, CRIT history of agglomerations; dimensions C
C N, first N-1 locations only used, C
C MEMBR, NN, DISNN vectors of length N, used to store C
C cluster cardinalities, current nearest C
C neighbour, and the dissimilarity assoc. C
C with the latter. C
C MEMBR must be initialized by R to the C
C default of rep(1, N) C
C FLAG boolean indicator of agglomerable obj./ C
C clusters. C
C C
C F. Murtagh, ESA/ESO/STECF, Garching, February 1986. C
C Modifications for R: Ross Ihaka, Dec 1996 C
C Fritz Leisch, Jun 2000 C
C all vars declared: Martin Maechler, Apr 2001 C
C C
c- R Bug PR#4195 fixed "along" qclust.c, given in the report C
C- Testing: --> "hclust" in ../../../../tests/reg-tests-1b.R C
C "ward.D2" (iOpt = 8): Martin Maechler, Mar 2014 C
C------------------------------------------------------------C
SUBROUTINE HCLUST(N,LEN,IOPT,IA,IB,CRIT,MEMBR,NN,DISNN,
X FLAG,DISS)
c Args
INTEGER N, LEN, IOPT
INTEGER IA(N),IB(N), NN(N)
LOGICAL FLAG(N), isWard
DOUBLE PRECISION CRIT(N), MEMBR(N),DISS(LEN), DISNN(N)
c Var
INTEGER IM, JJ, JM, I, NCL, J, IND, I2, J2, K, IND1, IND2
DOUBLE PRECISION INF, DMIN, D12
c External function
INTEGER IOFFST
c
c was 1D+20
DATA INF/1.D+300/
c
c unnecessary initialization of im jj jm to keep g77 -Wall happy
c
IM = 0
JJ = 0
JM = 0
C
C Initializations
C
DO I=1,N
C We do not initialize MEMBR in order to be able to restart the
C algorithm from a cut.
C MEMBR(I)=1.
FLAG(I)=.TRUE.
end do
NCL=N
IF (iOpt .eq. 8) THEN ! Ward "D2" ---> using *squared* distances
do I=1,LEN
DISS(I)=DISS(I)*DISS(I)
end do
ENDIF
C
C Carry out an agglomeration - first create list of NNs
C Note NN and DISNN are the nearest neighbour and its distance
C TO THE RIGHT of I.
C
DO I=1,N-1
DMIN=INF
DO J=I+1,N
IND=IOFFST(N,I,J)
IF (DMIN .GT. DISS(IND)) THEN
DMIN=DISS(IND)
JM=J
end if
end do
NN(I)=JM
DISNN(I)=DMIN
end do
C-- Repeat -------------------------------------------------------
400 CONTINUE
C Next, determine least diss. using list of NNs
DMIN=INF
DO I=1,N-1
IF (FLAG(I) .AND. DISNN(I) .LT. DMIN) THEN
DMIN=DISNN(I)
IM=I
JM=NN(I)
end if
end do
NCL=NCL-1
C
C This allows an agglomeration to be carried out.
C
I2=MIN0(IM,JM)
J2=MAX0(IM,JM)
IA(N-NCL)=I2
IB(N-NCL)=J2
C WARD'S "D1", or "D2" MINIMUM VARIANCE METHOD -- iOpt = 1 or 8.
isWard = (iOpt .eq. 1 .or. iOpt .eq. 8)
IF (iOpt .eq. 8) DMIN = dsqrt(DMIN)
CRIT(N-NCL)=DMIN
FLAG(J2)=.FALSE.
C
C Update dissimilarities from new cluster.
C
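C All the IOPT branches below are instances of the
C Lance-Williams recurrence
C    D(I2+J2,K) = A1*D(I2,K) + A2*D(J2,K) + B*D(I2,J2)
C                 + G*ABS(D(I2,K)-D(J2,K))
C with coefficients fixed by the chosen criterion; G = -1/2
C and G = +1/2 recover single and complete link respectively.
C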
DMIN=INF
DO K=1,N
IF (FLAG(K) .AND. K .NE. I2) THEN
IF (I2.LT.K) THEN
IND1=IOFFST(N,I2,K)
ELSE
IND1=IOFFST(N,K,I2)
ENDIF
IF (J2.LT.K) THEN
IND2=IOFFST(N,J2,K)
ELSE
IND2=IOFFST(N,K,J2)
ENDIF
D12=DISS(IOFFST(N,I2,J2))
C
C WARD'S "D1", or "D2" MINIMUM VARIANCE METHOD - IOPT=8.
IF (isWard) THEN
DISS(IND1)=(MEMBR(I2)+MEMBR(K))*DISS(IND1)+
X (MEMBR(J2)+MEMBR(K))*DISS(IND2) - MEMBR(K)*D12
DISS(IND1)=DISS(IND1) / (MEMBR(I2)+MEMBR(J2)+MEMBR(K))
C
C SINGLE LINK METHOD - IOPT=2.
ELSEIF (IOPT.EQ.2) THEN
DISS(IND1)=MIN(DISS(IND1),DISS(IND2))
C
C COMPLETE LINK METHOD - IOPT=3.
ELSEIF (IOPT.EQ.3) THEN
DISS(IND1)=MAX(DISS(IND1),DISS(IND2))
C
C AVERAGE LINK (OR GROUP AVERAGE) METHOD - IOPT=4.
ELSEIF (IOPT.EQ.4) THEN
DISS(IND1)= (MEMBR(I2)*DISS(IND1)+MEMBR(J2)*DISS(IND2))
X / (MEMBR(I2)+MEMBR(J2))
C
C MCQUITTY'S METHOD - IOPT=5.
ELSEIF (IOPT.EQ.5) THEN
DISS(IND1)=(DISS(IND1)+DISS(IND2)) / 2
C
C MEDIAN (GOWER'S) METHOD aka "Weighted Centroid" - IOPT=6.
ELSEIF (IOPT.EQ.6) THEN
DISS(IND1)= ((DISS(IND1)+DISS(IND2)) - D12/2) / 2
C
C Unweighted CENTROID METHOD - IOPT=7.
ELSEIF (IOPT.EQ.7) THEN
DISS(IND1)=(MEMBR(I2)*DISS(IND1)+MEMBR(J2)*DISS(IND2)-
X MEMBR(I2)*MEMBR(J2)*D12/(MEMBR(I2)+MEMBR(J2)))/
X (MEMBR(I2)+MEMBR(J2))
ENDIF
C
IF (I2 .lt. K) THEN
IF (DISS(IND1) .LT. DMIN) THEN
DMIN=DISS(IND1)
JJ=K
ENDIF
else ! i2 > k
c FIX: the rest of the else clause is a fix by JB to ensure
c correct nearest neighbours are found when a non-monotone
c clustering method (e.g. the centroid methods) is used
if(DISS(IND1) .lt. DISNN(K)) then ! find nearest neighbour of i2
DISNN(K) = DISS(IND1)
NN(K) = I2
end if
ENDIF
ENDIF
END DO
MEMBR(I2)=MEMBR(I2)+MEMBR(J2)
DISNN(I2)=DMIN
NN(I2)=JJ
C
C Update list of NNs insofar as this is required.
C
DO I=1,N-1
IF (FLAG(I) .AND.
X ((NN(I).EQ.I2) .OR. (NN(I).EQ.J2))) THEN
C (Redetermine NN of I:)
DMIN=INF
DO J=I+1,N
if (FLAG(J)) then
IND=IOFFST(N,I,J)
if (DISS(IND) .lt. DMIN) then
DMIN=DISS(IND)
JJ=J
end if
end if
end do
NN(I)=JJ
DISNN(I)=DMIN
end if
end do
C
C Repeat previous steps until N-1 agglomerations carried out.
C
IF (NCL.GT.1) GOTO 400
C
C
RETURN
END
C of HCLUST()
C
C
INTEGER FUNCTION IOFFST(N,I,J)
C Map row I and column J of upper half diagonal symmetric matrix
C onto vector.
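C For example, with N=4 the strict upper triangle is packed
C row by row:
C    (1,2)->1, (1,3)->2, (1,4)->3, (2,3)->4, (2,4)->5, (3,4)->6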
INTEGER N,I,J
IOFFST=J+(I-1)*N-(I*(I+1))/2
RETURN
END
C+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++C
C C
C Given a HIERARCHIC CLUSTERING, described as a sequence of C
C agglomerations, prepare the seq. of aggloms. and "horiz." C
C order of objects for plotting the dendrogram using S routine C
C 'plclust'. C
C C
C Parameters: C
C C
C IA, IB: vectors of dimension N defining the agglomer- C
C ations. C
C IIA, IIB: used to store IA and IB values differently C
C (in form needed for S command 'plclust' C
C IORDER: "horiz." order of objects for dendrogram C
C C
C F. Murtagh, ESA/ESO/STECF, Garching, June 1991 C
C C
C HISTORY C
C C
C Adapted from routine HCASS, which additionally determines C
C cluster assignments at all levels, at extra comput. expense C
C C
C---------------------------------------------------------------C
SUBROUTINE HCASS2(N,IA,IB,IORDER,IIA,IIB)
c Args
INTEGER N,IA(N),IB(N),IORDER(N),IIA(N),IIB(N)
c Var
INTEGER I, J, K, K1, K2, LOC
C
C Following bit is to get seq. of merges into format acceptable to plclust
C I coded clusters as lowest seq. no. of constituents; S's 'hclust' codes
C singletons as -ve numbers, and non-singletons with their seq. nos.
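C For example, with N=3 and merges (1,2) then (1,3), the rows
C passed to plclust become (-1,-2) and (-3,1): step 2 joins
C singleton 3 onto the cluster formed at step 1.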
C
do I=1,N
IIA(I)=IA(I)
IIB(I)=IB(I)
end do
do I=1,N-2
C In the following, smallest (+ve or -ve) seq. no. wanted
K=MIN(IA(I),IB(I))
do J=I+1, N-1
IF(IA(J).EQ.K) IIA(J)=-I
IF(IB(J).EQ.K) IIB(J)=-I
end do
end do
do I=1,N-1
IIA(I)=-IIA(I)
IIB(I)=-IIB(I)
end do
do I=1,N-1
IF (IIA(I).GT.0 .AND. IIB(I).LT.0) THEN
K = IIA(I)
IIA(I) = IIB(I)
IIB(I) = K
ENDIF
IF (IIA(I).GT.0 .AND. IIB(I).GT.0) THEN
K1 = MIN(IIA(I),IIB(I))
K2 = MAX(IIA(I),IIB(I))
IIA(I) = K1
IIB(I) = K2
ENDIF
end do
C
C
C NEW PART FOR 'ORDER'
C
IORDER(1) = IIA(N-1)
IORDER(2) = IIB(N-1)
LOC=2
DO I=N-2,1,-1
DO J=1,LOC
IF(IORDER(J).EQ.I) THEN
C REPLACE IORDER(J) WITH IIA(I) AND IIB(I)
IORDER(J)=IIA(I)
IF (J.EQ.LOC) THEN
LOC=LOC+1
IORDER(LOC)=IIB(I)
else
LOC=LOC+1
do K=LOC,J+2,-1
IORDER(K)=IORDER(K-1)
end do
IORDER(J+1)=IIB(I)
end if
GOTO 171
ENDIF
end do
C SHOULD NEVER REACH HERE
171 CONTINUE
end do
C
C
do I=1,N
IORDER(I) = -IORDER(I)
end do
C
C
RETURN
END
| {
"pile_set_name": "Github"
} |
const path = require('path');
const { getFileLoaderOptions } = require('./file-loader');
/**
* Build options for the webpack responsive loader
*
* @param {object} nextConfig - next.js configuration
 * @param {boolean} isServer - if the build is for the server
 * @param {object} detectedLoaders - all detected and installed loaders
* @returns {object}
*/
const getResponsiveLoaderOptions = ({
responsive,
...nextConfig
}, isServer, detectedLoaders) => {
let adapter = responsive ? responsive.adapter : undefined;
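  // No adapter configured explicitly: fall back to sharp when it was detected.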
if (!adapter && detectedLoaders.responsiveAdapter === 'sharp') {
adapter = require(`${detectedLoaders.responsive}${path.sep}sharp`); // eslint-disable-line
}
return {
...getFileLoaderOptions(nextConfig, isServer),
name: '[name]-[width]-[hash].[ext]',
...(responsive || {}),
adapter,
};
};
/**
* Apply the responsive loader to the webpack configuration
*
* @param {object} webpackConfig - webpack configuration
* @param {object} nextConfig - next.js configuration
* @param {boolean} isServer - if the build is for the server
* @param {object} detectedLoaders - all detected and installed loaders
* @returns {object}
*/
const applyResponsiveLoader = (webpackConfig, nextConfig, isServer, detectedLoaders) => {
webpackConfig.module.rules.push({
test: /\.(jpe?g|png)$/i,
oneOf: [
{
use: {
loader: 'responsive-loader',
options: getResponsiveLoaderOptions(nextConfig, isServer, detectedLoaders),
},
},
],
});
return webpackConfig;
};
module.exports = {
getResponsiveLoaderOptions,
applyResponsiveLoader,
};
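// A minimal usage sketch (the `detectedLoaders` shape and values below
// are hypothetical, shown only to illustrate the call):
//
//   const updated = applyResponsiveLoader(
//     webpackConfig,                               // from next.js' webpack() hook
//     { responsive: { sizes: [320, 640] } },       // next.js config excerpt
//     isServer,
//     { responsive: 'responsive-loader', responsiveAdapter: 'sharp' },
//   );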
| {
"pile_set_name": "Github"
} |