text
stringlengths 2
99.9k
| meta
dict |
---|---|
{
"name": "@yodaos/network",
"version": "0.0.1",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [
"yodaos",
"yoda",
"network"
],
"author": "yonzkon <[email protected]>",
"license": "MIT"
}
| {
"pile_set_name": "Github"
} |
import * as tree from '../misc/helpers';

describe('create table', function() {
    // Every case delegates to tree.equals, which resolves the expected
    // fixture from the mocha test context (`this`), so the suite is purely
    // data-driven: one entry per fixture name.
    const cases = [
        'basic create table',
        'create table alt syntax',
        'create foreign key 1',
        'create foreign key 2',
        'create check 1',
        'create check 2',
        'create primary key 1'
    ];

    cases.forEach(function(title) {
        it(title, function(done) {
            tree.equals(this, done);
        });
    });
});
| {
"pile_set_name": "Github"
} |
-- NOTE(review): Penrose-style selector test input — TODO confirm dialect.

-- Empty global block: no global declarations.
global { }

-- For every Scalar c: attach a Circ shape and a computed value.
Scalar c {
    c.shape = Circ {
        r = 5 + 5
        x = c.val
        y = c.shape.r
    }
    c.val = computeVal(1)
}

-- For a Scalar c matched as the result of addS(a, b): override the shape
-- assigned by the previous block and remove c.val.
Scalar c
with Scalar a; Scalar b
where c := addS(a, b) {
    override c.shape = Rect {
        wow = 100
    }
    delete c.val
}

-- TODO: test equality / multiple matches, anonymous expressions, subtyping
| {
"pile_set_name": "Github"
} |
# see Using the Windows Update Agent API | Searching, Downloading, and Installing Updates
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa387102(v=vs.85).aspx
# see ISystemInformation interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386095(v=vs.85).aspx
# see IUpdateSession interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386854(v=vs.85).aspx
# see IUpdateSearcher interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386515(v=vs.85).aspx
# see IUpdateSearcher::Search method
# at https://docs.microsoft.com/en-us/windows/desktop/api/wuapi/nf-wuapi-iupdatesearcher-search
# see IUpdateDownloader interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386131(v=vs.85).aspx
# see IUpdateCollection interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386107(v=vs.85).aspx
# see IUpdate interface
# at https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
# see xWindowsUpdateAgent DSC resource
# at https://github.com/PowerShell/xWindowsUpdate/blob/dev/DscResources/MSFT_xWindowsUpdateAgent/MSFT_xWindowsUpdateAgent.psm1
# NB you can install common sets of updates with one of these settings:
# | Name | SearchCriteria | Filters |
# |---------------|-------------------------------------------|---------------|
# | Important | AutoSelectOnWebSites=1 and IsInstalled=0 | $true |
# | Recommended | BrowseOnly=0 and IsInstalled=0 | $true |
# | All | IsInstalled=0 | $true |
# | Optional Only | AutoSelectOnWebSites=0 and IsInstalled=0 | $_.BrowseOnly |
param(
[string]$SearchCriteria = 'BrowseOnly=0 and IsInstalled=0',
[string[]]$Filters = @('include:$true'),
[int]$UpdateLimit = 1000,
[switch]$OnlyCheckForRebootRequired = $false
)
$mock = $false
# Terminates the script, propagating $exitCode to the calling process.
# NB $host.SetShouldExit is used (rather than `exit $exitCode`) so the code
#    survives the hosting environment — TODO confirm required for this host.
function ExitWithCode($exitCode) {
    $host.SetShouldExit($exitCode)
    Exit
}
Set-StrictMode -Version Latest
$ErrorActionPreference = 'Stop'
$ProgressPreference = 'SilentlyContinue'
trap {
Write-Output "ERROR: $_"
Write-Output (($_.ScriptStackTrace -split '\r?\n') -replace '^(.*)$','ERROR: $1')
Write-Output (($_.Exception.ToString() -split '\r?\n') -replace '^(.*)$','ERROR EXCEPTION: $1')
ExitWithCode 1
}
if ($mock) {
$mockWindowsUpdatePath = 'C:\Windows\Temp\windows-update-count-mock.txt'
if (!(Test-Path $mockWindowsUpdatePath)) {
Set-Content $mockWindowsUpdatePath 10
}
$count = [int]::Parse((Get-Content $mockWindowsUpdatePath).Trim())
if ($count) {
Write-Output "Synthetic reboot countdown counter is at $count"
Set-Content $mockWindowsUpdatePath (--$count)
ExitWithCode 101
}
Write-Output 'No Windows updates found'
ExitWithCode 0
}
Add-Type @'
using System;
using System.Runtime.InteropServices;
public static class Windows
{
[DllImport("kernel32", SetLastError=true)]
public static extern UInt64 GetTickCount64();
public static TimeSpan GetUptime()
{
return TimeSpan.FromMilliseconds(GetTickCount64());
}
}
'@
# Blocks until $Condition evaluates truthy continuously for $DebounceSeconds.
# Timing is based on machine uptime (GetTickCount64 via the [Windows] helper
# type), so it is immune to wall-clock adjustments. A falsy or throwing
# condition restarts the debounce window.
function Wait-Condition {
    param(
        [scriptblock]$Condition,
        [int]$DebounceSeconds=15
    )
    process {
        $begin = [Windows]::GetUptime()
        do {
            Start-Sleep -Seconds 1
            try {
                $result = &$Condition
            } catch {
                # An exception from the condition counts as "not yet satisfied".
                $result = $false
            }
            if (-not $result) {
                # Condition failed: reset the debounce window and keep polling.
                $begin = [Windows]::GetUptime()
                continue
            }
        } while ((([Windows]::GetUptime()) - $begin).TotalSeconds -lt $DebounceSeconds)
    }
}
$operationResultCodes = @{
0 = "NotStarted";
1 = "InProgress";
2 = "Succeeded";
3 = "SucceededWithErrors";
4 = "Failed";
5 = "Aborted"
}
# Translates a Windows Update Agent OperationResultCode number into its
# symbolic name; unrecognized values are rendered as "Unknown Code <n>".
function LookupOperationResultCode($code) {
    $name = $operationResultCodes[$code]
    if ($null -eq $name) {
        return "Unknown Code $code"
    }
    return $name
}
# Exits the whole script with code 101 ("reboot required") when a reboot is
# pending — either passed in explicitly or detected from the Windows Update
# agent or pending Component Based Servicing packages. Returns normally when
# no reboot is needed.
function ExitWhenRebootRequired($rebootRequired = $false) {
    # check for pending Windows Updates.
    if (!$rebootRequired) {
        $systemInformation = New-Object -ComObject 'Microsoft.Update.SystemInfo'
        $rebootRequired = $systemInformation.RebootRequired
    }
    # check for pending Windows Features.
    if (!$rebootRequired) {
        $pendingPackagesKey = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\PackagesPending'
        $pendingPackagesCount = (Get-ChildItem -ErrorAction SilentlyContinue $pendingPackagesKey | Measure-Object).Count
        $rebootRequired = $pendingPackagesCount -gt 0
    }
    if ($rebootRequired) {
        # Let the servicing worker (TiWorker) drain before signalling the
        # caller to reboot, otherwise the reboot can interrupt servicing.
        Write-Output 'Waiting for the Windows Modules Installer to exit...'
        Wait-Condition {(Get-Process -ErrorAction SilentlyContinue TiWorker | Measure-Object).Count -eq 0}
        ExitWithCode 101
    }
}
ExitWhenRebootRequired
if ($OnlyCheckForRebootRequired) {
Write-Output "$env:COMPUTERNAME restarted."
ExitWithCode 0
}
$updateFilters = $Filters | ForEach-Object {
$action, $expression = $_ -split ':',2
New-Object PSObject -Property @{
Action = $action
Expression = [ScriptBlock]::Create($expression)
}
}
# Decides whether $update should be processed, using the ordered list of
# include:/exclude: filters parsed from the -Filters argument. The first
# filter whose expression matches wins; when none match, the update is
# excluded.
function Test-IncludeUpdate($filters, $update) {
    foreach ($filter in $filters) {
        # Where-Object -InputObject evaluates the filter scriptblock with $_
        # bound to $update and returns the update itself when truthy.
        if (Where-Object -InputObject $update $filter.Expression) {
            return $filter.Action -eq 'include'
        }
    }
    return $false
}
$windowsOsVersion = [System.Environment]::OSVersion.Version
Write-Output 'Searching for Windows updates...'
$updatesToDownloadSize = 0
$updatesToDownload = New-Object -ComObject 'Microsoft.Update.UpdateColl'
$updatesToInstall = New-Object -ComObject 'Microsoft.Update.UpdateColl'
while ($true) {
try {
$updateSession = New-Object -ComObject 'Microsoft.Update.Session'
$updateSession.ClientApplicationID = 'packer-windows-update'
$updateSearcher = $updateSession.CreateUpdateSearcher()
$searchResult = $updateSearcher.Search($SearchCriteria)
if ($searchResult.ResultCode -eq 2) {
break
}
$searchStatus = LookupOperationResultCode($searchResult.ResultCode)
} catch {
$searchStatus = $_.ToString()
}
Write-Output "Search for Windows updates failed with '$searchStatus'. Retrying..."
Start-Sleep -Seconds 5
}
$rebootRequired = $false
for ($i = 0; $i -lt $searchResult.Updates.Count; ++$i) {
$update = $searchResult.Updates.Item($i)
$updateDate = $update.LastDeploymentChangeTime.ToString('yyyy-MM-dd')
$updateSize = ($update.MaxDownloadSize/1024/1024).ToString('0.##')
$updateTitle = $update.Title
$updateSummary = "Windows update ($updateDate; $updateSize MB): $updateTitle"
if (!(Test-IncludeUpdate $updateFilters $update)) {
Write-Output "Skipped (filter) $updateSummary"
continue
}
if ($update.InstallationBehavior.CanRequestUserInput) {
Write-Output "Warning The update '$updateTitle' has the CanRequestUserInput flag set (if the install hangs, you might need to exclude it with the filter 'exclude:`$_.InstallationBehavior.CanRequestUserInput' or 'exclude:`$_.Title -like '*$updateTitle*'')"
}
Write-Output "Found $updateSummary"
$update.AcceptEula() | Out-Null
$updatesToDownloadSize += $update.MaxDownloadSize
$updatesToDownload.Add($update) | Out-Null
$updatesToInstall.Add($update) | Out-Null
if ($updatesToInstall.Count -ge $UpdateLimit) {
$rebootRequired = $true
break
}
}
if ($updatesToDownload.Count) {
$updateSize = ($updatesToDownloadSize/1024/1024).ToString('0.##')
Write-Output "Downloading Windows updates ($($updatesToDownload.Count) updates; $updateSize MB)..."
$updateDownloader = $updateSession.CreateUpdateDownloader()
# https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ns-winnt-_osversioninfoexa#remarks
if (($windowsOsVersion.Major -eq 6 -and $windowsOsVersion.Minor -gt 1) -or ($windowsOsVersion.Major -gt 6)) {
$updateDownloader.Priority = 4 # 1 (dpLow), 2 (dpNormal), 3 (dpHigh), 4 (dpExtraHigh).
} else {
# For versions lower than 6.2 the highest priority is 3
$updateDownloader.Priority = 3 # 1 (dpLow), 2 (dpNormal), 3 (dpHigh).
}
$updateDownloader.Updates = $updatesToDownload
while ($true) {
$downloadResult = $updateDownloader.Download()
if ($downloadResult.ResultCode -eq 2) {
break
}
if ($downloadResult.ResultCode -eq 3) {
Write-Output "Download Windows updates succeeded with errors. Will retry after the next reboot."
$rebootRequired = $true
break
}
$downloadStatus = LookupOperationResultCode($downloadResult.ResultCode)
Write-Output "Download Windows updates failed with $downloadStatus. Retrying..."
Start-Sleep -Seconds 5
}
}
if ($updatesToInstall.Count) {
Write-Output 'Installing Windows updates...'
$updateInstaller = $updateSession.CreateUpdateInstaller()
$updateInstaller.Updates = $updatesToInstall
$installRebootRequired = $false
try {
$installResult = $updateInstaller.Install()
$installRebootRequired = $installResult.RebootRequired
} catch {
Write-Warning "Windows update installation failed with error:"
Write-Warning $_.Exception.ToString()
# Windows update install failed for some reason
# restart the machine and try again
$rebootRequired = $true
}
ExitWhenRebootRequired ($installRebootRequired -or $rebootRequired)
} else {
ExitWhenRebootRequired $rebootRequired
Write-Output 'No Windows updates found'
}
| {
"pile_set_name": "Github"
} |
5e5dec561d0648da495d704c721ad1260e0a66fdea55e4d340f7ff46507904462360226784f8cda8c3794643c6e39cb56ff4d8af4459e57f23b00edec1bd77df
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
import os
import re
import json
import urllib
from urlparse import urljoin
import scrapy
class Spider(scrapy.Spider):
    """Scrapes councilor profiles from the Taichung City Council site.

    Crawls the constituency links on the entry page, resolves each
    constituency's ``wb_main`` iframe, follows every profile link, and
    yields one dict per councilor, joined with reference data loaded from
    the MOI candidate JSON file.

    NOTE(review): Python 2 code (``urlparse`` import, ``u''`` literals);
    porting to Python 3 would require ``urllib.parse`` instead.
    """
    name = "councilors"
    allowed_domains = ["www.tccc.gov.tw"]
    start_urls = ["http://www.tccc.gov.tw/main.asp?uno=16", ]
    download_delay = 0.5

    def __init__(self):
        # Index the reference records by whitespace-stripped name so profile
        # pages can be joined on the councilor's displayed name; keep only
        # Taichung City entries.
        with open(os.path.join(os.path.dirname(__file__), '../../data/cand-moi-direct-control-2018.json'), 'r') as infile:
            self.ref = {re.sub(u'[\s ]', '', person['idname']): person for person in json.loads(infile.read()) if person['cityname'] == u'臺中市'}

    def parse(self, response):
        """Follow every constituency link found on the entry page."""
        for node in response.xpath(u'//a[re:test(@title, "第\S+選區")]'):
            # Carry the constituency name along in the request meta.
            item = {'constituency': node.xpath('text()').extract_first().strip()}
            yield scrapy.Request(urljoin(response.url, node.xpath('@href').extract_first()), callback=self.parse_iframe, meta={'item': item})

    def parse_iframe(self, response):
        """Resolve the wb_main iframe that holds the constituency content."""
        url = urljoin(response.url, response.xpath('//iframe[@name="wb_main"]/@src').extract_first())
        meta = response.request.meta
        return scrapy.Request(url, callback=self.parse_constituency, meta=meta)

    def parse_constituency(self, response):
        """Record the district string, then follow every profile link."""
        item = response.request.meta['item']
        item['district'] = response.xpath(u'normalize-space(string(//td[re:test(., "\[%s\]")]/following-sibling::td[1]))' % item['constituency']).extract_first()
        for url in response.xpath(u'//a[contains(@href, "wb_intro")]/@href').extract():
            yield scrapy.Request(urljoin(response.url, url), callback=self.parse_profile, meta={'item': item})

    def parse_profile(self, response):
        """Extract one councilor's profile page into an item and yield it."""
        item = response.request.meta['item']
        item['county'] = u'臺中市'
        # NOTE(review): election_year is '2014' while the reference file is
        # the 2018 candidate dataset — confirm which term this spider targets.
        item['election_year'] = '2014'
        item['term_start'] = '%s-12-25' % item['election_year']
        item['term_end'] = {'date': "2018-12-25"}
        item['in_office'] = True
        item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]
        item['name'] = response.xpath(u'//td[re:test(., "%s")]/text()' % item['constituency']).extract()[-1].strip()
        item['image'] = urljoin(response.url, response.xpath(u'//*[@id="Layer2"]/descendant::img[re:test(@src, "^Conn/")]/@src').extract_first())
        # Join on name against the reference dataset loaded in __init__.
        item['gender'] = self.ref[item['name']]['sex']
        item['party'] = self.ref[item['name']]['partymship']
        item['title'] = self.ref[item['name']]['posiname']
        # Social-media links.
        for value in response.xpath(u'//td[re:test(., "社群網站")]/following-sibling::td[1]/descendant::a/@href').extract():
            item['links'].append({
                'note': u'社群網站',
                'url': value
            })
        item['contact_details'] = []
        # Label on the page -> Popolo-style contact type.
        contact_mappings = {
            u'電話': 'voice',
            u'傳真': 'fax',
            u'地址': 'address'
        }
        for label, name in contact_mappings.items():
            # '\s*'.join(label) tolerates whitespace between the label's
            # characters as rendered on the page.
            values = [x.strip() for x in response.xpath(u'//td[re:test(., "%s:")]/following-sibling::td[1]/text()' % '\s*'.join(label)).re(u'%s:\s*(.+)\s*' % label) if x.strip()]
            for value in values:
                item['contact_details'].append({
                    'label': label,
                    'type': name,
                    'value': value
                })
        for value in response.xpath(u'//td[re:test(., "Email")]/following-sibling::td[1]/descendant::a/text()').extract():
            item['contact_details'].append({
                'label': 'E-mail',
                'type': 'email',
                'value': value
            })
        # Education / experience / platform sections are laid out as a header
        # row followed by a content row of <p> elements.
        item['education'] = [x.strip() for x in response.xpath(u'//*[re:test(., "^學\s*歷$")]/ancestor::tr[2]/following-sibling::tr[1]/descendant::p/text()').extract() if x.strip()]
        item['experience'] = [x.strip() for x in response.xpath(u'//*[re:test(., "^經\s*歷$")]/ancestor::tr[2]/following-sibling::tr[1]/descendant::p/text()').extract() if x.strip()]
        item['platform'] = [x.strip() for x in response.xpath(u'//*[re:test(., "^政\s*見$")]/ancestor::tr[2]/following-sibling::tr[1]/descendant::p/text()').extract() if x.strip()]
        yield item
| {
"pile_set_name": "Github"
} |
RolePlugins=WindowsFabric
TargetFrameworkVersion=v4.0
| {
"pile_set_name": "Github"
} |
////////////////////////////////////////////////////////////////////////////////
// Test case file for checkstyle.
// Created: Feb-2001
// Ignore violation
////////////////////////////////////////////////////////////////////////////////
package com.puppycrawl.tools.checkstyle.checks.sizes.parameternumber;
import java.io.*;
/**
* Contains simple mistakes:
* - Long lines
* - Tabs
* - Format of variables and parameters
* - Order of modifiers
* @author Oliver Burn
**/
final class InputParameterNumberSimple
{
// Long line ----------------------------------------------------------------
// Contains a tab -> <-
// Contains trailing whitespace ->
// Name format tests
//
/** Invalid format **/
public static final int badConstant = 2;
/** Valid format **/
public static final int MAX_ROWS = 2;
/** Invalid format **/
private static int badStatic = 2;
/** Valid format **/
private static int sNumCreated = 0;
/** Invalid format **/
private int badMember = 2;
/** Valid format **/
private int mNumCreated1 = 0;
/** Valid format **/
protected int mNumCreated2 = 0;
/** commas are wrong **/
private int[] mInts = new int[] {1,2, 3,
4};
//
// Accessor tests
//
/** should be private **/
public static int sTest1;
/** should be private **/
protected static int sTest3;
/** should be private **/
static int sTest2;
/** should be private **/
int mTest1;
/** should be private **/
public int mTest2;
//
// Parameter name format tests
//
/**
* @return hack
* @param badFormat1 bad format
* @param badFormat2 bad format
* @param badFormat3 bad format
* @throws java.lang.Exception abc
**/
int test1(int badFormat1,int badFormat2,
final int badFormat3)
throws java.lang.Exception
{
return 0;
}
/** method that is 20 lines long **/
private void longMethod()
{
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
}
/** constructor that is 10 lines long **/
private InputParameterNumberSimple()
{
// a line
// a line
// a line
// a line
// a line
// a line
// a line
// a line
}
/** test local variables */
private void localVariables()
{
// normal decl
int abc = 0;
int ABC = 0;
// final decls
final int cde = 0;
final int CDE = 0;
// decl in for loop init statement
for (int k = 0; k < 1; k++)
{
String innerBlockVariable = "";
}
for (int I = 0; I < 1; I++)
{
String InnerBlockVariable = "";
}
}
/** test method pattern */
void ALL_UPPERCASE_METHOD()
{
}
/** test illegal constant **/
private static final int BAD__NAME = 3;
// A very, very long line that is OK because it matches the regexp "^.*is OK.*regexp.*$"
// long line that has a tab -> <- and would be OK if tab counted as 1 char
// tabs that count as one char because of their position -> <- -> <-, OK
/** some lines to test the violation column after tabs */
void errorColumnAfterTabs()
{
// with tab-width 8 all statements below start at the same column,
// with different combinations of ' ' and '\t' before the statement
int tab0 =1;
int tab1 =1;
int tab2 =1;
int tab3 =1;
int tab4 =1;
int tab5 =1;
}
// MEMME:
/* MEMME: a
* MEMME:
* OOOO
*/
/* NOTHING */
/* YES */ /* MEMME: x */ /* YES!! */
/** test long comments **/
void veryLong()
{
/*
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
blah blah blah blah
enough talk */
}
/**
* @see to lazy to document all args. Testing excessive # args
**/
void toManyArgs(int aArg1, int aArg2, int aArg3, int aArg4, int aArg5,
int aArg6, int aArg7, int aArg8, int aArg9)
{
}
}
/** Test class for variable naming in for each clauses. */
class InputSimple2
{
/** Some more Javadoc. */
public void doSomething()
{
//"O" should be named "o"
for (Object O : new java.util.ArrayList())
{
}
}
}
/** Test enum for member naming check */
enum MyEnum1
{
/** ABC constant */
ABC,
/** XYZ constant */
XYZ;
/** Should be mSomeMember */
private int someMember;
}
| {
"pile_set_name": "Github"
} |
/* eslint-disable */
// Do this as the first thing so that any code reading it knows the right env.
process.env.BABEL_ENV = 'production';
process.env.NODE_ENV = 'production';
// Makes the script crash on unhandled rejections instead of silently
// ignoring them. In the future, promise rejections that are not handled will
// terminate the Node.js process with a non-zero exit code.
process.on('unhandledRejection', err => {
throw err;
});
// Ensure environment variables are read.
require('../config/env');
const path = require('path');
const chalk = require('chalk');
const fs = require('fs-extra');
const webpack = require('webpack');
const config = require('../config/webpack.config.prod');
const paths = require('../config/paths');
const checkRequiredFiles = require('react-dev-utils/checkRequiredFiles');
const formatWebpackMessages = require('react-dev-utils/formatWebpackMessages');
const FileSizeReporter = require('react-dev-utils/FileSizeReporter');
const measureFileSizesBeforeBuild = FileSizeReporter.measureFileSizesBeforeBuild;
const printFileSizesAfterBuild = FileSizeReporter.printFileSizesAfterBuild;
const useYarn = fs.existsSync(paths.yarnLockFile);
// Warn and crash if required files are missing
if (!checkRequiredFiles([paths.appHtml, paths.appIndexJs])) {
process.exit(1);
}
// First, read the current file sizes in build directory.
// This lets us display how much they changed later.
measureFileSizesBeforeBuild(paths.appBuild)
.then(previousFileSizes => {
// Remove all content but keep the directory so that
// if you're in it, you don't end up in Trash
fs.emptyDirSync(paths.appBuild);
// Merge with the public folder
copyPublicFolder();
// Start the webpack build
return build(previousFileSizes);
})
.then(
({ stats, previousFileSizes, warnings }) => {
if (warnings.length) {
console.log(chalk.yellow('Compiled with warnings.\n'));
console.log(warnings.join('\n\n'));
console.log(
`\nSearch for the ${chalk.underline(
chalk.yellow('keywords')
)} to learn more about each warning.`
);
} else {
console.log(chalk.green('Compiled successfully.\n'));
}
console.log('File sizes after gzip:\n');
printFileSizesAfterBuild(stats, previousFileSizes, paths.appBuild);
},
err => {
console.log(chalk.red('Failed to compile.\n'));
console.log(`${err.message || err}\n`);
process.exit(1);
}
);
// Create the production build and print the deployment instructions.
// Run the production webpack build.
// Resolves with { stats, previousFileSizes, warnings }; rejects on compiler
// errors, formatted build errors, or (on CI) any warnings.
function build(previousFileSizes) {
  console.log('Creating an optimized production build...');
  const compiler = webpack(config);
  return new Promise((resolve, reject) => {
    compiler.run((err, stats) => {
      if (err) {
        reject(err);
        return;
      }
      const { errors, warnings } = formatWebpackMessages(stats.toJson({}, true));
      if (errors.length) {
        reject(new Error(errors.join('\n\n')));
        return;
      }
      // On CI, promote warnings to a hard failure.
      if (process.env.CI && warnings.length) {
        console.log(
          chalk.yellow(
            '\nTreating warnings as errors because process.env.CI = true.\n' +
              'Most CI servers set it automatically.\n'
          )
        );
        reject(new Error(warnings.join('\n\n')));
        return;
      }
      resolve({ stats, previousFileSizes, warnings });
    });
  });
}
// Mirror the public folder into the build output, skipping index.html
// (which webpack emits itself) and resolving symlinks.
function copyPublicFolder() {
  const options = {
    dereference: true,
    filter: file => file !== paths.appHtml
  };
  fs.copySync(paths.appPublic, paths.appBuild, options);
}
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Paginator
* @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: DbSelect.php 23855 2011-04-10 19:03:02Z ramon $
*/
/**
* @see Zend_Paginator_Adapter_Interface
*/
require_once 'Zend/Paginator/Adapter/Interface.php';
/**
* @see Zend_Db
*/
require_once 'Zend/Db.php';
/**
* @see Zend_Db_Select
*/
require_once 'Zend/Db/Select.php';
/**
* @category Zend
* @package Zend_Paginator
* @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Paginator_Adapter_DbSelect implements Zend_Paginator_Adapter_Interface
{
/**
* Name of the row count column
*
* @var string
*/
const ROW_COUNT_COLUMN = 'zend_paginator_row_count';
/**
* The COUNT query
*
* @var Zend_Db_Select
*/
protected $_countSelect = null;
/**
* Database query
*
* @var Zend_Db_Select
*/
protected $_select = null;
/**
* Total item count
*
* @var integer
*/
protected $_rowCount = null;
/**
* Constructor.
*
* @param Zend_Db_Select $select The select query
*/
public function __construct(Zend_Db_Select $select)
{
$this->_select = $select;
}
/**
* Sets the total row count, either directly or through a supplied
* query. Without setting this, {@link getPages()} selects the count
* as a subquery (SELECT COUNT ... FROM (SELECT ...)). While this
* yields an accurate count even with queries containing clauses like
* LIMIT, it can be slow in some circumstances. For example, in MySQL,
* subqueries are generally slow when using the InnoDB storage engine.
* Users are therefore encouraged to profile their queries to find
* the solution that best meets their needs.
*
* @param Zend_Db_Select|integer $rowCount Total row count integer
* or query
* @return Zend_Paginator_Adapter_DbSelect $this
* @throws Zend_Paginator_Exception
*/
public function setRowCount($rowCount)
{
if ($rowCount instanceof Zend_Db_Select) {
$columns = $rowCount->getPart(Zend_Db_Select::COLUMNS);
$countColumnPart = empty($columns[0][2])
? $columns[0][1]
: $columns[0][2];
if ($countColumnPart instanceof Zend_Db_Expr) {
$countColumnPart = $countColumnPart->__toString();
}
$rowCountColumn = $this->_select->getAdapter()->foldCase(self::ROW_COUNT_COLUMN);
// The select query can contain only one column, which should be the row count column
if (false === strpos($countColumnPart, $rowCountColumn)) {
/**
* @see Zend_Paginator_Exception
*/
require_once 'Zend/Paginator/Exception.php';
throw new Zend_Paginator_Exception('Row count column not found');
}
$result = $rowCount->query(Zend_Db::FETCH_ASSOC)->fetch();
$this->_rowCount = count($result) > 0 ? $result[$rowCountColumn] : 0;
} else if (is_integer($rowCount)) {
$this->_rowCount = $rowCount;
} else {
/**
* @see Zend_Paginator_Exception
*/
require_once 'Zend/Paginator/Exception.php';
throw new Zend_Paginator_Exception('Invalid row count');
}
return $this;
}
/**
* Returns an array of items for a page.
*
* @param integer $offset Page offset
* @param integer $itemCountPerPage Number of items per page
* @return array
*/
public function getItems($offset, $itemCountPerPage)
{
$this->_select->limit($itemCountPerPage, $offset);
return $this->_select->query()->fetchAll();
}
/**
* Returns the total number of rows in the result set.
*
* @return integer
*/
public function count()
{
if ($this->_rowCount === null) {
$this->setRowCount(
$this->getCountSelect()
);
}
return $this->_rowCount;
}
/**
* Get the COUNT select object for the provided query
*
* TODO: Have a look at queries that have both GROUP BY and DISTINCT specified.
* In that use-case I'm expecting problems when either GROUP BY or DISTINCT
* has one column.
*
* @return Zend_Db_Select
*/
public function getCountSelect()
{
/**
* We only need to generate a COUNT query once. It will not change for
* this instance.
*/
if ($this->_countSelect !== null) {
return $this->_countSelect;
}
$rowCount = clone $this->_select;
$rowCount->__toString(); // Workaround for ZF-3719 and related
$db = $rowCount->getAdapter();
$countColumn = $db->quoteIdentifier($db->foldCase(self::ROW_COUNT_COLUMN));
$countPart = 'COUNT(1) AS ';
$groupPart = null;
$unionParts = $rowCount->getPart(Zend_Db_Select::UNION);
/**
* If we're dealing with a UNION query, execute the UNION as a subquery
* to the COUNT query.
*/
if (!empty($unionParts)) {
$expression = new Zend_Db_Expr($countPart . $countColumn);
$rowCount = $db
->select()
->bind($rowCount->getBind())
->from($rowCount, $expression);
} else {
$columnParts = $rowCount->getPart(Zend_Db_Select::COLUMNS);
$groupParts = $rowCount->getPart(Zend_Db_Select::GROUP);
$havingParts = $rowCount->getPart(Zend_Db_Select::HAVING);
$isDistinct = $rowCount->getPart(Zend_Db_Select::DISTINCT);
/**
* If there is more than one column AND it's a DISTINCT query, more
* than one group, or if the query has a HAVING clause, then take
* the original query and use it as a subquery of the COUNT query.
*/
if (($isDistinct && count($columnParts) > 1) || count($groupParts) > 1 || !empty($havingParts)) {
$rowCount->reset(Zend_Db_Select::ORDER);
$rowCount = $db
->select()
->bind($rowCount->getBind())
->from($rowCount);
} else if ($isDistinct) {
$part = $columnParts[0];
if ($part[1] !== Zend_Db_Select::SQL_WILDCARD && !($part[1] instanceof Zend_Db_Expr)) {
$column = $db->quoteIdentifier($part[1], true);
if (!empty($part[0])) {
$column = $db->quoteIdentifier($part[0], true) . '.' . $column;
}
$groupPart = $column;
}
} else if (!empty($groupParts)) {
$groupPart = $db->quoteIdentifier($groupParts[0], true);
}
/**
* If the original query had a GROUP BY or a DISTINCT part and only
* one column was specified, create a COUNT(DISTINCT ) query instead
* of a regular COUNT query.
*/
if (!empty($groupPart)) {
$countPart = 'COUNT(DISTINCT ' . $groupPart . ') AS ';
}
/**
* Create the COUNT part of the query
*/
$expression = new Zend_Db_Expr($countPart . $countColumn);
$rowCount->reset(Zend_Db_Select::COLUMNS)
->reset(Zend_Db_Select::ORDER)
->reset(Zend_Db_Select::LIMIT_OFFSET)
->reset(Zend_Db_Select::GROUP)
->reset(Zend_Db_Select::DISTINCT)
->reset(Zend_Db_Select::HAVING)
->columns($expression);
}
$this->_countSelect = $rowCount;
return $rowCount;
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
package socket_test
import (
"net"
"runtime"
"syscall"
"testing"
"golang.org/x/net/internal/nettest"
"golang.org/x/net/internal/socket"
)
// TestSocket verifies that a socket option (SO_RCVBUF) can be set and read
// back through the socket package on a live local UDP socket.
func TestSocket(t *testing.T) {
	t.Run("Option", func(t *testing.T) {
		testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4})
	})
}
// testSocketOption round-trips the integer option so on a local UDP
// connection: set it to N, then read it back. Skips when the platform
// cannot create a local packet listener.
func testSocketOption(t *testing.T, so *socket.Option) {
	c, err := nettest.NewLocalPacketListener("udp")
	if err != nil {
		t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err)
	}
	defer c.Close()
	cc, err := socket.NewConn(c.(net.Conn))
	if err != nil {
		t.Fatal(err)
	}
	const N = 2048
	if err := so.SetInt(cc, N); err != nil {
		t.Fatal(err)
	}
	n, err := so.GetInt(cc)
	if err != nil {
		t.Fatal(err)
	}
	// The kernel may adjust buffer sizes upward, so only require the value
	// read back to be at least N rather than exactly N.
	if n < N {
		t.Fatalf("got %d; want greater than or equal to %d", n, N)
	}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "self:/Volumes/Workspace/Zoom/Code/B_35_7196/Client/src/application/ios/zoom-ios-mobilertc/MobileRTCSample/MobileRTCSample.xcodeproj">
</FileRef>
</Workspace>
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2006-2008 Max-Planck-Institute Saarbruecken (Germany).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org); you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 3 of the License,
// or (at your option) any later version.
//
// Licensees holding a valid commercial license may use this file in
// accordance with the commercial license agreement provided with the software.
//
// This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
// WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
//
// $URL$
// $Id$
//
// Author(s) : Michael Hemmer
/*! \file CGAL/Residue.h
\brief Defines the class CGAL::Residue and CGAL::Modular_traits.
Provides the \c CGAL::Modular_traits specialization for the build in number
types.
*/
#ifndef CGAL_RESIDUE_H
#define CGAL_RESIDUE_H 1
#include <CGAL/basic.h>
#include <CGAL/Modular_arithmetic/Residue_type.h>
#include <CGAL/Coercion_traits.h>
namespace CGAL {
/*! \brief Specialization of CGAL::NT_traits for \c Residue, which is a model
* of the \c Field concept.
* \ingroup CGAL_NT_traits_spec
*/
template <>
class Algebraic_structure_traits<Residue>
    : public Algebraic_structure_traits_base< Residue ,Field_tag >{
public:
    // Modular arithmetic involves no rounding, so Residue is exact.
    typedef CGAL::Tag_true Is_exact;
};
CGAL_DEFINE_COERCION_TRAITS_FROM_TO(short,CGAL::Residue)
CGAL_DEFINE_COERCION_TRAITS_FROM_TO(int ,CGAL::Residue)
CGAL_DEFINE_COERCION_TRAITS_FROM_TO(long ,CGAL::Residue)
} //namespace CGAL
#endif // CGAL_RESIDUE_H
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2011-2020 libbitcoin developers (see AUTHORS)
*
* This file is part of metaverse.
*
* metaverse is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License with
* additional permissions to the one published by the Free Software
* Foundation, either version 3 of the License, or (at your option)
* any later version. For more information see LICENSE.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <metaverse/network/sessions/session_batch.hpp>
#include <atomic>
#include <cstdint>
#include <memory>
#include <metaverse/bitcoin.hpp>
#include <metaverse/network/connector.hpp>
#include <metaverse/network/p2p.hpp>
namespace libbitcoin {
namespace network {
#define CLASS session_batch
#define NAME "session_batch"
using namespace bc::config;
using namespace std::placeholders;
// Construct a batch session. The number of parallel connection attempts per
// batch comes from settings, clamped to a minimum of one attempt.
session_batch::session_batch(p2p& network, bool persistent)
  : session(network, true, persistent),
    batch_size_(std::max(settings_.connect_batch_size, 1u))
{
}
// Synchronizer shared by every connection attempt in one batch. The first
// successful attempt fires the handler and saturates the counter; surplus
// successful connections arriving afterwards are stopped. If all
// batch_size_ attempts fail, the handler fires exactly once with an error.
void session_batch::converge(const code& ec, channel::ptr channel,
    atomic_counter_ptr counter, upgrade_mutex_ptr mutex,
    channel_handler handler)
{
    ///////////////////////////////////////////////////////////////////////////
    // Critical Section
    mutex->lock_upgrade();
    const auto initial_count = counter->load();
    BITCOIN_ASSERT(initial_count <= batch_size_);
    // Already completed, don't call handler.
    if (initial_count == batch_size_)
    {
        mutex->unlock_upgrade();
        //-----------------------------------------------------------------
        // A sibling attempt already won; discard this redundant channel.
        if (!ec)
            channel->stop(error::channel_stopped);
        return;
    }
    // Success jumps the counter straight to batch_size_ so every later
    // callback sees the batch as complete; failure records one more attempt.
    const auto count = !ec ? batch_size_ : initial_count + 1;
    const auto cleared = count == batch_size_;
    //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    mutex->unlock_upgrade_and_lock();
    counter->store(count);
    mutex->unlock();
    ///////////////////////////////////////////////////////////////////////
    if (cleared)
    {
        // If the last connection attempt is an error, normalize the code.
        auto result = ec ? error::operation_failed : error::success;
        handler(result, channel);
    }
}
// Connect sequence.
// ----------------------------------------------------------------------------
// protected:
// Launch a batch of seed-node connection attempts that all funnel into a
// single converge callback; the first success (or the final failure) fires
// the caller's handler exactly once.
void session_batch::connect_seed(connector::ptr connect, channel_handler handler)
{
    // Shared synchronizer state for this batch of attempts.
    const auto batch_mutex = std::make_shared<upgrade_mutex>();
    const auto batch_counter = std::make_shared<atomic_counter>(0);
    const auto join_handler =
        BIND5(converge, _1, _2, batch_counter, batch_mutex, handler);

    for (uint32_t attempt = 0; attempt < batch_size_; ++attempt)
        new_connect(connect, batch_counter, join_handler, true);
}
// protected:
// Launch a batch of address-pool connection attempts funnelled into a single
// converge callback; the first success (or the final failure) fires the
// caller's handler exactly once.
void session_batch::connect(connector::ptr connect, channel_handler handler)
{
    // Shared synchronizer state for this batch of attempts.
    const auto batch_mutex = std::make_shared<upgrade_mutex>();
    const auto batch_counter = std::make_shared<atomic_counter>(0);
    const auto join_handler =
        BIND5(converge, _1, _2, batch_counter, batch_mutex, handler);

    for (uint32_t attempt = 0; attempt < batch_size_; ++attempt)
    {
        new_connect(connect, batch_counter, join_handler, false);
    }
}
// Kick off a single connection attempt: fetch a candidate address (from the
// seed pool when only_seed is set, otherwise from the general address pool)
// and hand it to start_connect. Does nothing once the session is stopped or
// the batch has already converged.
void session_batch::new_connect(connector::ptr connect,
    atomic_counter_ptr counter, channel_handler handler, bool only_seed)
{
    if (stopped())
    {
        log::debug(LOG_NETWORK)
            << "Suspended batch connection.";
        return;
    }
    // The batch already converged; avoid fetching further addresses.
    if (counter->load() == batch_size_)
        return;
    if (only_seed) {
        fetch_seed_address(BIND5(start_connect, _1, _2, connect, counter, handler));
    }
    else {
        fetch_address(BIND5(start_connect, _1, _2, connect, counter, handler));
    }
}
// Validate a fetched candidate address and, if acceptable, launch the actual
// TCP connection attempt whose completion lands in handle_connect.
void session_batch::start_connect(const code& ec, const authority& host,
    connector::ptr connect, atomic_counter_ptr counter, channel_handler handler)
{
    if (stopped(ec))
        return;
    // The batch already converged; drop this attempt silently.
    if (counter->load() == batch_size_)
        return;
    // This termination prevents a tight loop in the empty address pool case.
    if (ec)
    {
        if (ec.value() != error::not_found) {
            log::warning(LOG_NETWORK)
                << "Failure fetching new address: " << ec.message();
        }
        handler(ec, nullptr);
        return;
    }
    // This creates a tight loop in the case of a small address pool.
    if (blacklisted(host))
    {
        handler(error::address_blocked, nullptr);
        return;
    }
    log::trace(LOG_NETWORK)
        << "Connecting to [" << host << "]";
    // CONNECT
    // NOTE(review): the literal 3 is forwarded as handle_connect's `count`
    // argument, which is only used in its failure log line — confirm intent.
    connect->connect(host, BIND7(handle_connect, _1, _2, host, connect,
        counter, 3, handler));
}
// Completion callback for a single connection attempt. On success the peer
// address is stored and the channel forwarded to the batch handler; on
// failure the error is forwarded and a timed-out address is demoted in the
// address pool. Late completions after the batch converged are ignored.
void session_batch::handle_connect(const code& ec, channel::ptr channel,
    const authority& host, connector::ptr connect, atomic_counter_ptr counter, std::size_t count,
    channel_handler handler)
{
    // The batch already converged; ignore this late completion.
    if (counter->load() == batch_size_)
        return;
    if (ec)
    {
        log::trace(LOG_NETWORK)
            << "Failure connecting to [" << host << "] " << count << ","
            << ec.message();
        // If the connection timed out, mark the address inactive in the pool.
        if (ec.value() == error::channel_timeout)
            remove(host.to_network_address(), [](const code&){});
        handler(ec, channel);
        return;
    }
    store(host.to_network_address());
    log::trace(LOG_NETWORK)
        << "Connected to [" << host << "]";
    // This is the end of the connect sequence.
    handler(error::success, channel);
}
} // namespace network
} // namespace libbitcoin
| {
"pile_set_name": "Github"
} |
tinyMCEPopup.requireLangPack();

// Controller for the table "merge cells" popup dialog.
var MergeCellsDialog = {
	// Populate the form fields from the window arguments supplied by the opener.
	init : function() {
		var form = document.forms[0];

		form.numcols.value = tinyMCEPopup.getWindowArg('cols', 1);
		form.numrows.value = tinyMCEPopup.getWindowArg('rows', 1);
	},

	// Hand the chosen column/row counts back to the opener's callback and
	// close the popup.
	merge : function() {
		var form = document.forms[0];
		var onAction;

		tinyMCEPopup.restoreSelection();
		onAction = tinyMCEPopup.getWindowArg('onaction');
		onAction({
			cols : form.numcols.value,
			rows : form.numrows.value
		});
		tinyMCEPopup.close();
	}
};

tinyMCEPopup.onInit.add(MergeCellsDialog.init, MergeCellsDialog);
| {
"pile_set_name": "Github"
} |
<?php
defined('API') or exit('http://gwalker.cn');
// Only the super administrator may perform an export.
if(!is_supper()){die('只有超级管理员才可进行导出操作');}
define('BASEURL',baseUrl());
// API category id taken from the query string.
// NOTE(review): $tag is interpolated into the SQL below; this assumes the
// framework helper I() sanitizes input — verify.
$tag = I($_GET['tag']);
// Name of the downloaded file: "<category name><timestamp>.html".
$filename = find("select cname from cate where aid='{$tag}'");
$version = date('YmdHis');
$filename = $filename['cname'].$version.'.html';
// URL of the API category detail page to fetch.
$url = BASEURL.U(array('act'=>'api','tag'=>$tag));
// Fetch the contents of a URL, preferring file_get_contents() and falling
// back to the cURL extension when it is unavailable. Exits with a message
// if neither mechanism exists. Returns the response body (or false on
// failure, per the underlying functions).
function file_get_contents_fixed($url)
{
    switch (true) {
        case function_exists('file_get_contents'):
            $res = file_get_contents($url);
            break;
        case function_exists('curl_init'):
            $ch = curl_init();
            $timeout = 10; // set to zero for no timeout
            curl_setopt ($ch, CURLOPT_URL,$url);
            curl_setopt ($ch, CURLOPT_RETURNTRANSFER, 1);
            curl_setopt ($ch, CURLOPT_CONNECTTIMEOUT, $timeout);
            $res = curl_exec($ch);
            // Fix: release the cURL handle — the original leaked it.
            curl_close($ch);
            break;
        default :
            exit('导出不可用,请确保可用file_get_contents函数或CURL扩展,');
    }
    return $res;
}
// HTML of the category detail page.
$content = file_get_contents_fixed($url);
//======== inline static js/css files: start =======================================
// Replace each css <link> tag with an inline <style> block — start
$pattern = '/<link href="(.+?\.css)" rel="stylesheet">/is';
// preg_replace_callback helper: fetch the referenced stylesheet and wrap it.
function getCssFileContent($matches){
    $filepath = BASEURL.ltrim($matches[1],'./');
    $content = file_get_contents_fixed($filepath);
    return "<style>".$content."</style>";
}
$content = preg_replace_callback($pattern,'getCssFileContent',$content);
// css replacement — end
// Replace each js <script src> tag with an inline <script> block — start
$pattern = '/<script src="(.+?\.js)"><\/script>/is';
// preg_replace_callback helper: fetch the referenced script and wrap it.
function getJSFileContent($matches){
    $filepath = BASEURL.ltrim($matches[1],'./');
    $content = file_get_contents_fixed($filepath);
    return "<script>".$content."</script>";
}
$content = preg_replace_callback($pattern,'getJSFileContent',$content);
// js replacement — end
//======== inline static js/css files: end =======================================
//======= rewrite page anchor links: start =========================================
// Turn absolute "...tag=N#anchor" hrefs into local "#anchor" hrefs so the
// anchors still work inside the exported standalone HTML file.
$pattern = '/href=".+?tag=\d#(\w+)"/i';
function changeLink($matches){
    return "href=#{$matches[1]}";
}
$content = preg_replace_callback($pattern,'changeLink',$content);
//======= rewrite page anchor links: end =========================================
$headhtml=<<<START
<!--
=======================================================================
导出时间:{$version}
=======================================================================
此文档由API Manager {$tag} 导出
=======================================================================
github : https://github.com/gongwalker/ApiManager.git
=======================================================================
作者 : 路人庚
=======================================================================
QQ : 309581329
=======================================================================
-->
START;
$appendhtml=<<<END
<script>
$('.glyphicon').remove();
$('#topbutton').html('版本号:{$version}');
$('.home').attr('href','#');
</script>
END;
$content=$headhtml.$content.$appendhtml;
download($filename,$content);
exit;
| {
"pile_set_name": "Github"
} |
import numpy as np
import os
import pytest
import unittest
from geopyspark.geotrellis import SpatialKey, Extent, Tile
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.constants import LayerType, Operation
class AggregateCellsTest(BaseTestClass):
    """Tests for TiledRasterLayer.aggregate_by_cell over a two-band layer.

    The layer deliberately contains a duplicate key, SpatialKey(1, 0), mapped
    to both tile_1 (bands: first, second) and tile_2 (bands: second, first),
    so aggregation at that key combines the two tiles cell-wise.  All other
    keys hold a single tile and should pass through unchanged.
    """

    # Band whose values increase left-to-right; every row is 1..5.
    first = np.array([[
        [1.0, 2.0, 3.0, 4.0, 5.0],
        [1.0, 2.0, 3.0, 4.0, 5.0],
        [1.0, 2.0, 3.0, 4.0, 5.0],
        [1.0, 2.0, 3.0, 4.0, 5.0],
        [1.0, 2.0, 3.0, 4.0, 5.0]]])

    # Band whose values increase top-to-bottom; the transpose of `first`.
    second = np.array([[
        [1.0, 1.0, 1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0, 2.0, 2.0],
        [3.0, 3.0, 3.0, 3.0, 3.0],
        [4.0, 4.0, 4.0, 4.0, 4.0],
        [5.0, 5.0, 5.0, 5.0, 5.0]]])

    # Two-band cell stacks; -1.0 is the layer's no-data value.
    cells_1 = np.array([first, second])
    cells_2 = np.array([second, first])
    tile_1 = Tile.from_numpy_array(cells_1, -1.0)
    tile_2 = Tile.from_numpy_array(cells_2, -1.0)

    # SpatialKey(1, 0) appears twice so aggregation combines tile_1 and tile_2.
    layer = [(SpatialKey(0, 0), tile_1),
             (SpatialKey(1, 0), tile_1),
             (SpatialKey(1, 0), tile_2),
             (SpatialKey(0, 1), tile_1),
             (SpatialKey(1, 1), tile_1)]
    rdd = BaseTestClass.pysc.parallelize(layer)

    # 2x2 layout of 5x5 tiles over a square extent.
    extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 33.0, 'ymax': 33.0}
    layout = {'layoutCols': 2, 'layoutRows': 2, 'tileCols': 5, 'tileRows': 5}

    metadata = {'cellType': 'float32ud-1.0',
                'extent': extent,
                'crs': '+proj=longlat +datum=WGS84 +no_defs ',
                'bounds': {
                    'minKey': {'col': 0, 'row': 0},
                    'maxKey': {'col': 1, 'row': 1}},
                'layoutDefinition': {
                    'extent': extent,
                    'tileLayout': {'tileCols': 5, 'tileRows': 5, 'layoutCols': 2, 'layoutRows': 2}}}

    raster_rdd = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, metadata)

    @pytest.fixture(autouse=True)
    def tearDown(self):
        # NOTE(review): this closes the shared Py4J gateway after EVERY test
        # method, not once per class — confirm that is intended.
        yield
        BaseTestClass.pysc._gateway.close()

    def test_aggregate_sum(self):
        # At the duplicated key the two tiles sum band-wise to first + second.
        result = self.raster_rdd.aggregate_by_cell(operation=Operation.SUM)
        expected = np.array([self.first + self.second, self.first + self.second])

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        # Keys holding a single tile are unchanged by aggregation.
        self.assertTrue((result.lookup(0, 0)[0].cells[0] == self.first).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[1] == self.second).all())

    def test_aggregate_min(self):
        # Per-cell minimum of `first` and `second` (min(col+1, row+1)).
        result = self.raster_rdd.aggregate_by_cell(operation=Operation.MIN)

        band = np.array([[
            [1, 1, 1, 1, 1],
            [1, 2, 2, 2, 2],
            [1, 2, 3, 3, 3],
            [1, 2, 3, 4, 4],
            [1, 2, 3, 4, 5]]])

        expected = np.array([band, band])

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[0] == self.first).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[1] == self.second).all())

    def test_aggregate_max(self):
        # Per-cell maximum of `first` and `second` (max(col+1, row+1)).
        result = self.raster_rdd.aggregate_by_cell(operation=Operation.MAX)

        band = np.array([[
            [1, 2, 3, 4, 5],
            [2, 2, 3, 4, 5],
            [3, 3, 3, 4, 5],
            [4, 4, 4, 4, 5],
            [5, 5, 5, 5, 5]]])

        expected = np.array([band, band])

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[0] == self.first).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[1] == self.second).all())

    def test_aggregate_mean(self):
        # Per-cell mean of `first` and `second`.
        result = self.raster_rdd.aggregate_by_cell(Operation.MEAN)

        band = np.array([[
            [1, 1.5, 2, 2.5, 3],
            [1.5, 2, 2.5, 3, 3.5],
            [2, 2.5, 3, 3.5, 4],
            [2.5, 3, 3.5, 4, 4.5],
            [3, 3.5, 4, 4.5, 5]]])

        expected = np.array([band, band])

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[0] == self.first).all())
        self.assertTrue((result.lookup(0, 0)[0].cells[1] == self.second).all())

    def test_aggregate_variance(self):
        result = self.raster_rdd.aggregate_by_cell(Operation.VARIANCE)

        # Per-cell mean of the two samples.
        band = np.array([[
            [1, 1.5, 2, 2.5, 3],
            [1.5, 2, 2.5, 3, 3.5],
            [2, 2.5, 3, 3.5, 4],
            [2.5, 3, 3.5, 4, 4.5],
            [3, 3.5, 4, 4.5, 5]]])

        # NOTE(review): `expected` is the SUM of squared deviations from the
        # mean (not divided by n or n-1); this mirrors the backend's variance
        # output for two samples — confirm against GeoTrellis semantics.
        expected = np.array([
            ((self.first - band) ** 2) + ((self.second - band) ** 2),
            ((self.first - band) ** 2) + ((self.second - band) ** 2)
        ])

        # Keys with a single sample yield the no-data value (-1.0).
        expected_2 = np.full((5, 5), -1.0)

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())

    def test_aggregate_std(self):
        result = self.raster_rdd.aggregate_by_cell(Operation.STANDARD_DEVIATION)

        # Per-cell mean of the two samples.
        band = np.array([[
            [1, 1.5, 2, 2.5, 3],
            [1.5, 2, 2.5, 3, 3.5],
            [2, 2.5, 3, 3.5, 4],
            [2.5, 3, 3.5, 4, 4.5],
            [3, 3.5, 4, 4.5, 5]]])

        # Square root of the sum-of-squared-deviations used in the variance
        # test above; see the NOTE(review) there about the missing 1/n factor.
        expected = np.array([
            (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2),
            (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2)
        ])

        # Keys with a single sample yield the no-data value (-1.0).
        expected_2 = np.full((5, 5), -1.0)

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())
if __name__ == "__main__":
    # NOTE(review): unittest.main() calls sys.exit() by default, so the
    # stop() below is unreachable; pass exit=False if stop() should run.
    unittest.main()
    BaseTestClass.pysc.stop()
| {
"pile_set_name": "Github"
} |
# Croatian translation for pdfsam
# Copyright (c) 2015 Rosetta Contributors and Canonical Ltd 2015
# This file is distributed under the same license as the pdfsam package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
msgid ""
msgstr ""
"Project-Id-Version: pdfsam\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2020-01-29 14:48+0100\n"
"PO-Revision-Date: 2020-05-26 15:05+0000\n"
"Last-Translator: Milo Ivir <Unknown>\n"
"Language-Team: Croatian <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2020-07-20 08:36+0000\n"
"X-Generator: Launchpad (build 4809fcb62f445aaa3ae919f7f6c3cc7d156ea57a)\n"
"Language: hr\n"
msgid "No PDF document has been selected"
msgstr "Nije odabran nijedan PDF dokument"
msgid "Extract"
msgstr "Izdvoji"
msgid "Extract pages from PDF documents."
msgstr "Izdvoji stranice iz PDF dokumenta."
msgid "File names settings"
msgstr "Postavke imena datoteke"
msgid "Extract settings"
msgstr "Postavke izdvajanja"
msgid "Output settings"
msgstr "Postavke rezultata"
msgid "Pages to extract (ex: 2 or 5-23 or 2,5-7,12-)"
msgstr "Stranice koje želiš izdvojiti (npr. 2 ili 5-23 ili 2,5-7,12-)"
msgid "Invalid page ranges"
msgstr "Neispravni rasponi stranica"
msgid "Extract pages:"
msgstr "Izdvoji stranice:"
#, java-format
msgid "Installing theme {0}."
msgstr "Instaliranje teme {0}."
#, java-format
msgid "Started in {0}"
msgstr "Započeto u {0}"
msgid "Closing PDFsam..."
msgstr "Zatvaranje PDFsam-a …"
msgid "Log register"
msgstr "Log zapisnik"
msgid "Copy"
msgstr "Kopiraj"
msgid "Clear"
msgstr "Obriši"
msgid "Select all"
msgstr "Odaberi sve"
msgid "Save log"
msgstr "Spremi zapisnik"
msgid "Select where to save the log file"
msgstr "Odaberi mjesto za spremanje datoteke log zapisnika"
msgid "About"
msgstr "O programu"
msgid "Premium features"
msgstr "Premium funkcije"
msgid "Close"
msgstr "Zatvori"
msgid "Modules"
msgstr "Moduli"
msgid "Subscribe to the official news feed"
msgstr "Pretplati se na novosti"
msgid "Environment"
msgstr "Okruženje"
msgid "Vendor: %s"
msgstr "Isporučitelj: %s"
msgid "Java runtime path: %s"
msgstr "Staza za Java izvođenje: %s"
msgid "JavaFX runtime version %s"
msgstr "Verzija JavaFX izvođenja %s"
#, java-format
msgid "Max memory {0}"
msgstr "Maksimalna memorija {0}"
msgid "Copy to clipboard"
msgstr "Kopiraj u međuspremnik"
msgid "Support"
msgstr "Podrška"
msgid "Bug and feature requests"
msgstr "Prijava grešaka i predlaganje novih funkcija"
msgid "Documentation"
msgstr "Dokumentacija"
msgid "Contribute"
msgstr "Doprinesi"
msgid "Fork PDFsam on GitHub"
msgstr "Prati PDFsam na GitHub-u"
msgid "Translate"
msgstr "Prevedi"
msgid "Donate"
msgstr "Doniraj"
msgid "Social"
msgstr "Društvene mreže"
msgid "Follow us on Twitter"
msgstr "Prati nas na Twitter-u"
msgid "Like us on Facebook"
msgstr "Lajkaj nas na Facebook-u"
msgid "Log register rows:"
msgstr "Broj redaka u log zapisniku:"
msgid "Maximum number of rows displayed by the Log register"
msgstr "Maksimalan broj prikazanih redaka u log zapisniku"
msgid "Language:"
msgstr "Jezik:"
msgid "Set your preferred language (restart needed)"
msgstr "Postavi preferirani jezik (zahtijeva ponovno pokretanje programa)"
msgid "Startup module:"
msgstr "Početni modul:"
msgid "Set the module to open at application startup (restart needed)"
msgstr ""
"Odredi modul koji se otvara prilikom pokretanja programa (ponovo pokreni "
"program)"
msgid ""
"Usage statistics are used to populate the modules quick bar on the left with "
"the most used and most recently used modules."
msgstr ""
"Statistika korištenja se koristi za popunjavanje trake za brzi izbor modula "
"na lijevoj strani s najčešće korištenim i zadnje korištenim modulima."
msgid "Manually selected"
msgstr "Ručni odabir"
msgid ""
"Automatically set the destination directory to the selected PDF document "
"directory"
msgstr "Automatski postavi odredišnu mapu na mapu odabranog PDF dokumenta"
msgid "Clear usage statistics"
msgstr "Izbriši statistiku korištenja"
msgid "Appearance"
msgstr "Izgled"
msgid "Behavior"
msgstr "Ponašanje"
msgid "Workspace"
msgstr "Radno okruženje"
msgid "Output"
msgstr "Rezultat"
msgid "Check for updates now"
msgstr "Provjeri nadopune sada"
msgid ""
"Select a directory where documents will be saved and loaded by default"
msgstr "Odaberi mapu gdje će se dokumenti standardno spremati i učitati"
msgid "Select a directory"
msgstr "Odaberi mapu"
msgid ""
"Select a previously saved workspace that will be automatically loaded at "
"startup"
msgstr ""
"Odaberi prethodno spremljeno radno okruženje koje će se automatski učitati "
"prilikom pokretanja programa"
msgid "Select a workspace"
msgstr "Odaberi radno okruženje"
msgid "Default working directory:"
msgstr "Standardna radna mapa:"
msgid "Load default workspace at startup:"
msgstr "Učitaj standardno radno okruženje prilikom pokretanja programa:"
msgid "Dashboard"
msgstr "Kontrolna ploča"
msgid "Check for updates at startup"
msgstr "Provjeri nadopune prilikom pokretanja programa"
msgid ""
"Set whether new version availability should be checked on startup (restart "
"needed)"
msgstr ""
"Odredi, treba li provjeriti dostupnost nove verzije prilikom pokretanja "
"programa (ponovo pokreni program)"
msgid "Check for news at startup"
msgstr "Provjeri novosti prilikom pokretanja programa"
msgid ""
"Set whether the application should check for news availability on startup "
"(restart needed)"
msgstr ""
"Odredi, treba li program provjeriti novosti prilikom pokretanja (ponovo "
"pokreni program)"
msgid "Enabled PDF compression"
msgstr "Aktiviraj PDF kompresiju"
msgid "Set whether \"Compress output file\" should be enabled by default"
msgstr ""
"Odredi, treba li „Komprimiraj datoteke rezultata” biti standardno aktivirano"
msgid "Play alert sounds"
msgstr "Koristi zvukove upozorenja"
msgid "Turn on or off alert sounds"
msgstr "Uključi ili isključi zvukove upozorenja"
msgid "Store passwords when saving a workspace file"
msgstr "Spremi lozinku prilikom spremanja datoteke radnog okruženja"
msgid ""
"If an encrypted PDF document has been opened with a password, save the "
"password in the workspace file"
msgstr ""
"Ako je šifrirani PDF dokument otvoren uz pomoć lozinke, spremi lozinku u "
"datoteku radnog okruženja"
msgid "Show donation window"
msgstr "Prikaži prozor za donacije"
msgid ""
"Turn on or off the notification appearing once in a while and asking the "
"user to support PDFsam with a donation"
msgstr ""
"Uključi ili isključi obavijesti koje se povremeno pojavljuju u kojima se "
"moli korisnika da podrži razvoj PDFsama putem donacije"
msgid "Show premium features"
msgstr "Prikaži premium funkcije"
msgid ""
"Set whether the application should fetch and show premium features "
"description in the modules dashboard"
msgstr ""
"Odredi, treba li program dovatiti i prikazati opis premium funkcija u ploči "
"modula"
msgid "Use the selected PDF document directory as output directory"
msgstr "Koristi mapu odabrane PDF datoteke kao mapu rezultata"
msgid "Save default workspace on exit"
msgstr "Spremi standardno radno okruženje prilikom zatvaranja programa"
msgid "If a default workspace is set, save it on application exit"
msgstr ""
"Ako je standardno radno okruženje postavljeno, spremi ga prilikom zatvaranja "
"programa"
msgid "Maximum number of rows mast be a positive number"
msgstr "Maksimalni broj redaka mora biti pozitivan broj"
msgid "Generate high quality thumbnails (slower)"
msgstr "Izradi minijature visoke kvalitete (sporije)"
msgid "Size in px:"
msgstr "Veličina u px:"
msgid "Library used to generate thumbnails"
msgstr "Biblioteka korištena za izradu minijatura"
msgid "Thumbnails creator:"
msgstr "Izrada minijatura:"
msgid "Settings"
msgstr "Postavke"
msgid "Drag and drop PDF files or directories containing PDF files"
msgstr "Povuci i ispusti PDF datoteku ili mapu s PDF datotekama"
msgid "No PDF found"
msgstr "Nije pronađen niti jedan PDF"
msgid "What's new"
msgstr "Što je novo"
msgid "Open with"
msgstr "Otvori sa"
msgid "Select the task to perform on the following files"
msgstr "Odaberi zadatak koji će se obavljati nad sljedećim datotekama"
msgid "Overwrite"
msgstr "Prepiši"
msgid "Cancel"
msgstr "Odustani"
msgid "Directory not empty"
msgstr "Mapa nije prazna"
msgid "The selected directory is not empty"
msgstr "Odabrana mapa nije prazna"
msgid "Overwrite files with the same name as the generated ones?"
msgstr "Prepisati datoteke s istim imenom kao izrađene datoteke?"
msgid "Don't overwrite existing file"
msgstr "Nemoj prepisati postojeću datoteku"
msgid "Overwrite confirmation"
msgstr "Potvrda za prepisivanje datoteke"
msgid "A file with the given name already exists"
msgstr "Datoteka sa zadanim imenom već postoji"
msgid "Do you want to overwrite it?"
msgstr "Želiš li je prepisati?"
msgid "Yes"
msgstr "Da"
msgid "No"
msgstr "Ne"
msgid "Non existing directory"
msgstr "Nepostojeća mapa"
msgid "The selected output directory does not exist"
msgstr "Odabrana mapa rezultata ne postoji"
msgid "Do you want to create it?"
msgstr "Želiš li je stvoriti?"
msgid "Subdirectories"
msgstr "Podmape"
msgid "Subdirectories have been found"
msgstr "Podmape su pronađene"
msgid "Do you want to add PDF files found in subdirectories?"
msgstr "Želiš li dodati PDF datoteke koje su pornađene u podmapama?"
msgid "Task failed"
msgstr "Zadatak nije uspio"
msgid "PDFsam can try to overcome the failure"
msgstr "PDFsam može pokušati ispraviti grešku"
msgid ""
"It may result in PDF files with partial or missing data, proceed anyway?"
msgstr ""
"To može rezultirati PDF datotekama s djelomičnim ili izgubljenim podacima, "
"svejedno nastaviti?"
msgid "Summary"
msgstr "Sažetak"
msgid "Keywords"
msgstr "Ključne riječi"
msgid "Document details"
msgstr "Detalji dokumenta"
msgid "Invalid parameters"
msgstr "Neispravni parametri"
msgid ""
"Input parameters are invalid, open the application messages for details."
msgstr "Parametri unosa su neispravni, otvori poruke programa za detalje."
msgid "Access denied"
msgstr "Pristup zabranjen"
#, java-format
msgid ""
"Unable to access \"{0}\", please make sure you have write permissions or "
"open the application messages for details."
msgstr ""
"Nije moguće pristupiti „{0}”, osiguraj prava pisanja ili otvori poruke "
"programa za detalje."
#, java-format
msgid "You performed {0} tasks with PDFsam, did it help?"
msgstr "Izvršio/izvršila si {0} zadatka s PDFsam-om. Je li ti bio od pomoći?"
msgid "Give something back"
msgstr "Daj nešto zauzvrat"
msgid "PDFsam worked hard!"
msgstr "PDFsam se je naradio!"
msgid "Spread the word!"
msgstr "Širi vijest!"
#, java-format
msgid "PDFsam {0} is available for download"
msgstr "PDFsam {0} je dostupan za preuzimanje"
msgid "Download"
msgstr "Preuzmi"
msgid "New version available"
msgstr "Dostupna je nova verzija"
msgid "You are running the latest version of PDFsam Basic"
msgstr "Koristiš najnoviju verziju programa PDFsam Basic"
msgid "No update"
msgstr "Nema nadopuna"
msgid "Open"
msgstr "Otvori"
msgid "E_xit"
msgstr "_Zatvori program"
msgid "_Settings"
msgstr "_Postavke"
msgid "Application messages"
msgstr "Poruke programa"
msgid "_Modules"
msgstr "_Moduli"
msgid "_Workspace"
msgstr "_Radno okruženje"
msgid "_Load"
msgstr "_Učitaj"
msgid "_Save"
msgstr "_Spremi"
msgid "Recen_ts"
msgstr "_Nedavne"
msgid "_Clear recents"
msgstr "_Ukloni nedavne"
msgid "Select the workspace file to save"
msgstr "Odaberi datoteku radnog okruženja koju želiš spremiti"
msgid "Select the workspace to load"
msgstr "Odaberi datoteku radnog okruženja koju želiš učitati"
msgid "Split by bookmarks"
msgstr "Rastavi prema oznakama"
msgid ""
"Split a PDF document at bookmarked pages by specifying a bookmark level."
msgstr ""
"Rastavi PDF dokument na označenim stranicama, određivanjem razine oznake."
msgid "Select or drag and drop the PDF you want to split"
msgstr "Odaberi ili ispusti PDF koji želiš rastaviti"
msgid "Split settings"
msgstr "Postavke rastavljanja"
msgid "Regular expression the bookmark has to match"
msgstr "Regularni izraz kojem oznaka mora odgovarati"
msgid "Split at this bookmark level:"
msgstr "Rastavi pri ovoj razini oznaka:"
msgid "Matching regular expression:"
msgstr "Odgovarajući regularni izraz:"
msgid "A regular expression the bookmark text has to match"
msgstr "Odgovarajući izraz kojem tekstualna oznaka treba odgovarati"
msgid ""
"Example: use .*Chapter.* to match bookmarks containing the word \"Chapter\""
msgstr ""
"Primjer: koristi .*Poglavlje.* za poklapanje oznaka koje sadrže riječ "
"„Poglavlje”"
msgid "Invalid bookmarks level"
msgstr "Neispravna razina oznaka"
msgid "Show errors"
msgstr "Prikaži pogreške"
msgid "Run"
msgstr "Pokreni"
msgid "Requested"
msgstr "Traženo"
msgid "Completed"
msgstr "Dovršeno"
msgid "Failed"
msgstr "Neuspjelo"
msgid "Running"
msgstr "U tijeku"
#, java-format
msgid "Running {0}%"
msgstr "U tijeku {0} %"
msgid "The selected PDF document is invalid"
msgstr "Odabrani PDF dokument nije ispravan"
msgid "Select the PDF file"
msgstr "Odaberi PDF datoteku"
msgid "_Select PDF"
msgstr "_Odaberi PDF"
msgid "_Clear"
msgstr "_Izbriši"
msgid "Clear all settings"
msgstr "Izbriši sve postavke"
#, java-format
msgid "Pages: {0}, PDF Version: {1}"
msgstr "Stranice: {0}, PDF verzija: {1}"
msgid "Loading..."
msgstr "Učitava se …"
msgid "Document properties"
msgstr "Svojstva dokumenta"
msgid "Remove"
msgstr "Ukloni"
msgid "Set destination"
msgstr "Postavi odredište"
msgid "Open Folder"
msgstr "Otvori mapu"
msgid "Enter the user password"
msgstr "Upiši korisničku lozinku"
msgid "Unlock"
msgstr "Otključaj"
msgid "Drag and drop PDF files here"
msgstr "Povuci i ispusti PDF dokumente ovdje"
msgid "Set as range for all"
msgstr "Postavi kao raspon za sve"
msgid "Move to Top"
msgstr "Premjesti na vrh"
msgid "Move Up"
msgstr "Premjesti prema gore"
msgid "Move Down"
msgstr "Premjesti prema dolje"
msgid "Move to Bottom"
msgstr "Premjesti na kraj"
msgid "Duplicate"
msgstr "Dupliciraj"
msgid "Size"
msgstr "Veličina"
msgid "Modified"
msgstr "Promijenjeno"
msgid "Name"
msgstr "Ime"
msgid "Add documents to the table"
msgstr "Dodaj dokumente u tablicu"
msgid "_Add"
msgstr "_Dodaj"
msgid "PDF list from _text/csv file"
msgstr "Popis PDF-ova iz _text/csv datoteke"
msgid "Select pdf documents to load"
msgstr "Odaberi pdf dokumente koje želiš učitati"
msgid "Select a text or CSV file to load"
msgstr "Odaberi tekstualnu ili CSV datoteku koju želiš učitati"
msgid "Removes selected documents"
msgstr "Uklanja odabrane dokumente"
msgid "_Remove"
msgstr "_Ukloni"
msgid "Removes every document"
msgstr "Uklanja sve dokumente"
msgid "C_lear all settings"
msgstr "_Izbriši sve postavke"
msgid "Moves up selected documents"
msgstr "Premješta odabrane dokumente prema gore"
msgid "Move _Up"
msgstr "Premjesti prema _gore"
msgid "Moves down selected documents"
msgstr "Premješta odabrane dokumente prema dolje"
msgid "Move _Down"
msgstr "Premjesti prema _dolje"
msgid "Pace"
msgstr "Koraci"
msgid ""
"Double click to set the number of pages after which the task will switch to "
"the next file"
msgstr ""
"Dvostrukim klikom odredi broj stranica nakon kojih će se zadatak prebaciti "
"na sljedeću datoteku"
msgid "Reverse"
msgstr "Obrni"
msgid "Pages"
msgstr "Broj stranica"
msgid "Double click to set selected pages (ex: 2 or 5-23 or 2,5-7,12-)"
msgstr ""
"Dvostrukim klikom odredi odabrane stranice (npr: 2 or 5-23 or 2,5-7,12-)"
msgid "Page ranges"
msgstr "Raspon stranica"
msgid "The selected PDF file is invalid"
msgstr "Odabrana PDF datoteka nije ispravna"
msgid "Select a file"
msgstr "Odaberi datoteku"
msgid "The selected file must exist. "
msgstr "Odabrana datoteka mora postojati. "
#, java-format
msgid "Allowed extensions are {0}"
msgstr "Dopušteni nastavci su {0}"
msgid "Compress output file/files"
msgstr "Komprimiraj datoteke rezultata"
msgid "Discard bookmarks"
msgstr "Odbaci oznake"
msgid ""
"Tick the box if you don't want to retain any bookmark from the original PDF "
"document"
msgstr ""
"Označi kvadratić, ako ne želiš zadržati oznake iz originalnog PDF dokumenta"
msgid "Output PDF version:"
msgstr "PDF verzija rezultata:"
msgid "Show advanced settings"
msgstr "Prikaži napredne postavke"
msgid "Hide advanced settings"
msgstr "Sakrij napredne postavke"
msgid "Overwrite if already exists"
msgstr "Prepiši, ako već postoji"
msgid ""
"Tick the box if you want to overwrite the output files if they already exist."
msgstr "Označi kvadratić, ako želiš prepisati postojeće datoteke rezultata."
#, java-format
msgid "Version {0}"
msgstr "Verzija {0}"
msgid "Same as the input document"
msgstr "Jednaka kao dokument unosa"
msgid "An existing output directory is required"
msgstr "Potrebna je jedna postojeća mapa rezultata"
msgid "The output directory is required"
msgstr "Potrebna je mapa rezultata"
msgid "Select an existing directory"
msgstr "Odaberi jednu postojeću mapu"
msgid "Select"
msgstr "Odaberi"
msgid "Browse"
msgstr "Pretraži"
#, java-format
msgid "PDF version required: {0}"
msgstr "Potrebna PDF verzija: {0}"
msgid "Prefix for the generated files names"
msgstr "Prefiks za izrađena imena datoteka"
msgid "Add prefix"
msgstr "Dodaj prefiks"
msgid "Generated PDF documents name prefix:"
msgstr "Prefiks imena izrađenih PDF datoteka:"
msgid "Prefix for the output files name."
msgstr "Prefiks za imena datoteka rezultata."
msgid "Some special keywords are replaced with runtime values."
msgstr "Neke ključne riječi su zamijenjene s vrijednostima izvođenja."
msgid "Right click to add these keywords."
msgstr "Desni klik za dodavanje ovih ključnih riječi."
msgid "Split after"
msgstr "Rastavi nakon"
msgid "No page selected"
msgstr "Nijedna stranica odabrana"
msgid "Split"
msgstr "Rastavi"
msgid "Split a PDF document at the given page numbers."
msgstr "Rastavi PDF dokument pri zadanim brojevima stranica."
msgid "Every page"
msgstr "Svake stranice"
msgid "Even pages"
msgstr "Parnih stranica"
msgid "Odd pages"
msgstr "Neparnih stranica"
msgid "Split the document after the given page numbers"
msgstr "Rastavi dokument nakon zadanih brojeva stranica"
msgid ""
"Splits the PDF every \"n\" pages creating documents of \"n\" pages each"
msgstr ""
"Rastavi PDF svakih „n” stranica, izrađujući pojedinačne dokumente s „n” "
"stranicama"
msgid "Split after the following page numbers"
msgstr "Rastavi nakon sljedećih brojeva stranica"
msgid "Page numbers to split at (n1,n2,n3..)"
msgstr "Brojevi stranica pri kojima se rastavlja (n1, n2, n3 …)"
msgid "Invalid page numbers"
msgstr "Neispravni brojevi stranica"
msgid "Only valid positive page numbers are allowed"
msgstr "Samo ispravni pozitivni brojevi stranica su dopušteni"
msgid "Split by every \"n\" pages"
msgstr "Rastavi svakih „n” stranica"
msgid "Number of pages"
msgstr "Broj stranica"
msgid "Invalid number of pages"
msgstr "Neispravan broj stranica"
msgid "Checking for updates"
msgstr "Provjeravanje nadopuna"
msgid "Unable to find the latest available version."
msgstr "Nije moguće pronaći posljednju dostupnu verziju."
#, java-format
msgid "Unable to find any valid PDF file in the list: {0}"
msgstr "Nije moguće pronaći ispravnu PDF datoteku u popisu: {0}"
#, java-format
msgid "Unable to load PDF list file from {0}"
msgstr "Nije moguće učitati PDF datoteku popisa iz {0}"
msgid "Loading pdf documents"
msgstr "Učitavanje pdf dokumenata"
msgid "Documents loaded"
msgstr "Dokumenti učitani"
msgid "Unable to retrieve premium features description"
msgstr "Nije moguće dohvatiti opise premium funkcija"
msgid "Fetching premium modules"
msgstr "Dohvaćanje premium modula"
msgid "Unable to retrieve premium modules"
msgstr "Nije moguće dohvatiti premium module"
msgid "Unable to retrieve latest news"
msgstr "Nije moguće dohvatiti najnovije novosti"
msgid "Fetching latest news"
msgstr "Dohvaćanje najnovijih novosti"
msgid "Unable to retrieve the latest news"
msgstr "Nije moguće dohvatiti najnovije novosti"
#, java-format
msgid "Saving workspace data to {0}"
msgstr "Spremanje podataka radnog okruženja u {0}"
msgid "Workspace saved"
msgstr "Radno okruženje spremljeno"
msgid "Requesting modules state"
msgstr "Traženje stanja modula"
msgid "Unable to save modules workspace"
msgstr "Nije moguće spremiti radno okruženje modula"
#, java-format
msgid "Unable to save workspace to {0}"
msgstr "Nije moguće spremiti radno okruženje u {0}"
#, java-format
msgid "Loading workspace from {0}"
msgstr "Učitavanje radnog okruženja iz {0}"
msgid "Workspace loaded"
msgstr "Radno okruženje učitano"
#, java-format
msgid "Unable to load workspace from {0}"
msgstr "Nije moguće učitati radno okruženje iz {0}"
msgid "Split by size"
msgstr "Rastavi prema veličini"
msgid "Split a PDF document in files of the given size (roughly)."
msgstr "Rastavi PDF dokument u datoteke zadane veličine (približno)."
msgid "Megabytes"
msgstr "Megabajta"
msgid "MB"
msgstr "MB"
msgid "Kilobytes"
msgstr "Kilobajta"
msgid "KB"
msgstr "KB"
msgid "Set the size to split at"
msgstr "Postavi veličinu pri kojoj treba rastavljati"
msgid "Size must be a number"
msgstr "Veličina treba biti broj"
msgid "Split at this size:"
msgstr "Rastavi pri ovoj veličini:"
msgid "Invalid split size"
msgstr "Neispravna veličina razdvajanja"
msgid "Alternate Mix"
msgstr "Sjedini naizmjence"
msgid ""
"Merge two or more PDF documents taking pages alternately in natural or "
"reverse order."
msgstr ""
"Sjedini dvije ili više PDF datoteke, naizmjence uzimajući stranice u "
"normalnom ili obrnutom redoslijedu."
msgid "Destination file"
msgstr "Odredišna datoteka"
msgid ""
"Double click to set pages you want to mix (ex: 2 or 5-23 or 2,5-7,12-)"
msgstr ""
"Dvostrukim klikom odredi stranice koje želiš miješati (npr. 2 ili 5-23 ili "
"2,5-7,12-)"
msgid "Select a positive integer number as pace"
msgstr "Odaberi pozitivan cijeli broj za korake"
msgid "All pages"
msgstr "Sve stranice"
msgid "90 degrees clockwise"
msgstr "90 stupnjeva na desno"
msgid "180 degrees clockwise"
msgstr "180 stupnjeva"
msgid "90 degrees counterclockwise"
msgstr "90 stupnjeva na lijevo"
msgid "Rotate "
msgstr "Okreni "
msgid ""
"Double click to set pages you want to rotate (ex: 2 or 5-23 or 2,5-7,12-)"
msgstr ""
"Dvostrukim klikom odredi stranice koje želiš okrenuti (npr. 2 ili 5-23 ili "
"2,5-7,12-)"
msgid "Rotate"
msgstr "Okreni"
msgid "Rotate the pages of multiple PDF documents."
msgstr "Okreni stranice PDF dokumenata."
msgid "Rotate settings"
msgstr "Postavke okretanja"
msgid ""
"Double click to set pages you want to merge (ex: 2 or 5-23 or 2,5-7,12-)"
msgstr ""
"Dvostrukim klikom odredi stranice koje želiš sjediniti (npr. 2 ili 5-23 ili "
"2,5-7,12-)"
msgid "Add a blank page if page number is odd"
msgstr "Dodaj praznu stranicu, ukoliko je broj stranica neparan"
msgid ""
"Adds a blank page after each merged document if the document has an odd "
"number of pages"
msgstr ""
"Dodaje praznu stranicu nakon svakog sjedinjenog dokumenta, ukoliko dokument "
"ima neparan broj stranica"
msgid "Add a footer"
msgstr "Dodaj podnožje"
msgid "Adds a page footer with the name of the file the page belonged to."
msgstr ""
"Dodaje podnožje stranice s imemom datoteke kojoj je stranica pripadala."
msgid "Normalize pages size"
msgstr "Normaliziraj veličinu stranice"
msgid "Resizes all pages to have the same width as the first page."
msgstr "Postavlja širinu svih stranica na širinu prve stranice."
msgid "Merge fields"
msgstr "Sjedini polja"
msgid "Merge renaming existing fields"
msgstr "Sjedini preimenujući postojeća polja"
msgid "Flatten"
msgstr "Pretvori u slike"
msgid "Discard forms"
msgstr "Odbaci obrasce"
msgid "Interactive forms (AcroForms):"
msgstr "Interaktivni obrasci (AcroForms):"
msgid "What to do in case one or more input documents contain Acro Forms"
msgstr ""
"Što uraditi, ukoliko jedan ili više dokumenata unosa sadrže Acro obrasce"
msgid "Retain bookmarks"
msgstr "Zadrži oznake"
msgid "Create one entry for each merged document"
msgstr "Izradi jedan unos za svaki sjedinjeni dokument"
msgid "Retain bookmarks as one entry for each merged document"
msgstr "Zadrži oznake kao jedan unos za svaki sjedinjeni dokument"
msgid "Bookmarks handling:"
msgstr "Rukovanje oznakama:"
msgid "What to do in case one or more input documents contain bookmarks"
msgstr "Što uraditi, ukoliko jedan ili više dokumenata unosa sadrže oznake"
msgid "Don't generate"
msgstr "Nemoj izraditi"
msgid "Generate from file names"
msgstr "Izradi iz imena datoteka"
msgid "Generate from documents titles"
msgstr "Izradi iz naslova dokumenta"
msgid "Table of contents:"
msgstr "Stranica sadržaja:"
msgid ""
"Set if a table of contents should be added to the generated PDF document"
msgstr "Postavi, ako izrađenom PDF dokumentu želiš dodati stranicu sadržaja"
msgid "Merge"
msgstr "Sjedini"
msgid "Merge together multiple PDF documents or subsections of them."
msgstr "Sjedini više PDF dokumenata ili njihovih dijelova."
msgid "Merge settings"
msgstr "Postavke sjedinjavanja"
#, java-format
msgid "File \"{0}\" does not exist or is invalid"
msgstr "Datoteka „{0}” ne postoji ili nije ispravna"
msgid "Valid user password provided."
msgstr "Zadana je ispravna korisnička lozinka."
msgid "This document is encrypted, click to provide a password."
msgstr "Ovaj je dokument šifriran, klikni za upis lozinke."
msgid "An error has occurred, click for more details."
msgstr "Došlo je do pogreške, pritisni za detalje."
msgid "Security"
msgstr "Sigurnost"
msgid "Other"
msgstr "Ostalo"
msgid "Roundish"
msgstr "Okruglasto"
msgid "Directories"
msgstr "Mape"
msgid "Error saving log file."
msgstr "Pogreška tijekom spremanja datoteke log zapisnika."
#, java-format
msgid "File {0} saved."
msgstr "Datoteka {0} spremljena."
#, java-format
msgid "Invalid range: {0}."
msgstr "Neispravan raspon: {0}."
#, java-format
msgid ""
"Ambiguous page range definition: {0}. Use following formats: [n] or [n1-n2] "
"or [-n] or [n-]"
msgstr ""
"Definicija raspona stranica nie jednoznačna: {0}. Koristi sljedeće formate: "
"[n] ili [n1-n2] ili [-n] ili [n-]"
#, java-format
msgid "Invalid number: {0}."
msgstr "Neispravan broj: {0}."
#~ msgid "Split even pages"
#~ msgstr "Podijeli parne stranice"
#~ msgid "Split odd pages"
#~ msgstr "Podijeli neparne stranice"
#~ msgid "Output document pdf version:"
#~ msgstr "Pdf inačica izlaznog dokumenta:"
#~ msgid "Theme:"
#~ msgstr "Tema:"
#~ msgid "Never"
#~ msgstr "Nikada"
#~ msgid "Unable to get latest available version"
#~ msgstr "Ne mogu čitati posljednju dostupnu inačicu"
#~ msgid "Destination folder"
#~ msgstr "Odredišna mapa"
#~ msgid ""
#~ "Show a dialog box asking the user for confirmation when the \"overwrite\" is "
#~ "selected"
#~ msgstr ""
#~ "Prikaži dijaloški okvir za potvrdu korisnika kada je \"pisati preko\" "
#~ "označeno"
#~ msgid "Ask for confirmation when overwrite checkbox is selected"
#~ msgstr "Traži potvrdu kada je označena kučica za pisanje preko"
#~ msgid "Would you like to overwrite it?"
#~ msgstr "Želite li pisati preko?"
| {
"pile_set_name": "Github"
} |
var rb = require('crypto').randomBytes;
module.exports = function() {
return rb(16);
};
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
# A pipemenu base script/template for creating category based pipemenus
# written by Nathaniel Maia, December 2017 for ArchLabs
# Add something like this to your menu.xml
# <menu execute="SCRIPTNAME" id="NAME" label="PNAME"/>

# Menu id and display label passed to the menuStart helper.
NAME="settings"
PNAME="Settings"
# Separator text shown when none of the candidate applications are installed.
FAIL_MSG="No Apps Installed"
# Candidate commands and their menu labels: CMD[i] pairs with DESC[i], so
# the two arrays must stay the same length and in the same order.
# NOTE(review): the "Disk Uitlity" label is misspelled ("Utility").
CMD=(al-panel-chooser arandr nitrogen oomox-gui pavucontrol
gnome-system-monitor wpg rofi-theme-selector xfce4-power-manager-settings
gnome-disk-utility xfce4-appearance-settings qt5ct xfce4-settings-manager lxappearance)
DESC=("Change Panel/Dock" "Screen Settings" Wallpaper Oomox "Audio Settings"
"System Monitor" wpgtk "Rofi Theme" "Power Manager"
"Disk Uitlity" "Gtk Theme" "Qt Theme" "Settings Manager" LXappearance)

# Pull in the menuStart/menuItem/menuSeparator/menuEnd helpers; abort if
# the shared include cannot be sourced.
if ! . "/data/data/com.termux/files/usr/lib/archlabs/common/al-include.cfg" 2>/dev/null; then
    echo -e "[ERROR]: Failed to source /data/data/com.termux/files/usr/lib/archlabs/common/al-include.cfg" ; exit 1
fi

# Emit one menu item for every candidate command found on PATH.
menu_Body() {
    for ((i=0; i<${#CMD[@]}; i++)); do
        if hash "${CMD[$i]}" &>/dev/null; then
            menuItem "${DESC[$i]}" "${CMD[$i]}"
        fi
    done
}

menuStart "$NAME" "$PNAME"

# Probe whether at least one candidate exists; num stays unset otherwise
# (an unset num evaluates as 0 in the [[ ... -gt ... ]] test below).
for x in "${CMD[@]}"; do
    if hash "$x" &>/dev/null; then
        num=1 ; break
    fi
done

if [[ $num -gt 0 ]]; then
    menu_Body
else
    menuSeparator "$FAIL_MSG"
fi

menuEnd

exit 0
| {
"pile_set_name": "Github"
} |
// -*- C++ -*-
// Copyright (C) 2005-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
// Copyright (C) 2004 Ami Tavory and Vladimir Dreizin, IBM-HRL.
// Permission to use, copy, modify, sell, and distribute this software
// is hereby granted without fee, provided that the above copyright
// notice appears in all copies, and that both that copyright notice
// and this permission notice appear in supporting documentation. None
// of the above authors, nor IBM Haifa Research Laboratories, make any
// representation about the suitability of this software for any
// purpose. It is provided "as is" without express or implied
// warranty.
/**
* @file thin_heap_/constructors_destructor_fn_imps.hpp
* Contains an implementation for thin_heap_.
*/
PB_DS_CLASS_T_DEC
template<typename It>
void
PB_DS_CLASS_C_DEC::
copy_from_range(It first_it, It last_it)
{
  // Insert every element of [first_it, last_it) via push(), which
  // re-establishes the heap invariants (and m_p_max) after each insertion.
  while (first_it != last_it)
    push(*(first_it++));
  PB_DS_ASSERT_VALID((*this))
}
PB_DS_CLASS_T_DEC
PB_DS_CLASS_C_DEC::
thin_heap() : m_p_max(0)
{
  // Default-construct an empty heap; m_p_max (pointer to the maximum
  // node) stays null until the first insertion.
  initialize();
  PB_DS_ASSERT_VALID((*this))
}
PB_DS_CLASS_T_DEC
PB_DS_CLASS_C_DEC::
thin_heap(const Cmp_Fn& r_cmp_fn)
// Construct an empty heap that orders elements with the given comparator.
: base_type(r_cmp_fn), m_p_max(0)
{
  initialize();
  PB_DS_ASSERT_VALID((*this))
}
PB_DS_CLASS_T_DEC
PB_DS_CLASS_C_DEC::
thin_heap(const PB_DS_CLASS_C_DEC& other)
: base_type(other)
{
  initialize();
  // base_type's copy constructor cloned the node structure, but m_p_max
  // still refers to the source heap's notion of "maximum"; rescan the
  // copied root list to locate the largest element under Cmp_Fn.
  // (If the heap is empty, m_p_root is null and the loop body never runs.)
  m_p_max = base_type::m_p_root;
  for (node_pointer p_nd = base_type::m_p_root; p_nd != 0;
       p_nd = p_nd->m_p_next_sibling)
    if (Cmp_Fn::operator()(m_p_max->m_value, p_nd->m_value))
      m_p_max = p_nd;
  PB_DS_ASSERT_VALID((*this))
}
PB_DS_CLASS_T_DEC
void
PB_DS_CLASS_C_DEC::
swap(PB_DS_CLASS_C_DEC& other)
{
  PB_DS_ASSERT_VALID((*this))
  // Exchange all base-class state (node lists, comparator, size), then
  // the cached maximum pointers, so both heaps remain consistent.
  base_type::swap(other);
  std::swap(m_p_max, other.m_p_max);
  PB_DS_ASSERT_VALID((*this))
}
PB_DS_CLASS_T_DEC
PB_DS_CLASS_C_DEC::
// Node cleanup is handled by the base class destructor.
~thin_heap()
{ }
PB_DS_CLASS_T_DEC
void
PB_DS_CLASS_C_DEC::
// Reset the auxiliary rank array (used during consolidation) to all-null.
initialize()
{ std::fill(m_a_aux, m_a_aux + max_rank, static_cast<node_pointer>(0)); }
| {
"pile_set_name": "Github"
} |
Subject: re : entex transistion
thanks so much for the memo . i would like to reiterate my support on two key
issues :
1 ) . thu - best of luck on this new assignment . howard has worked hard and
done a great job ! please don ' t be shy on asking questions . entex is
critical to the texas business , and it is critical to our team that we are
timely and accurate .
2 ) . rita : thanks for setting up the account team . communication is
critical to our success , and i encourage you all to keep each other informed
at all times . the p & l impact to our business can be significant .
additionally , this is high profile , so we want to assure top quality .
thanks to all of you for all of your efforts . let me know if there is
anything i can do to help provide any additional support .
rita wynne
12 / 14 / 99 02 : 38 : 45 pm
to : janet h wallis / hou / ect @ ect , ami chokshi / corp / enron @ enron , howard b
camp / hou / ect @ ect , thu nguyen / hou / ect @ ect , kyle r lilly / hou / ect @ ect , stacey
neuweiler / hou / ect @ ect , george grant / hou / ect @ ect , julie meyers / hou / ect @ ect
cc : daren j farmer / hou / ect @ ect , kathryn cordes / hou / ect @ ect , rita
wynne / hou / ect , lisa csikos / hou / ect @ ect , brenda f herod / hou / ect @ ect , pamela
chambers / corp / enron @ enron
subject : entex transistion
the purpose of the email is to recap the kickoff meeting held on yesterday
with members from commercial and volume managment concernig the entex account :
effective january 2000 , thu nguyen ( x 37159 ) in the volume managment group ,
will take over the responsibility of allocating the entex contracts . howard
and thu began some training this month and will continue to transition the
account over the next few months . entex will be thu ' s primary account
especially during these first few months as she learns the allocations
process and the contracts .
howard will continue with his lead responsibilities within the group and be
available for questions or as a backup , if necessary ( thanks howard for all
your hard work on the account this year ! ) .
in the initial phases of this transition , i would like to organize an entex
" account " team . the team ( members from front office to back office ) would
meet at some point in the month to discuss any issues relating to the
scheduling , allocations , settlements , contracts , deals , etc . this hopefully
will give each of you a chance to not only identify and resolve issues before
the finalization process , but to learn from each other relative to your
respective areas and allow the newcomers to get up to speed on the account as
well . i would encourage everyone to attend these meetings initially as i
believe this is a critical part to the success of the entex account .
i will have my assistant to coordinate the initial meeting for early 1 / 2000 .
if anyone has any questions or concerns , please feel free to call me or stop
by . thanks in advance for everyone ' s cooperation . . . . . . . . . . .
julie - please add thu to the confirmations distributions list | {
"pile_set_name": "Github"
} |
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClass) DeepCopyInto(out *StorageClass) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ReclaimPolicy != nil {
		// The enclosing nil check guarantees *in != nil, so the former
		// "if *in == nil { *out = nil }" branch was dead code; allocate
		// and copy the pointee directly (matches current deepcopy-gen output).
		in, out := &in.ReclaimPolicy, &out.ReclaimPolicy
		*out = new(v1.PersistentVolumeReclaimPolicy)
		**out = **in
	}
	if in.MountOptions != nil {
		in, out := &in.MountOptions, &out.MountOptions
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.AllowVolumeExpansion != nil {
		in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion
		*out = new(bool)
		**out = **in
	}
	if in.VolumeBindingMode != nil {
		in, out := &in.VolumeBindingMode, &out.VolumeBindingMode
		*out = new(VolumeBindingMode)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass.
func (in *StorageClass) DeepCopy() *StorageClass {
	if in == nil {
		return nil
	}
	out := new(StorageClass)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageClass) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClassList) DeepCopyInto(out *StorageClassList) {
	// Shallow-copy value fields first, then deep-copy the item slice.
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items == nil {
		return
	}
	dup := make([]StorageClass, len(in.Items))
	for idx := range in.Items {
		in.Items[idx].DeepCopyInto(&dup[idx])
	}
	out.Items = dup
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList.
func (in *StorageClassList) DeepCopy() *StorageClassList {
	if in == nil {
		return nil
	}
	clone := new(StorageClassList)
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageClassList) DeepCopyObject() runtime.Object {
	if clone := in.DeepCopy(); clone != nil {
		return clone
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) {
	// Shallow-copy the value fields, then deep-copy each nested struct.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment.
func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
	if in == nil {
		return nil
	}
	clone := new(VolumeAttachment)
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
	if clone := in.DeepCopy(); clone != nil {
		return clone
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) {
	// Shallow-copy value fields first, then deep-copy the item slice.
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items == nil {
		return
	}
	dup := make([]VolumeAttachment, len(in.Items))
	for idx := range in.Items {
		in.Items[idx].DeepCopyInto(&dup[idx])
	}
	out.Items = dup
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList.
func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
	if in == nil {
		return nil
	}
	clone := new(VolumeAttachmentList)
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
	if clone := in.DeepCopy(); clone != nil {
		return clone
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) {
	*out = *in
	if in.PersistentVolumeName != nil {
		// The enclosing nil check guarantees *in != nil, so the former
		// "if *in == nil { *out = nil }" branch was dead code; allocate
		// and copy the string directly (matches current deepcopy-gen output).
		in, out := &in.PersistentVolumeName, &out.PersistentVolumeName
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource.
func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource {
	if in == nil {
		return nil
	}
	out := new(VolumeAttachmentSource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) {
	// Shallow-copy the value fields, then deep-copy the nested source.
	*out = *in
	in.Source.DeepCopyInto(&out.Source)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec.
func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec {
	if in == nil {
		return nil
	}
	clone := new(VolumeAttachmentSpec)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) {
	*out = *in
	if in.AttachmentMetadata != nil {
		in, out := &in.AttachmentMetadata, &out.AttachmentMetadata
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.AttachError != nil {
		// The enclosing nil check guarantees *in != nil, so the former
		// "if *in == nil { *out = nil }" branch was dead code; allocate
		// and deep-copy directly (matches current deepcopy-gen output).
		in, out := &in.AttachError, &out.AttachError
		*out = new(VolumeError)
		(*in).DeepCopyInto(*out)
	}
	if in.DetachError != nil {
		// Same dead-branch removal as AttachError above.
		in, out := &in.DetachError, &out.DetachError
		*out = new(VolumeError)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus.
func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
	if in == nil {
		return nil
	}
	out := new(VolumeAttachmentStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeError) DeepCopyInto(out *VolumeError) {
	// Shallow-copy the value fields, then deep-copy the timestamp.
	*out = *in
	in.Time.DeepCopyInto(&out.Time)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError.
func (in *VolumeError) DeepCopy() *VolumeError {
	if in == nil {
		return nil
	}
	clone := new(VolumeError)
	in.DeepCopyInto(clone)
	return clone
}
| {
"pile_set_name": "Github"
} |
{* Placeholder div that identifies this widget; its parent container's id
   (minus the "_content" suffix) is the target the AJAX reply replaces. *}
<div id="div_sub_{$rand_name}"></div>
<script language="javascript" type="application/javascript">
// Derive the enclosing container id so get_hax can replace it in place.
var id_{$rand_name} = document.getElementById('div_sub_{$rand_name}').parentNode.id.replace('_content', '');
// Submit the form below via AJAX instead of a normal page navigation.
function submit_{$rand_name}(){ldelim}
get_hax(
{ldelim}
url:document.forms['f{$rand_name}'].action,
method: document.forms['f{$rand_name}'].method,
form: 'f{$rand_name}',
id: id_{$rand_name} + '_content',
{rdelim}
);
{rdelim}
</script>
{* Multi-select of deletable types; the button posts the selection through
   the AJAX submit helper defined above. *}
<div>
<form method="post" name="f{$rand_name}" id="f{$rand_name}" action="/cabs/sclear.html?window=1" enctype="application/x-www-form-urlencoded">
<select name="types[]" multiple="multiple" style="width:100%; height: 500px;">
{foreach from=$items item=item name=items}
<option value="{$item->md5_type}">{$item->type}</option>
{/foreach}
</select>
<input type="button" onclick="submit_{$rand_name}();" value="удалить" style="width:100%;" />
</form>
</div>
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////
// $Id: eth_parser.v 5240 2009-03-14 01:50:42Z grg $
//
// Module: eth_parser.v
// Project: NF2.1
// Description: decides if the MAC dstof the ingress pkt is us, and if it's an
// ARP or IP packet. Assume that NUM_QUEUES < MIN_PKT_SIZE-2
//
///////////////////////////////////////////////////////////////////////////////
// Parses the Ethernet header of each ingress packet: latches the destination
// MAC and ethertype as the header words stream by, then runs a small FSM that
// compares the destination MAC against the rx-queue MAC registers. The
// per-packet result (for-us / ARP / IP / broadcast / destination port) is
// queued in a small FIFO for the downstream process block.
module eth_parser
  #(parameter DATA_WIDTH = 64,
    parameter NUM_QUEUES = 8,
    parameter NUM_QUEUES_WIDTH = log2(NUM_QUEUES)
    )
   (// --- Interface to the previous stage
    input  [DATA_WIDTH-1:0]        in_data,

    // --- Interface to process block
    output                         is_arp_pkt,
    output                         is_ip_pkt,
    output                         is_for_us,
    output                         is_broadcast,
    output [NUM_QUEUES_WIDTH-1:0]  mac_dst_port_num,
    input                          eth_parser_rd_info,
    output                         eth_parser_info_vld,

    // --- Interface to preprocess block
    input                          word_MAC_DA_HI,
    input                          word_MAC_DASA,
    input                          word_ETH_IP_VER,

    // --- Interface to registers
    input [47:0]                   mac_0,    // address of rx queue 0
    input [47:0]                   mac_1,    // address of rx queue 1
    input [47:0]                   mac_2,    // address of rx queue 2
    input [47:0]                   mac_3,    // address of rx queue 3

    // --- Misc
    input                          reset,
    input                          clk
    );

   // Ceiling log2; evaluated at elaboration time to size counters/ports.
   function integer log2;
      input integer number;
      begin
         log2=0;
         while(2**log2<number) begin
            log2=log2+1;
         end
      end
   endfunction // log2

   //------------------ Internal Parameter ---------------------------
   // Ethertype values recognized by the parser.
   parameter ETH_ARP = 16'h0806;
   parameter ETH_IP = 16'h0800;

   // MAC-search FSM states.
   parameter IDLE = 0;
   parameter DO_SEARCH = 1;

   //---------------------- Wires/Regs -------------------------------
   reg [47:0]                     dst_MAC;     // latched destination MAC of the current packet
   reg [47:0]                     mac_sel;     // queue MAC selected for comparison this cycle
   reg [15:0]                     ethertype;   // latched ethertype of the current packet
   reg                            search_req;  // one-cycle pulse: header captured, start search
   reg                            state, state_next;
   reg [log2(NUM_QUEUES/2):0]     mac_count, mac_count_next;
   reg                            wr_en;       // push the search result into the info FIFO
   reg                            port_found;  // destination MAC matched one of our queue MACs

   wire                           broadcast_bit;

   //----------------------- Modules ---------------------------------
   // Per-packet result FIFO decoupling the parser from the process block.
   // din packs {for_us, is_arp, is_ip, is_bcast, dst_port}; dst_port is
   // 2*mac_count (queue index shifted left by one).
   fallthrough_small_fifo #(.WIDTH(4+NUM_QUEUES_WIDTH), .MAX_DEPTH_BITS(2))
      eth_fifo
        (.din ({port_found,                              // is for us
                (ethertype==ETH_ARP),                    // is ARP
                (ethertype==ETH_IP),                     // is IP
                (broadcast_bit),                         // is broadcast
                {mac_count[log2(NUM_QUEUES/2)-1:0], 1'b0}}), // dst port num
         .wr_en (wr_en),                                 // Write enable
         .rd_en (eth_parser_rd_info),                    // Read the next word
         .dout ({is_for_us, is_arp_pkt, is_ip_pkt, is_broadcast, mac_dst_port_num}),
         .full (),
         .nearly_full (),
         .prog_full (),
         .empty (empty),
         .reset (reset),
         .clk (clk)
         );

   //------------------------ Logic ----------------------------------
   // NOTE(review): "empty" is only declared implicitly (as a 1-bit net) by
   // its use in the FIFO port connection above; relies on Verilog's
   // implicit-net rules.
   assign eth_parser_info_vld = !empty;

   // Bit 40 is the LSB of the destination MAC's first octet (the I/G group
   // bit), so this flags broadcast -- and any multicast -- destinations.
   assign broadcast_bit = dst_MAC[40];

   // Combinationally select which queue MAC to compare this cycle; out-of-
   // range counts select all-ones, which can never match a real unicast MAC.
   always @(*) begin
      mac_sel = mac_0;
      case(mac_count)
        0: mac_sel = mac_0;
        1: mac_sel = mac_1;
        2: mac_sel = mac_2;
        3: mac_sel = mac_3;
        4: mac_sel = ~48'h0;
      endcase // case(mac_count)
   end // always @ (*)

   /******************************************************************
    * Get the destination, source and ethertype of the pkt
    *****************************************************************/
   // Header capture: the preprocess block asserts word_* strobes as the
   // matching 64-bit words stream past. search_req pulses for exactly one
   // cycle after the ethertype word, kicking off the MAC search below.
   always @(posedge clk) begin
      if(reset) begin
         dst_MAC <= 0;
         ethertype <= 0;
         search_req <= 0;
      end
      else begin
         if(word_MAC_DA_HI) begin
            dst_MAC[47:16] <= in_data[DATA_WIDTH-1:DATA_WIDTH-32];
         end
         if(word_MAC_DASA) begin
            dst_MAC[15:0] <= in_data[31:16];
         end
         if(word_ETH_IP_VER) begin
            ethertype <= in_data[31:16];
            search_req <= 1;
         end
         else begin
            search_req <= 0;
         end
      end // else: !if(reset)
   end // always @ (posedge clk)

   /*************************************************************
    * check to see if the destination port matches any of our port
    * MAC addresses. We need to make sure that this search is
    * completed before the end of the packet.
    *************************************************************/
   // Search FSM: counts mac_count down from NUM_QUEUES/2, comparing one
   // candidate MAC per cycle. A match (or the broadcast bit) writes the
   // result immediately; exhausting the candidates writes a not-for-us
   // result. wr_en pushes exactly one FIFO entry per packet.
   always @(*) begin
      state_next = state;
      mac_count_next = mac_count;
      wr_en = 0;
      port_found = 0;

      case(state)
        IDLE: begin
           if(search_req) begin
              state_next = DO_SEARCH;
              mac_count_next = NUM_QUEUES/2;
           end
        end

        DO_SEARCH: begin
           mac_count_next = mac_count-1;
           if(mac_sel==dst_MAC || broadcast_bit) begin
              wr_en = 1;
              state_next = IDLE;
              port_found = 1;
           end
           else if(mac_count == 0) begin
              state_next = IDLE;
              wr_en = 1;
           end
        end
      endcase // case(state)
   end // always @(*)

   // Sequential half of the FSM.
   always @(posedge clk) begin
      if(reset) begin
         state <= IDLE;
         mac_count <= 0;
      end
      else begin
         state <= state_next;
         mac_count <= mac_count_next;
      end
   end

   // synthesis translate_off
   // Simulation-only assertion: a new header must not arrive while the
   // previous packet's MAC search is still in flight.
   always @(posedge clk) begin
      if(state==DO_SEARCH && word_MAC_DA_HI) begin
         $display("%t %m ERROR: Latched new address before the last search was done!", $time);
         $stop;
      end
   end
   // synthesis translate_on

endmodule // eth_parser
| {
"pile_set_name": "Github"
} |
/* NSPAPI.H -- winsock 1.1
* not supported on win95
*
* Copyright (C) 2001 Stefan Leichter
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifndef _WINE_NSPAPI_
#define _WINE_NSPAPI_
#ifdef __cplusplus
extern "C" {
#endif /* defined(__cplusplus) */
/*
 * constants
 */
/* XP_* bit flags for PROTOCOL_INFO.dwServiceFlags, describing the
 * capabilities of a transport protocol (winsock 1.1). */
#define XP_CONNECTIONLESS 0x00000001
#define XP_GUARANTEED_DELIVERY 0x00000002
#define XP_GUARANTEED_ORDER 0x00000004
#define XP_MESSAGE_ORIENTED 0x00000008
#define XP_PSEUDO_STREAM 0x00000010
#define XP_GRACEFUL_CLOSE 0x00000020
#define XP_EXPEDITED_DATA 0x00000040
#define XP_CONNECT_DATA 0x00000080
#define XP_DISCONNECT_DATA 0x00000100
#define XP_SUPPORTS_BROADCAST 0x00000200
#define XP_SUPPORTS_MULTICAST 0x00000400
#define XP_BANDWIDTH_ALLOCATION 0x00000800
#define XP_FRAGMENTATION 0x00001000
#define XP_ENCRYPTS 0x00002000
/*
* structures
*/
/* Describes a transport protocol (ANSI variant). */
typedef struct _PROTOCOL_INFOA
{
    DWORD dwServiceFlags;  /* XP_* capability bit flags (defined above) */
    INT   iAddressFamily;  /* address family the protocol uses */
    INT   iMaxSockAddr;    /* maximum sockaddr length, in bytes */
    INT   iMinSockAddr;    /* minimum sockaddr length, in bytes */
    INT   iSocketType;     /* socket type for the protocol */
    INT   iProtocol;       /* protocol identifier */
    DWORD dwMessageSize;   /* maximum message size supported by the protocol */
    LPSTR lpProtocol;      /* protocol name (ANSI string) */
} PROTOCOL_INFOA, *PPROTOCOL_INFOA, *LPPROTOCOL_INFOA;

/* Unicode variant of PROTOCOL_INFOA; only the name string type differs. */
typedef struct _PROTOCOL_INFOW
{
    DWORD dwServiceFlags;
    INT   iAddressFamily;
    INT   iMaxSockAddr;
    INT   iMinSockAddr;
    INT   iSocketType;
    INT   iProtocol;
    DWORD dwMessageSize;
    LPWSTR lpProtocol;     /* protocol name (Unicode string) */
} PROTOCOL_INFOW, *PPROTOCOL_INFOW, *LPPROTOCOL_INFOW;

DECL_WINELIB_TYPE_AW(PROTOCOL_INFO)
DECL_WINELIB_TYPE_AW(PPROTOCOL_INFO)
DECL_WINELIB_TYPE_AW(LPPROTOCOL_INFO)
/* One transport address at which a network service can be reached. */
typedef struct _SERVICE_ADDRESS
{
    DWORD dwAddressType;     /* type/family of the address in lpAddress */
    DWORD dwAddressFlags;    /* flags describing the address */
    DWORD dwAddressLength;   /* size of the lpAddress buffer, in bytes */
    DWORD dwPrincipalLength; /* size of the lpPrincipal buffer, in bytes */
    BYTE* lpAddress;         /* the address data itself */
    BYTE* lpPrincipal;       /* security principal data for the address */
} SERVICE_ADDRESS, *PSERVICE_ADDRESS, *LPSERVICE_ADDRESS;

/* Counted, variable-length array of SERVICE_ADDRESS entries. */
typedef struct _SERVICE_ADDRESSES
{
    DWORD dwAddressCount;          /* number of valid entries in Addresses */
    SERVICE_ADDRESS Addresses[1];  /* flexible array (allocated larger) */
} SERVICE_ADDRESSES, *PSERVICE_ADDRESSES, *LPSERVICE_ADDRESSES;
/* Describes a network service for SetService()/GetService() (ANSI variant). */
typedef struct _SERVICE_INFOA
{
    LPGUID lpServiceType;                 /* GUID identifying the service type */
    LPSTR lpServiceName;                  /* service name */
    LPSTR lpComment;                      /* comment/description string */
    LPSTR lpLocale;                       /* locale string */
    DWORD dwDisplayHint;                  /* hint for displaying the service */
    DWORD dwVersion;                      /* service version */
    DWORD dwTime;                         /* time value; semantics per SDK -- verify */
    LPSTR lpMachineName;                  /* name of the machine hosting the service */
    LPSERVICE_ADDRESSES lpServiceAddress; /* addresses the service listens on */
    BLOB ServiceSpecificInfo;
} SERVICE_INFOA, *PSERVICE_INFOA, *LPSERVICE_INFOA;

/* Unicode variant of SERVICE_INFOA; all string members are wide strings. */
typedef struct _SERVICE_INFOW
{
    LPGUID lpServiceType;
    LPWSTR lpServiceName;
    LPWSTR lpComment;
    LPWSTR lpLocale;
    DWORD dwDisplayHint;
    DWORD dwVersion;
    DWORD dwTime;
    /* Fixed: this was declared LPSTR, but the Unicode structure uses LPWSTR
     * for lpMachineName in the Windows SDK definition of SERVICE_INFOW.
     * Pointer size is unchanged, so the struct layout/ABI is unaffected. */
    LPWSTR lpMachineName;
    LPSERVICE_ADDRESSES lpServiceAddress;
    BLOB ServiceSpecificInfo; /* May point to SERVICE_TYPE_INFO_ABS */
} SERVICE_INFOW, *PSERVICE_INFOW, *LPSERVICE_INFOW;

DECL_WINELIB_TYPE_AW(SERVICE_INFO)
DECL_WINELIB_TYPE_AW(PSERVICE_INFO)
DECL_WINELIB_TYPE_AW(LPSERVICE_INFO)
/* A single named value attached to a service type (ANSI variant). */
typedef struct _SERVICE_TYPE_VALUE_ABSA
{
    DWORD dwNameSpace; /* Name space or set of name spaces */
    DWORD dwValueType; /* Type of the value data */
    DWORD dwValueSize; /* Size of the value data */
    LPSTR lpValueName; /* Name of the value */
    PVOID lpValue;     /* Pointer to the value data */
} SERVICE_TYPE_VALUE_ABSA, *PSERVICE_TYPE_VALUE_ABSA, *LPSERVICE_TYPE_VALUE_ABSA;

/* Unicode variant of SERVICE_TYPE_VALUE_ABSA. */
typedef struct _SERVICE_TYPE_VALUE_ABSW
{
    DWORD dwNameSpace;  /* Name space or set of name spaces */
    DWORD dwValueType;  /* Type of the value data */
    DWORD dwValueSize;  /* Size of the value data */
    LPWSTR lpValueName; /* Name of the value */
    PVOID lpValue;      /* Pointer to the value data */
} SERVICE_TYPE_VALUE_ABSW, *PSERVICE_TYPE_VALUE_ABSW, *LPSERVICE_TYPE_VALUE_ABSW;

DECL_WINELIB_TYPE_AW(SERVICE_TYPE_VALUE_ABS)
DECL_WINELIB_TYPE_AW(PSERVICE_TYPE_VALUE_ABS)
DECL_WINELIB_TYPE_AW(LPSERVICE_TYPE_VALUE_ABS)

/* Service-type description: a name plus a counted, variable-length array
 * of SERVICE_TYPE_VALUE_ABS entries (ANSI variant). */
typedef struct _SERVICE_TYPE_INFO_ABSA
{
    LPSTR lpTypeName;                  /* Name of the network service type */
    DWORD dwValueCount;                /* Number of SERVICE_TYPE_VALUE_ABS structures */
    SERVICE_TYPE_VALUE_ABSA Values[1]; /* Array of SERVICE_TYPE_VALUE_ABS structures */
} SERVICE_TYPE_INFO_ABSA, *PSERVICE_TYPE_INFO_ABSA, *LPSERVICE_TYPE_INFO_ABSA;

/* Unicode variant of SERVICE_TYPE_INFO_ABSA. */
typedef struct _SERVICE_TYPE_INFO_ABSW
{
    LPWSTR lpTypeName;                 /* Name of the network service type */
    DWORD dwValueCount;                /* Number of SERVICE_TYPE_VALUE_ABS structures */
    SERVICE_TYPE_VALUE_ABSW Values[1]; /* Array of SERVICE_TYPE_VALUE_ABS structures */
} SERVICE_TYPE_INFO_ABSW, *PSERVICE_TYPE_INFO_ABSW, *LPSERVICE_TYPE_INFO_ABSW;

DECL_WINELIB_TYPE_AW(SERVICE_TYPE_INFO_ABS)
DECL_WINELIB_TYPE_AW(PSERVICE_TYPE_INFO_ABS)
DECL_WINELIB_TYPE_AW(LPSERVICE_TYPE_INFO_ABS)

/* Callback invoked when an asynchronous service operation completes. */
typedef void (*LPSERVICE_CALLBACK_PROC)(LPARAM lParam, HANDLE hAsyncTaskHandle);

/* Parameters for asynchronous operation of the service functions below. */
typedef struct _SERVICE_ASYNC_INFO
{
    LPSERVICE_CALLBACK_PROC lpServiceCallbackProc; /* completion callback */
    LPARAM lParam;                                 /* caller context passed to the callback */
    HANDLE hAsyncTaskHandle;                       /* handle identifying the async task */
} SERVICE_ASYNC_INFO, *PSERVICE_ASYNC_INFO, *LPSERVICE_ASYNC_INFO;
/*
 * function prototypes
 */
/* Resolves a network service name to one or more transport addresses. */
INT WINAPI GetAddressByNameA(DWORD dwNameSpace, LPGUID lpServiceType, LPSTR lpServiceName,
		LPINT lpiProtocols, DWORD dwResolution, LPSERVICE_ASYNC_INFO lpServiceAsyncInfo,
		LPVOID lpCsaddrBuffer, LPDWORD lpdwBufferLength, LPSTR lpAliasBuffer,
		LPDWORD lpdwAliasBufferLength);
INT WINAPI GetAddressByNameW(DWORD dwNameSpace, LPGUID lpServiceType, LPWSTR lpServiceName,
		LPINT lpiProtocols, DWORD dwResolution, LPSERVICE_ASYNC_INFO lpServiceAsyncInfo,
		LPVOID lpCsaddrBuffer, LPDWORD lpdwBufferLength, LPWSTR lpAliasBuffer,
		LPDWORD lpdwAliasBufferLength);
#define GetAddressByName WINELIB_NAME_AW(GetAddressByName)

/* Looks up the GUID of a service type from its name. */
INT WINAPI GetTypeByNameA(LPSTR lpServiceName, LPGUID lpServiceType);
INT WINAPI GetTypeByNameW(LPWSTR lpServiceName, LPGUID lpServiceType);
#define GetTypeByName WINELIB_NAME_AW(GetTypeByName)

/* Registers or deregisters a network service within a name space. */
INT WINAPI SetServiceA(DWORD dwNameSpace, DWORD dwOperation, DWORD dwFlags, LPSERVICE_INFOA lpServiceInfo,
		LPSERVICE_ASYNC_INFO lpServiceAsyncInfo, LPDWORD lpdwStatusFlags);
INT WINAPI SetServiceW(DWORD dwNameSpace, DWORD dwOperation, DWORD dwFlags, LPSERVICE_INFOW lpServiceInfo,
		LPSERVICE_ASYNC_INFO lpServiceAsyncInfo, LPDWORD lpdwStatusFlags);
#define SetService WINELIB_NAME_AW(SetService)

/* Retrieves information about a registered network service. */
INT WINAPI GetServiceA(DWORD dwNameSpace, LPGUID lpGuid, LPSTR lpServiceName,
		DWORD dwProperties, LPVOID lpBuffer, LPDWORD lpdwBufferSize,
		LPSERVICE_ASYNC_INFO lpServiceAsyncInfo);
/* NOTE(review): lpServiceName is declared LPSTR here even in the W variant;
 * the Windows SDK declares GetServiceW with LPWSTR -- confirm before changing,
 * since callers in this tree may rely on the current declaration. */
INT WINAPI GetServiceW(DWORD dwNameSpace, LPGUID lpGuid, LPSTR lpServiceName,
		DWORD dwProperties, LPVOID lpBuffer, LPDWORD lpdwBufferSize,
		LPSERVICE_ASYNC_INFO lpServiceAsyncInfo);
#define GetService WINELIB_NAME_AW(GetService)
#ifdef __cplusplus
} /* extern "C" */
#endif /* defined(__cplusplus) */
#endif /* _WINE_NSPAPI_ */
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2014 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl.lazy.parallel;
import java.util.Comparator;
import com.gs.collections.api.ParallelIterable;
import com.gs.collections.api.RichIterable;
import com.gs.collections.api.bag.MutableBag;
import com.gs.collections.api.bag.sorted.MutableSortedBag;
import com.gs.collections.api.block.function.Function;
import com.gs.collections.api.block.function.Function0;
import com.gs.collections.api.block.function.Function2;
import com.gs.collections.api.block.function.primitive.DoubleFunction;
import com.gs.collections.api.block.function.primitive.FloatFunction;
import com.gs.collections.api.block.function.primitive.IntFunction;
import com.gs.collections.api.block.function.primitive.LongFunction;
import com.gs.collections.api.block.predicate.Predicate;
import com.gs.collections.api.block.predicate.Predicate2;
import com.gs.collections.api.block.procedure.Procedure;
import com.gs.collections.api.block.procedure.Procedure2;
import com.gs.collections.api.list.MutableList;
import com.gs.collections.api.map.MapIterable;
import com.gs.collections.api.map.MutableMap;
import com.gs.collections.api.map.sorted.MutableSortedMap;
import com.gs.collections.api.set.MutableSet;
import com.gs.collections.api.set.sorted.MutableSortedSet;
/**
 * A serial implementation of {@link ParallelIterable} that performs no actual
 * parallelism: every operation is forwarded unchanged to a backing
 * {@link RichIterable} delegate and executes on the calling thread.
 *
 * @param <T>  the element type
 * @param <RI> the concrete type of the backing iterable
 */
public abstract class NonParallelIterable<T, RI extends RichIterable<T>> implements ParallelIterable<T>
{
    // The backing iterable that receives every forwarded call.
    protected final RI delegate;
    protected NonParallelIterable(RI delegate)
    {
        this.delegate = delegate;
    }
    // ---- Iteration ----
    public void forEach(Procedure<? super T> procedure)
    {
        this.delegate.forEach(procedure);
    }
    public <P> void forEachWith(Procedure2<? super T, ? super P> procedure, P parameter)
    {
        this.delegate.forEachWith(procedure, parameter);
    }
    // ---- Searching and counting ----
    public T detect(Predicate<? super T> predicate)
    {
        return this.delegate.detect(predicate);
    }
    public <P> T detectWith(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return this.delegate.detectWith(predicate, parameter);
    }
    public T detectIfNone(Predicate<? super T> predicate, Function0<? extends T> function)
    {
        return this.delegate.detectIfNone(predicate, function);
    }
    public <P> T detectWithIfNone(Predicate2<? super T, ? super P> predicate, P parameter, Function0<? extends T> function)
    {
        return this.delegate.detectWithIfNone(predicate, parameter, function);
    }
    public int count(Predicate<? super T> predicate)
    {
        return this.delegate.count(predicate);
    }
    public <P> int countWith(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return this.delegate.countWith(predicate, parameter);
    }
    public boolean anySatisfy(Predicate<? super T> predicate)
    {
        return this.delegate.anySatisfy(predicate);
    }
    public <P> boolean anySatisfyWith(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return this.delegate.anySatisfyWith(predicate, parameter);
    }
    public boolean allSatisfy(Predicate<? super T> predicate)
    {
        return this.delegate.allSatisfy(predicate);
    }
    public <P> boolean allSatisfyWith(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return this.delegate.allSatisfyWith(predicate, parameter);
    }
    public boolean noneSatisfy(Predicate<? super T> predicate)
    {
        return this.delegate.noneSatisfy(predicate);
    }
    public <P> boolean noneSatisfyWith(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return this.delegate.noneSatisfyWith(predicate, parameter);
    }
    // ---- Conversion to mutable collections ----
    public MutableList<T> toList()
    {
        return this.delegate.toList();
    }
    public MutableList<T> toSortedList()
    {
        return this.delegate.toSortedList();
    }
    public MutableList<T> toSortedList(Comparator<? super T> comparator)
    {
        return this.delegate.toSortedList(comparator);
    }
    public <V extends Comparable<? super V>> MutableList<T> toSortedListBy(Function<? super T, ? extends V> function)
    {
        return this.delegate.toSortedListBy(function);
    }
    public MutableSet<T> toSet()
    {
        return this.delegate.toSet();
    }
    public MutableSortedSet<T> toSortedSet()
    {
        return this.delegate.toSortedSet();
    }
    public MutableSortedSet<T> toSortedSet(Comparator<? super T> comparator)
    {
        return this.delegate.toSortedSet(comparator);
    }
    public <V extends Comparable<? super V>> MutableSortedSet<T> toSortedSetBy(Function<? super T, ? extends V> function)
    {
        return this.delegate.toSortedSetBy(function);
    }
    public MutableBag<T> toBag()
    {
        return this.delegate.toBag();
    }
    public MutableSortedBag<T> toSortedBag()
    {
        return this.delegate.toSortedBag();
    }
    public MutableSortedBag<T> toSortedBag(Comparator<? super T> comparator)
    {
        return this.delegate.toSortedBag(comparator);
    }
    public <V extends Comparable<? super V>> MutableSortedBag<T> toSortedBagBy(Function<? super T, ? extends V> function)
    {
        return this.delegate.toSortedBagBy(function);
    }
    public <NK, NV> MutableMap<NK, NV> toMap(Function<? super T, ? extends NK> keyFunction, Function<? super T, ? extends NV> valueFunction)
    {
        return this.delegate.toMap(keyFunction, valueFunction);
    }
    public <NK, NV> MutableSortedMap<NK, NV> toSortedMap(Function<? super T, ? extends NK> keyFunction, Function<? super T, ? extends NV> valueFunction)
    {
        return this.delegate.toSortedMap(keyFunction, valueFunction);
    }
    public <NK, NV> MutableSortedMap<NK, NV> toSortedMap(Comparator<? super NK> comparator, Function<? super T, ? extends NK> keyFunction, Function<? super T, ? extends NV> valueFunction)
    {
        return this.delegate.toSortedMap(comparator, keyFunction, valueFunction);
    }
    public Object[] toArray()
    {
        return this.delegate.toArray();
    }
    public <T1> T1[] toArray(T1[] target)
    {
        return this.delegate.toArray(target);
    }
    // ---- Min / max and aggregation ----
    public T min(Comparator<? super T> comparator)
    {
        return this.delegate.min(comparator);
    }
    public T max(Comparator<? super T> comparator)
    {
        return this.delegate.max(comparator);
    }
    public T min()
    {
        return this.delegate.min();
    }
    public T max()
    {
        return this.delegate.max();
    }
    public <V extends Comparable<? super V>> T minBy(Function<? super T, ? extends V> function)
    {
        return this.delegate.minBy(function);
    }
    public <V extends Comparable<? super V>> T maxBy(Function<? super T, ? extends V> function)
    {
        return this.delegate.maxBy(function);
    }
    public long sumOfInt(IntFunction<? super T> function)
    {
        return this.delegate.sumOfInt(function);
    }
    public double sumOfFloat(FloatFunction<? super T> function)
    {
        return this.delegate.sumOfFloat(function);
    }
    public long sumOfLong(LongFunction<? super T> function)
    {
        return this.delegate.sumOfLong(function);
    }
    public double sumOfDouble(DoubleFunction<? super T> function)
    {
        return this.delegate.sumOfDouble(function);
    }
    // ---- String rendering ----
    @Override
    public String toString()
    {
        return this.delegate.toString();
    }
    public String makeString()
    {
        return this.delegate.makeString();
    }
    public String makeString(String separator)
    {
        return this.delegate.makeString(separator);
    }
    public String makeString(String start, String separator, String end)
    {
        return this.delegate.makeString(start, separator, end);
    }
    public void appendString(Appendable appendable)
    {
        this.delegate.appendString(appendable);
    }
    public void appendString(Appendable appendable, String separator)
    {
        this.delegate.appendString(appendable, separator);
    }
    public void appendString(Appendable appendable, String start, String separator, String end)
    {
        this.delegate.appendString(appendable, start, separator, end);
    }
    // ---- Grouping and aggregation into maps ----
    public <V> MapIterable<V, T> groupByUniqueKey(Function<? super T, ? extends V> function)
    {
        return this.delegate.groupByUniqueKey(function);
    }
    public <K, V> MapIterable<K, V> aggregateInPlaceBy(Function<? super T, ? extends K> groupBy, Function0<? extends V> zeroValueFactory, Procedure2<? super V, ? super T> mutatingAggregator)
    {
        return this.delegate.aggregateInPlaceBy(groupBy, zeroValueFactory, mutatingAggregator);
    }
    public <K, V> MapIterable<K, V> aggregateBy(Function<? super T, ? extends K> groupBy, Function0<? extends V> zeroValueFactory, Function2<? super V, ? super T, ? extends V> nonMutatingAggregator)
    {
        return this.delegate.aggregateBy(groupBy, zeroValueFactory, nonMutatingAggregator);
    }
}
| {
"pile_set_name": "Github"
} |
// cgo -godefs types_darwin.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,darwin
package unix
const (
sizeofPtr = 0x8
sizeofShort = 0x2
sizeofInt = 0x4
sizeofLong = 0x8
sizeofLongLong = 0x8
)
type (
_C_short int16
_C_int int32
_C_long int64
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int32
Pad_cgo_0 [4]byte
}
type Timeval32 struct {
Sec int32
Usec int32
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
Pad_cgo_0 [4]byte
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
}
type Statfs_t struct {
Bsize uint32
Iosize int32
Blocks uint64
Bfree uint64
Bavail uint64
Files uint64
Ffree uint64
Fsid Fsid
Owner uint32
Type uint32
Flags uint32
Fssubtype uint32
Fstypename [16]int8
Mntonname [1024]int8
Mntfromname [1024]int8
Reserved [8]uint32
}
type Flock_t struct {
Start int64
Len int64
Pid int32
Type int16
Whence int16
}
type Fstore_t struct {
Flags uint32
Posmode int32
Offset int64
Length int64
Bytesalloc int64
}
type Radvisory_t struct {
Offset int64
Count int32
Pad_cgo_0 [4]byte
}
type Fbootstraptransfer_t struct {
Offset int64
Length uint64
Buffer *byte
}
type Log2phys_t struct {
Flags uint32
Pad_cgo_0 [8]byte
Pad_cgo_1 [8]byte
}
type Fsid struct {
Val [2]int32
}
type Dirent struct {
Ino uint64
Seekoff uint64
Reclen uint16
Namlen uint16
Type uint8
Name [1024]int8
Pad_cgo_0 [3]byte
}
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]int8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [104]int8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [12]int8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [92]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint64
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Pad_cgo_0 [4]byte
Iov *Iovec
Iovlen int32
Pad_cgo_1 [4]byte
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type Inet4Pktinfo struct {
Ifindex uint32
Spec_dst [4]byte /* in_addr */
Addr [4]byte /* in_addr */
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x6c
SizeofSockaddrUnix = 0x6a
SizeofSockaddrDatalink = 0x14
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x30
SizeofCmsghdr = 0xc
SizeofInet4Pktinfo = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
)
const (
PTRACE_TRACEME = 0x0
PTRACE_CONT = 0x7
PTRACE_KILL = 0x8
)
type Kevent_t struct {
Ident uint64
Filter int16
Flags uint16
Fflags uint32
Data int64
Udata *byte
}
type FdSet struct {
Bits [32]int32
}
const (
SizeofIfMsghdr = 0x70
SizeofIfData = 0x60
SizeofIfaMsghdr = 0x14
SizeofIfmaMsghdr = 0x10
SizeofIfmaMsghdr2 = 0x14
SizeofRtMsghdr = 0x5c
SizeofRtMetrics = 0x38
)
type IfMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data IfData
}
type IfData struct {
Type uint8
Typelen uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Recvquota uint8
Xmitquota uint8
Unused1 uint8
Mtu uint32
Metric uint32
Baudrate uint32
Ipackets uint32
Ierrors uint32
Opackets uint32
Oerrors uint32
Collisions uint32
Ibytes uint32
Obytes uint32
Imcasts uint32
Omcasts uint32
Iqdrops uint32
Noproto uint32
Recvtiming uint32
Xmittiming uint32
Lastchange Timeval32
Unused2 uint32
Hwassist uint32
Reserved1 uint32
Reserved2 uint32
}
type IfaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Metric int32
}
type IfmaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
}
type IfmaMsghdr2 struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Refcount int32
}
type RtMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Pad_cgo_0 [2]byte
Flags int32
Addrs int32
Pid int32
Seq int32
Errno int32
Use int32
Inits uint32
Rmx RtMetrics
}
type RtMetrics struct {
Locks uint32
Mtu uint32
Hopcount uint32
Expire int32
Recvpipe uint32
Sendpipe uint32
Ssthresh uint32
Rtt uint32
Rttvar uint32
Pksent uint32
Filler [4]uint32
}
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
SizeofBpfHdr = 0x14
)
type BpfVersion struct {
Major uint16
Minor uint16
}
type BpfStat struct {
Recv uint32
Drop uint32
}
type BpfProgram struct {
Len uint32
Pad_cgo_0 [4]byte
Insns *BpfInsn
}
type BpfInsn struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type BpfHdr struct {
Tstamp Timeval32
Caplen uint32
Datalen uint32
Hdrlen uint16
Pad_cgo_0 [2]byte
}
type Termios struct {
Iflag uint64
Oflag uint64
Cflag uint64
Lflag uint64
Cc [20]uint8
Pad_cgo_0 [4]byte
Ispeed uint64
Ospeed uint64
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
const (
AT_FDCWD = -0x2
AT_REMOVEDIR = 0x80
AT_SYMLINK_FOLLOW = 0x40
AT_SYMLINK_NOFOLLOW = 0x20
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLERR = 0x8
POLLHUP = 0x10
POLLIN = 0x1
POLLNVAL = 0x20
POLLOUT = 0x4
POLLPRI = 0x2
POLLRDBAND = 0x80
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
)
| {
"pile_set_name": "Github"
} |
/*
Simple DirectMedia Layer
Copyright (C) 1997-2019 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"
#ifndef SDL_x11clipboard_h_
#define SDL_x11clipboard_h_
/* Replace the X11 clipboard contents with the given text. */
extern int X11_SetClipboardText(_THIS, const char *text);
/* Return the current clipboard text; ownership of the returned buffer
 * presumably passes to the caller -- NOTE(review): confirm against callers. */
extern char *X11_GetClipboardText(_THIS);
/* Return SDL_TRUE if the clipboard currently holds any text. */
extern SDL_bool X11_HasClipboardText(_THIS);
/* Return the X11 Atom used for SDL's cut-buffer clipboard type on 'display'. */
extern Atom X11_GetSDLCutBufferClipboardType(Display *display);
#endif /* SDL_x11clipboard_h_ */
/* vi: set ts=4 sw=4 expandtab: */
| {
"pile_set_name": "Github"
} |
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "bOChJSNXtC9g"
},
"source": [
"# PyTorch"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "OLIxEDq6VhvZ"
},
"source": [
"In this lesson we'll learn about PyTorch which is a machine learning library used to build dynamic neural networks. We'll learn about the basics, like creating and using Tensors, in this lesson but we'll be making models with it in the next lesson.\n",
"\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "VoMq0eFRvugb"
},
"source": [
"# Tensor basics"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "code",
"id": "0-dXQiLlTIgz",
"outputId": "d4ed17af-40a8-41db-ba6e-825ff9db2187"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.0.0)\n"
]
}
],
"source": [
"# Load PyTorch library\n",
"!pip3 install torch"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "rX7Vs1JxL9wX"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 119
},
"colab_type": "code",
"id": "Nv0xryLkKujV",
"outputId": "d46d5e58-2195-40a8-841c-26b627541a83"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Type: torch.FloatTensor\n",
"Size: torch.Size([3, 4])\n",
"Values: \n",
"tensor([[1.1744e-35, 0.0000e+00, 2.8026e-44, 0.0000e+00],\n",
" [ nan, 0.0000e+00, 1.3733e-14, 4.7429e+30],\n",
" [1.9431e-19, 4.7429e+30, 5.0938e-14, 0.0000e+00]])\n"
]
}
],
"source": [
"# Creating a zero tensor\n",
"x = torch.Tensor(3, 4)\n",
"print(\"Type: {}\".format(x.type()))\n",
"print(\"Size: {}\".format(x.shape))\n",
"print(\"Values: \\n{}\".format(x))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 51
},
"colab_type": "code",
"id": "vnyzY4PHL7c5",
"outputId": "70ed373d-e7e0-43cd-e732-51be86377721"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 0.7434, -1.0611, -0.3752],\n",
" [ 0.2613, -1.7051, 0.9118]])\n"
]
}
],
"source": [
"# Creating a random tensor\n",
"x = torch.randn(2, 3) # normal distribution (rand(2,3) -> uniform distribution)\n",
"print (x)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 85
},
"colab_type": "code",
"id": "DVwGNeKxMXI8",
"outputId": "6a185aa3-96f2-4e29-b116-3de3025cff4d"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[0., 0., 0.],\n",
" [0., 0., 0.]])\n",
"tensor([[1., 1., 1.],\n",
" [1., 1., 1.]])\n"
]
}
],
"source": [
"# Zero and Ones tensor\n",
"x = torch.zeros(2, 3)\n",
"print (x)\n",
"x = torch.ones(2, 3)\n",
"print (x)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 85
},
"colab_type": "code",
"id": "BPjHnDmCMXLm",
"outputId": "c14c494e-b714-4983-eb90-665064830a14"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 3])\n",
"Values: \n",
"tensor([[1., 2., 3.],\n",
" [4., 5., 6.]])\n"
]
}
],
"source": [
"# List → Tensor\n",
"x = torch.Tensor([[1, 2, 3],[4, 5, 6]])\n",
"print(\"Size: {}\".format(x.shape)) \n",
"print(\"Values: \\n{}\".format(x))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 85
},
"colab_type": "code",
"id": "mG4-CHkgMXOE",
"outputId": "2b9ed2e5-9862-480e-d0ce-d231676d7f49"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 3])\n",
"Values: \n",
"tensor([[0.0372, 0.6757, 0.9554],\n",
" [0.5651, 0.2336, 0.8303]], dtype=torch.float64)\n"
]
}
],
"source": [
"# NumPy array → Tensor\n",
"x = torch.from_numpy(np.random.rand(2, 3))\n",
"print(\"Size: {}\".format(x.shape)) \n",
"print(\"Values: \\n{}\".format(x))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 51
},
"colab_type": "code",
"id": "L8X2-5cqMXRA",
"outputId": "af1c82ab-b8d7-4ea6-e142-7f8ed50fda40"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Type: torch.FloatTensor\n",
"Type: torch.LongTensor\n"
]
}
],
"source": [
"# Changing tensor type\n",
"x = torch.Tensor(3, 4)\n",
"print(\"Type: {}\".format(x.type()))\n",
"x = x.long()\n",
"print(\"Type: {}\".format(x.type()))"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "S2BRPaMvPbe3"
},
"source": [
"# Tensor operations"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 85
},
"colab_type": "code",
"id": "Xrn8I76TMXT1",
"outputId": "556b9d7f-79da-415c-f85d-648c5394e3a3"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 3])\n",
"Values: \n",
"tensor([[ 0.5650, -0.0173, 1.1263],\n",
" [ 3.4274, 1.3610, -0.9262]])\n"
]
}
],
"source": [
"# Addition\n",
"x = torch.randn(2, 3)\n",
"y = torch.randn(2, 3)\n",
"z = x + y\n",
"print(\"Size: {}\".format(z.shape)) \n",
"print(\"Values: \\n{}\".format(z))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 85
},
"colab_type": "code",
"id": "157fC9WsMXWf",
"outputId": "a6890b43-4c74-42c6-d654-f62b8c130403"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 2])\n",
"Values: \n",
"tensor([[ 1.3294, -2.4559],\n",
" [-0.4337, 4.9667]])\n"
]
}
],
"source": [
"# Dot product\n",
"x = torch.randn(2, 3)\n",
"y = torch.randn(3, 2)\n",
"z = torch.mm(x, y)\n",
"print(\"Size: {}\".format(z.shape)) \n",
"print(\"Values: \\n{}\".format(z))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 170
},
"colab_type": "code",
"id": "G6316lAmMXZG",
"outputId": "3dce79e7-1b9f-4218-84cd-afbb16af7dd4"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 3])\n",
"Values: \n",
"tensor([[ 0.0257, -0.5716, -0.9207],\n",
" [-1.0590, 0.2942, -0.7114]])\n",
"Size: torch.Size([3, 2])\n",
"Values: \n",
"tensor([[ 0.0257, -1.0590],\n",
" [-0.5716, 0.2942],\n",
" [-0.9207, -0.7114]])\n"
]
}
],
"source": [
"# Transpose\n",
"x = torch.randn(2, 3)\n",
"print(\"Size: {}\".format(x.shape)) \n",
"print(\"Values: \\n{}\".format(x))\n",
"y = torch.t(x)\n",
"print(\"Size: {}\".format(y.shape)) \n",
"print(\"Values: \\n{}\".format(y))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 102
},
"colab_type": "code",
"id": "FCgDCOCjMXcF",
"outputId": "ff1e16f5-bcd9-407f-9c99-361a0b7f27f6"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([3, 2])\n",
"Values: \n",
"tensor([[ 0.0257, -0.5716],\n",
" [-0.9207, -1.0590],\n",
" [ 0.2942, -0.7114]])\n"
]
}
],
"source": [
"# Reshape\n",
"z = x.view(3, 2)\n",
"print(\"Size: {}\".format(z.shape)) \n",
"print(\"Values: \\n{}\".format(z))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 561
},
"colab_type": "code",
"id": "T3-6nGgvECH9",
"outputId": "9599adaf-1feb-4a42-d4b5-af23f1de5b2d"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Size: torch.Size([2, 3, 4])\n",
"Values: \n",
"tensor([[[ 1, 1, 1, 1],\n",
" [ 2, 2, 2, 2],\n",
" [ 3, 3, 3, 3]],\n",
"\n",
" [[10, 10, 10, 10],\n",
" [20, 20, 20, 20],\n",
" [30, 30, 30, 30]]])\n",
"\n",
"Size: torch.Size([3, 8])\n",
"Values: \n",
"tensor([[ 1, 1, 1, 1, 2, 2, 2, 2],\n",
" [ 3, 3, 3, 3, 10, 10, 10, 10],\n",
" [20, 20, 20, 20, 30, 30, 30, 30]])\n",
"\n",
"Size: torch.Size([3, 2, 4])\n",
"Values: \n",
"tensor([[[ 1, 1, 1, 1],\n",
" [10, 10, 10, 10]],\n",
"\n",
" [[ 2, 2, 2, 2],\n",
" [20, 20, 20, 20]],\n",
"\n",
" [[ 3, 3, 3, 3],\n",
" [30, 30, 30, 30]]])\n",
"\n",
"Size: torch.Size([3, 8])\n",
"Values: \n",
"tensor([[ 1, 1, 1, 1, 10, 10, 10, 10],\n",
" [ 2, 2, 2, 2, 20, 20, 20, 20],\n",
" [ 3, 3, 3, 3, 30, 30, 30, 30]])\n"
]
}
],
"source": [
"# Dangers of reshaping (unintended consequences)\n",
"x = torch.tensor([\n",
" [[1,1,1,1], [2,2,2,2], [3,3,3,3]],\n",
" [[10,10,10,10], [20,20,20,20], [30,30,30,30]]\n",
"])\n",
"print(\"Size: {}\".format(x.shape)) \n",
"print(\"Values: \\n{}\\n\".format(x))\n",
"a = x.view(x.size(1), -1)\n",
"print(\"Size: {}\".format(a.shape)) \n",
"print(\"Values: \\n{}\\n\".format(a))\n",
"b = x.transpose(0,1).contiguous()\n",
"print(\"Size: {}\".format(b.shape)) \n",
"print(\"Values: \\n{}\\n\".format(b))\n",
"c = b.view(b.size(0), -1)\n",
"print(\"Size: {}\".format(c.shape)) \n",
"print(\"Values: \\n{}\".format(c))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 136
},
"colab_type": "code",
"id": "hRtG5LShMXew",
"outputId": "b54e520a-8cd5-40a9-8b38-64919574dce0"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Values: \n",
"tensor([[ 0.4295, 0.2223, 0.1772],\n",
" [ 2.1602, -0.8891, -0.5011]])\n",
"Values: \n",
"tensor([ 2.5897, -0.6667, -0.3239])\n",
"Values: \n",
"tensor([0.8290, 0.7700])\n"
]
}
],
"source": [
"# Dimensional operations\n",
"x = torch.randn(2, 3)\n",
"print(\"Values: \\n{}\".format(x))\n",
"y = torch.sum(x, dim=0) # add each row's value for every column\n",
"print(\"Values: \\n{}\".format(y))\n",
    "z = torch.sum(x, dim=1) # add each column's value for every row\n",
"print(\"Values: \\n{}\".format(z))"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "zI0ZV45PrYmw"
},
"source": [
"# Indexing, Splicing and Joining"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 153
},
"colab_type": "code",
"id": "iM3UFrs0MXhL",
"outputId": "bfcbbf13-d8a1-4fc1-f244-fd54068ca74b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"x: \n",
"tensor([[-1.0305, 0.0368, 1.2809, 1.2346],\n",
" [-0.8837, 1.3678, -0.0971, 1.2528],\n",
" [ 0.3382, -1.4948, -0.7058, 1.3378]])\n",
"x[:1]: \n",
"tensor([[-1.0305, 0.0368, 1.2809, 1.2346]])\n",
"x[:1, 1:3]: \n",
"tensor([[0.0368, 1.2809]])\n"
]
}
],
"source": [
"x = torch.randn(3, 4)\n",
"print(\"x: \\n{}\".format(x))\n",
"print (\"x[:1]: \\n{}\".format(x[:1]))\n",
"print (\"x[:1, 1:3]: \\n{}\".format(x[:1, 1:3]))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 153
},
"colab_type": "code",
"id": "_tbpwGxcMXj0",
"outputId": "678e805f-f5ec-49fe-d8d6-0986a3c41672"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Values: \n",
"tensor([[ 0.0720, 0.4266, -0.5351],\n",
" [ 0.9672, 0.3691, -0.7332]])\n",
"Values: \n",
"tensor([[ 0.0720, -0.5351],\n",
" [ 0.9672, -0.7332]])\n",
"Values: \n",
"tensor([ 0.0720, -0.7332])\n"
]
}
],
"source": [
    "# Select with dimensional indices\n",
"x = torch.randn(2, 3)\n",
"print(\"Values: \\n{}\".format(x))\n",
"col_indices = torch.LongTensor([0, 2])\n",
"chosen = torch.index_select(x, dim=1, index=col_indices) # values from column 0 & 2\n",
"print(\"Values: \\n{}\".format(chosen)) \n",
"row_indices = torch.LongTensor([0, 1])\n",
"chosen = x[row_indices, col_indices] # values from (0, 0) & (2, 1)\n",
"print(\"Values: \\n{}\".format(chosen)) "
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 153
},
"colab_type": "code",
"id": "tMeqSQtuMXmH",
"outputId": "9fa99c82-78d9-41f8-d070-710cf1b045c7"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Values: \n",
"tensor([[-0.8443, 0.9883, 2.2796],\n",
" [-0.0482, -0.1147, -0.5290]])\n",
"Values: \n",
"tensor([[-0.8443, 0.9883, 2.2796],\n",
" [-0.0482, -0.1147, -0.5290],\n",
" [-0.8443, 0.9883, 2.2796],\n",
" [-0.0482, -0.1147, -0.5290]])\n"
]
}
],
"source": [
"# Concatenation\n",
"x = torch.randn(2, 3)\n",
"print(\"Values: \\n{}\".format(x))\n",
"y = torch.cat([x, x], dim=0) # stack by rows (dim=1 to stack by columns)\n",
"print(\"Values: \\n{}\".format(y))"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "JqiDuIC-ByvO"
},
"source": [
"# Gradients"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 153
},
"colab_type": "code",
"id": "qxpGB7-VL7fs",
"outputId": "a7964762-60d4-4e0e-bed2-b2d392804494"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Values: \n",
"tensor([[0.7014, 0.2477, 0.5928, 0.5314],\n",
" [0.2832, 0.0825, 0.5684, 0.3090],\n",
" [0.1591, 0.0049, 0.0439, 0.7602]], requires_grad=True)\n",
"x.grad: \n",
" tensor([[0.2500, 0.2500, 0.2500, 0.2500],\n",
" [0.2500, 0.2500, 0.2500, 0.2500],\n",
" [0.2500, 0.2500, 0.2500, 0.2500]])\n"
]
}
],
"source": [
"# Tensors with gradient bookkeeping\n",
"x = torch.rand(3, 4, requires_grad=True)\n",
"y = 3*x + 2\n",
"z = y.mean()\n",
"z.backward() # z has to be scalar\n",
"print(\"Values: \\n{}\".format(x))\n",
"print(\"x.grad: \\n\", x.grad)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "uf7htaAMDcRV"
},
"source": [
"* $ y = 3x + 2 $\n",
"* $ z = \\sum{y}/N $\n",
"* $ \\frac{\\partial(z)}{\\partial(x)} = \\frac{\\partial(z)}{\\partial(y)} \\frac{\\partial(y)}{\\partial(x)} = \\frac{1}{N} * 3 = \\frac{1}{12} * 3 = 0.25 $"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "VQtpZh1YD-kz"
},
"source": [
"# CUDA tensors"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "code",
"id": "E_C3en05L7iT",
"outputId": "01b0eddc-db28-4786-ae48-a1004c838186"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"# Is CUDA available?\n",
"print (torch.cuda.is_available())"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "za47KWEJ6en2"
},
"source": [
    "If the code above returns False, then go to `Runtime` → `Change runtime type` and select `GPU` under `Hardware accelerator`. "
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "code",
"id": "BY2DdN3j6ZxO",
"outputId": "ec0ac0bd-461d-4b45-e131-cbf1d19c955b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Type: torch.FloatTensor\n"
]
}
],
"source": [
"# Creating a zero tensor\n",
"x = torch.Tensor(3, 4).to(\"cpu\")\n",
"print(\"Type: {}\".format(x.type()))"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "code",
"id": "EcmdTggzEFPi",
"outputId": "0e3326db-8d3d-40aa-accd-b31ab841b572"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Type: torch.cuda.FloatTensor\n"
]
}
],
"source": [
"# Creating a zero tensor\n",
"x = torch.Tensor(3, 4).to(\"cuda\")\n",
"print(\"Type: {}\".format(x.type()))"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "07_PyTorch",
"provenance": [],
"toc_visible": true,
"version": "0.3.2"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
| {
"pile_set_name": "Github"
} |
import time
import re
from netmiko.cisco.cisco_ios import CiscoIosBase
from netmiko.ssh_exception import NetmikoAuthenticationException
class KeymileNOSSSH(CiscoIosBase):
    """Netmiko SSH driver for Keymile NOS devices.

    Subclasses CiscoIosBase for prompt/paging handling. The one quirk this
    driver works around: the SSH transport accepts any credentials (paramiko's
    connect always succeeds), so authentication failures are only visible as a
    "Login incorrect" banner in the channel output and must be detected there.
    """

    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self.set_base_prompt()
        self.disable_paging()
        # Give the device a moment to settle before discarding banner output.
        time.sleep(0.3 * self.global_delay_factor)
        self.clear_buffer()

    def _test_channel_read(self, count=40, pattern=""):
        """Read from the channel, converting a "Login incorrect" banner into
        an authentication error.

        Since Keymile NOS always returns True on paramiko.connect() we check
        the output for the substring "Login incorrect" after connecting.
        """
        output = super()._test_channel_read(count=count, pattern=pattern)
        if re.search(r"Login incorrect", output):
            # Tear down the paramiko transport before raising.
            self.paramiko_cleanup()
            # Bug fix: the original concatenation had no separator after
            # "connect", producing e.g. "...unable to connectkeymile host:22".
            msg = (
                "Authentication failure: unable to connect "
                f"{self.device_type} {self.host}:{self.port}"
            )
            msg += self.RESPONSE_RETURN + "Login incorrect"
            raise NetmikoAuthenticationException(msg)
        return output

    def special_login_handler(self, delay_factor=1):
        """Since Keymile NOS always returns True on paramiko.connect() we
        check the output for substring Login incorrect after connecting."""
        self._test_channel_read(pattern=r"(>|Login incorrect)")
| {
"pile_set_name": "Github"
} |
# Build definitions for the MacVenture engine module.
MODULE := engines/macventure

# Object files making up the engine; one per source file in this directory.
MODULE_OBJS := \
	container.o \
	controls.o \
	cursor.o \
	datafiles.o \
	detection.o \
	dialog.o \
	gui.o \
	image.o \
	macventure.o \
	prebuilt_dialogs.o \
	saveload.o \
	script.o \
	sound.o \
	text.o \
	windows.o \
	world.o

MODULE_DIRS += \
	engines/macventure

# This module can be built as a plugin
ifeq ($(ENABLE_MACVENTURE), DYNAMIC_PLUGIN)
PLUGIN := 1
endif

# Include common rules
include $(srcdir)/rules.mk
| {
"pile_set_name": "Github"
} |
package convert
import (
"github.com/zclconf/go-cty/cty"
)
// unify attempts to find a single type that all of the given types can be
// converted to, returning that type along with one Conversion per input
// (nil for inputs that already have the result type). It returns
// cty.NilType and a nil slice if no unification is possible.
//
// The current unify implementation is somewhat inefficient, but we accept this
// under the assumption that it will generally be used with small numbers of
// types and with types of reasonable complexity. However, it does have a
// "happy path" where all of the given types are equal.
//
// This function is likely to have poor performance in cases where any given
// types are very complex (lots of deeply-nested structures) or if the list
// of types itself is very large. In particular, it will walk the nested type
// structure under the given types several times, especially when given a
// list of types for which unification is not possible, since each permutation
// will be tried to determine that result.
func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
	if len(types) == 0 {
		// Degenerate case
		return cty.NilType, nil
	}

	// If all of the given types are of the same structural kind, we may be
	// able to construct a new type that they can all be unified to, even if
	// that is not one of the given types. We must try this before the general
	// behavior below because in unsafe mode we can convert an object type to
	// a subset of that type, which would be a much less useful conversion for
	// unification purposes.
	{
		objectCt := 0
		tupleCt := 0
		dynamicCt := 0
		for _, ty := range types {
			switch {
			case ty.IsObjectType():
				objectCt++
			case ty.IsTupleType():
				tupleCt++
			case ty == cty.DynamicPseudoType:
				dynamicCt++
			default:
				break
			}
		}
		switch {
		case objectCt > 0 && (objectCt+dynamicCt) == len(types):
			return unifyObjectTypes(types, unsafe, dynamicCt > 0)
		case tupleCt > 0 && (tupleCt+dynamicCt) == len(types):
			return unifyTupleTypes(types, unsafe, dynamicCt > 0)
		case objectCt > 0 && tupleCt > 0:
			// Can never unify object and tuple types since they have incompatible kinds
			return cty.NilType, nil
		}
	}

	prefOrder := sortTypes(types)

	// sortTypes gives us an order where earlier items are preferable as
	// our result type. We'll now walk through these and choose the first
	// one we encounter for which conversions exist for all source types.
	//
	// NOTE: conversions is reused across candidate types; every slot is
	// rewritten by the inner loop before the slice is returned, so stale
	// entries from a rejected candidate are harmless.
	conversions := make([]Conversion, len(types))

Preferences:
	for _, wantTypeIdx := range prefOrder {
		wantType := types[wantTypeIdx]
		for i, tryType := range types {
			if i == wantTypeIdx {
				// Don't need to convert our wanted type to itself
				conversions[i] = nil
				continue
			}

			if tryType.Equals(wantType) {
				conversions[i] = nil
				continue
			}

			if unsafe {
				conversions[i] = GetConversionUnsafe(tryType, wantType)
			} else {
				conversions[i] = GetConversion(tryType, wantType)
			}

			if conversions[i] == nil {
				// wantType is not a suitable unification type, so we'll
				// try the next one in our preference order.
				continue Preferences
			}
		}

		return wantType, conversions
	}

	// If we fall out here, no unification is possible
	return cty.NilType, nil
}
// unifyObjectTypes unifies a set of types that are all object types, aside
// from any cty.DynamicPseudoType entries (signalled via hasDynamic by the
// caller, unify).
func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
	// If we had any dynamic types in the input here then we can't predict
	// what path we'll take through here once these become known types, so
	// we'll conservatively produce DynamicVal for these.
	if hasDynamic {
		return unifyAllAsDynamic(types)
	}

	// There are two different ways we can succeed here:
	// - If all of the given object types have the same set of attribute names
	//   and the corresponding types are all unifyable, then we construct that
	//   type.
	// - If the given object types have different attribute names or their
	//   corresponding types are not unifyable, we'll instead try to unify
	//   all of the attribute types together to produce a map type.
	//
	// Our unification behavior is intentionally stricter than our conversion
	// behavior for subset object types because user intent is different with
	// unification use-cases: it makes sense to allow {"foo":true} to convert
	// to emptyobjectval, but unifying an object with an attribute with the
	// empty object type should be an error because unifying to the empty
	// object type would be surprising and useless.

	firstAttrs := types[0].AttributeTypes()
	for _, ty := range types[1:] {
		thisAttrs := ty.AttributeTypes()
		if len(thisAttrs) != len(firstAttrs) {
			// If number of attributes is different then there can be no
			// object type in common.
			return unifyObjectTypesToMap(types, unsafe)
		}
		for name := range thisAttrs {
			if _, ok := firstAttrs[name]; !ok {
				// If attribute names don't exactly match then there can be
				// no object type in common.
				return unifyObjectTypesToMap(types, unsafe)
			}
		}
	}

	// If we get here then we've proven that all of the given object types
	// have exactly the same set of attribute names, though the types may
	// differ.
	retAtys := make(map[string]cty.Type)
	atysAcross := make([]cty.Type, len(types))
	for name := range firstAttrs {
		// Gather this attribute's type from every input, then unify those.
		for i, ty := range types {
			atysAcross[i] = ty.AttributeType(name)
		}
		retAtys[name], _ = unify(atysAcross, unsafe)
		if retAtys[name] == cty.NilType {
			// Cannot unify this attribute alone, which means that unification
			// of everything down to a map type can't be possible either.
			return cty.NilType, nil
		}
	}
	retTy := cty.Object(retAtys)

	// Build the per-input conversions into the unified object type.
	conversions := make([]Conversion, len(types))
	for i, ty := range types {
		if ty.Equals(retTy) {
			continue
		}
		if unsafe {
			conversions[i] = GetConversionUnsafe(ty, retTy)
		} else {
			conversions[i] = GetConversion(ty, retTy)
		}
		if conversions[i] == nil {
			// Shouldn't be reachable, since we were able to unify
			return unifyObjectTypesToMap(types, unsafe)
		}
	}

	return retTy, conversions
}
// unifyObjectTypesToMap is the fallback case for unifyObjectTypes: it checks
// whether a single map type can accept the attribute values of every given
// object type, by unifying all of the attribute types into one element type.
func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
	// Collect the attribute types of every input object.
	var attrTypes []cty.Type
	for _, objTy := range types {
		for _, attrTy := range objTy.AttributeTypes() {
			attrTypes = append(attrTypes, attrTy)
		}
	}

	// A common element type is required for a map; bail out if none exists.
	elemTy, _ := unify(attrTypes, unsafe)
	if elemTy == cty.NilType {
		return cty.NilType, nil
	}

	mapTy := cty.Map(elemTy)

	// Select the conversion lookup once, rather than branching per input.
	getConv := GetConversion
	if unsafe {
		getConv = GetConversionUnsafe
	}

	conversions := make([]Conversion, len(types))
	for i, objTy := range types {
		if objTy.Equals(mapTy) {
			// Already the result type; leave the conversion nil.
			continue
		}
		conversions[i] = getConv(objTy, mapTy)
		if conversions[i] == nil {
			// At least one input can't reach the map type: no unification.
			return cty.NilType, nil
		}
	}

	return mapTy, conversions
}
// unifyTupleTypes unifies a set of types that are all tuple types, aside
// from any cty.DynamicPseudoType entries (signalled via hasDynamic by the
// caller, unify).
func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
	// If we had any dynamic types in the input here then we can't predict
	// what path we'll take through here once these become known types, so
	// we'll conservatively produce DynamicVal for these.
	if hasDynamic {
		return unifyAllAsDynamic(types)
	}

	// There are two different ways we can succeed here:
	// - If all of the given tuple types have the same sequence of element types
	//   and the corresponding types are all unifyable, then we construct that
	//   type.
	// - If the given tuple types have different element types or their
	//   corresponding types are not unifyable, we'll instead try to unify
	//   all of the elements types together to produce a list type.

	firstEtys := types[0].TupleElementTypes()
	for _, ty := range types[1:] {
		thisEtys := ty.TupleElementTypes()
		if len(thisEtys) != len(firstEtys) {
			// If number of elements is different then there can be no
			// tuple type in common.
			return unifyTupleTypesToList(types, unsafe)
		}
	}

	// If we get here then we've proven that all of the given tuple types
	// have the same number of elements, though the types may differ.
	retEtys := make([]cty.Type, len(firstEtys))
	atysAcross := make([]cty.Type, len(types))
	for idx := range firstEtys {
		// Gather the element type at this position from every input,
		// then unify those.
		for tyI, ty := range types {
			atysAcross[tyI] = ty.TupleElementTypes()[idx]
		}
		retEtys[idx], _ = unify(atysAcross, unsafe)
		if retEtys[idx] == cty.NilType {
			// Cannot unify this element alone, which means that unification
			// of everything down to a list type can't be possible either.
			return cty.NilType, nil
		}
	}
	retTy := cty.Tuple(retEtys)

	// Build the per-input conversions into the unified tuple type.
	conversions := make([]Conversion, len(types))
	for i, ty := range types {
		if ty.Equals(retTy) {
			continue
		}
		if unsafe {
			conversions[i] = GetConversionUnsafe(ty, retTy)
		} else {
			conversions[i] = GetConversion(ty, retTy)
		}
		if conversions[i] == nil {
			// Shouldn't be reachable, since we were able to unify
			return unifyTupleTypesToList(types, unsafe)
		}
	}

	return retTy, conversions
}
// unifyTupleTypesToList is the fallback case for unifyTupleTypes: it checks
// whether a single list type can accept the element values of every given
// tuple type, by unifying all of the element types into one element type.
func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
	// This is our fallback case for unifyTupleTypes, where we see if we can
	// construct a list type that can accept all of the element types.
	var etys []cty.Type
	for _, ty := range types {
		etys = append(etys, ty.TupleElementTypes()...)
	}

	ety, _ := unify(etys, unsafe)
	if ety == cty.NilType {
		return cty.NilType, nil
	}

	retTy := cty.List(ety)
	conversions := make([]Conversion, len(types))
	for i, ty := range types {
		if ty.Equals(retTy) {
			continue
		}
		if unsafe {
			conversions[i] = GetConversionUnsafe(ty, retTy)
		} else {
			conversions[i] = GetConversion(ty, retTy)
		}
		if conversions[i] == nil {
			// Shouldn't be reachable, since we were able to unify.
			//
			// Bug fix: this branch previously recursed into
			// unifyObjectTypesToMap (a copy-paste from the object-type
			// variant), which calls AttributeTypes() on each input; that is
			// only valid for object types, so on tuple inputs it would not
			// behave as intended. Report unification failure instead.
			return cty.NilType, nil
		}
	}

	return retTy, conversions
}
// unifyAllAsDynamic is the conservative result used when any input type is
// cty.DynamicPseudoType: every input is "converted" by simply producing
// cty.DynamicVal, and the unified type is cty.DynamicPseudoType itself.
func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
	// A single conversion function suffices for every slot, since it
	// ignores its input entirely.
	toDynamic := func(cty.Value) (cty.Value, error) {
		return cty.DynamicVal, nil
	}

	conversions := make([]Conversion, len(types))
	for i := range conversions {
		conversions[i] = toDynamic
	}
	return cty.DynamicPseudoType, conversions
}
| {
"pile_set_name": "Github"
} |
# CVS $Revision$ $Author$ -- Sun Mar 2 22:49:32 2008 -- reformated by prettylst.pl v1.38 (build 5225)
SOURCELONG:Rise of the Runelords - Chapter 1: Burnt Offerings SOURCESHORT:PF1 SOURCEWEB:http://paizo.com/pathfinder/v5748btpy7zkr SOURCEDATE:2007-08
STARTPACK:Attic Whisperer Default TYPE:DefaultMonster.Undead VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Attic Whisperer] SOURCEPAGE:p.92
RACE:Attic Whisperer !PRERACE:1,%
NAME:Attic Whisperer
ALIGN:NE
SKILL:Bluff RANK:9
SKILL:Climb RANK:6
SKILL:Hide RANK:9
SKILL:Knowledge (Local) RANK:6
SKILL:Listen RANK:9
SKILL:Move Silently RANK:9
SKILL:Spot RANK:6
ABILITY:CATEGORY=FEAT|Dodge
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Weapon Finesse
STAT:STR=11|DEX=11|CON=10|INT=10|WIS=11|CHA=11
STARTPACK:Gecko (Giant) Default TYPE:DefaultMonster.Animal VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Gecko (Giant)] SOURCEPAGE:p.89
RACE:Gecko (Giant) !PRERACE:1,%
NAME:Giant Gecko
ALIGN:TN
SKILL:Climb RANK:5
ABILITY:CATEGORY=FEAT|Improved Initiative
STAT:STR=11|DEX=11|CON=10|INT=10|WIS=10|CHA=10
STARTPACK:Goblin Dog Default TYPE:DefaultMonster.Animal VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Goblin Dog] SOURCEPAGE:p.87
RACE:Goblin Dog !PRERACE:1,%
NAME:Goblin Dog
ALIGN:TN
SKILL:Jump RANK:4
ABILITY:CATEGORY=FEAT|Toughness
STAT:STR=11|DEX=10|CON=11|INT=10|WIS=10|CHA=10
STARTPACK:Goblin Snake Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Goblin Snake] SOURCEPAGE:p.88
RACE:Goblin Snake !PRERACE:1,%
NAME:Goblin Snake
ALIGN:CE
SKILL:Bluff RANK:1
SKILL:Hide RANK:1
SKILL:Intimidate RANK:1
SKILL:Spot RANK:2
ABILITY:CATEGORY=FEAT|Alertness
STAT:STR=10|DEX=11|CON=10|INT=10|WIS=10|CHA=11
STARTPACK:Sandpoint Devil Default TYPE:DefaultMonster.Outsider VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sandpoint Devil] SOURCEPAGE:p.94
RACE:Sandpoint Devil !PRERACE:1,%
NAME:The Sandpoint Devil
ALIGN:NE
SKILL:Climb RANK:7
SKILL:Disguise RANK:6
SKILL:Hide RANK:15
SKILL:Intimidate RANK:5
SKILL:Knowledge (Geography) RANK:8
SKILL:Listen RANK:15
SKILL:Move Silently RANK:15
SKILL:Search RANK:7
SKILL:Spot RANK:15
SKILL:Survival RANK:8
ABILITY:CATEGORY=FEAT|Alertness
ABILITY:CATEGORY=FEAT|Dodge
ABILITY:CATEGORY=FEAT|Mobility
ABILITY:CATEGORY=FEAT|Spring Attack
ABILITY:CATEGORY=FEAT|Stealthy
STAT:STR=10|DEX=10|CON=10|INT=10|WIS=10|CHA=10
STARTPACK:Sinspawn Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn] SOURCEPAGE:p.90
RACE:Sinspawn !PRERACE:1,%
NAME:Sinspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Envyspawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Envyspawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Envyspawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Gluttonspawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Gluttonspawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Gluttonspawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Greedspawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Greedspawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Greedspawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Lustspawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Lustspawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Lustspawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Pridespawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Pridespawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Pridespawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
STARTPACK:Sinspawn (Slothspawn) Default TYPE:DefaultMonster.Aberration VISIBLE:QUALIFY EQUIPBUY:0 PREMULT:1,[!PRERACE:1,%],[PRERACE:1,Sinspawn (Slothspawn)] SOURCEPAGE:p.91
RACE:Sinspawn (Slothspawn) !PRERACE:1,%
NAME:Envyspawn
ALIGN:NE
SKILL:Hide RANK:6
SKILL:Move Silently RANK:6
ABILITY:CATEGORY=FEAT|Improved Initiative
ABILITY:CATEGORY=FEAT|Multiattack
STAT:STR=11|DEX=10|CON=10|INT=10|WIS=11|CHA=10
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2013 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace Zend\Mail\Header;
use Zend\Mail\AddressList;
use Zend\Mail\Headers;
/**
 * Base class for headers composing address lists (to, from, cc, bcc, reply-to)
 */
abstract class AbstractAddressList implements HeaderInterface
{
    /**
     * Addresses carried by this header
     *
     * @var AddressList
     */
    protected $addressList;

    /**
     * @var string Normalized field name
     */
    protected $fieldName;

    /**
     * Header encoding
     *
     * @var string
     */
    protected $encoding = 'ASCII';

    /**
     * @var string lower case field name
     */
    protected static $type;

    /**
     * Parse a raw header line into a concrete address-list header.
     *
     * The line is MIME-decoded first; if decoding changed the line, the
     * header's encoding is switched to UTF-8. The value is then unfolded,
     * split on "," and each chunk matched as either "Name <email>" or a
     * bare email address.
     *
     * @param  string $headerLine
     * @return static
     * @throws Exception\InvalidArgumentException If the field name does not match static::$type
     */
    public static function fromString($headerLine)
    {
        $decodedLine = iconv_mime_decode($headerLine, ICONV_MIME_DECODE_CONTINUE_ON_ERROR, 'UTF-8');
        // split into name/value
        list($fieldName, $fieldValue) = explode(':', $decodedLine, 2);
        $fieldName = trim($fieldName);
        $fieldValue = trim($fieldValue);

        if (strtolower($fieldName) !== static::$type) {
            throw new Exception\InvalidArgumentException(sprintf(
                'Invalid header line for "%s" string',
                __CLASS__
            ));
        }

        $header = new static();
        if ($decodedLine != $headerLine) {
            $header->setEncoding('UTF-8');
        }
        // split value on ","
        $fieldValue = str_replace(Headers::FOLDING, ' ', $fieldValue);
        $values = explode(',', $fieldValue);
        // Bug fix: the previous array_walk($values, 'trim') call was a no-op.
        // array_walk() discards the callback's return value and only mutates
        // elements when the callback takes its argument by reference, which
        // the built-in trim() does not. array_map() keeps trim()'s results.
        $values = array_map('trim', $values);

        $addressList = $header->getAddressList();
        foreach ($values as $address) {
            // split values into name/email
            if (!preg_match('/^((?P<name>.*?)<(?P<namedEmail>[^>]+)>|(?P<email>.+))$/', $address, $matches)) {
                // Should we raise an exception here?
                continue;
            }
            $name = null;
            if (isset($matches['name'])) {
                $name = trim($matches['name']);
            }
            if (empty($name)) {
                $name = null;
            }
            if (isset($matches['namedEmail'])) {
                $email = $matches['namedEmail'];
            }
            if (isset($matches['email'])) {
                $email = $matches['email'];
            }
            $email = trim($email); // we may have leading whitespace

            // populate address list
            $addressList->add($email, $name);
        }
        return $header;
    }

    /**
     * Get the normalized field name for this header
     *
     * @return string
     */
    public function getFieldName()
    {
        return $this->fieldName;
    }

    /**
     * Render the comma-folded list of addresses.
     *
     * Names containing "," are double-quoted; when FORMAT_ENCODED is
     * requested and the header encoding is not ASCII, names are MIME-encoded
     * via HeaderWrap::mimeEncodeValue().
     *
     * @param  string $format One of the HeaderInterface::FORMAT_* constants
     * @return string
     */
    public function getFieldValue($format = HeaderInterface::FORMAT_RAW)
    {
        $emails   = array();
        $encoding = $this->getEncoding();

        foreach ($this->getAddressList() as $address) {
            $email = $address->getEmail();
            $name  = $address->getName();

            if (empty($name)) {
                $emails[] = $email;
            } else {
                if (false !== strstr($name, ',')) {
                    $name = sprintf('"%s"', $name);
                }

                if ($format == HeaderInterface::FORMAT_ENCODED
                    && 'ASCII' !== $encoding
                ) {
                    $name = HeaderWrap::mimeEncodeValue($name, $encoding);
                }

                $emails[] = sprintf('%s <%s>', $name, $email);
            }
        }

        return implode(',' . Headers::FOLDING, $emails);
    }

    /**
     * Set the header encoding
     *
     * @param  string $encoding
     * @return self
     */
    public function setEncoding($encoding)
    {
        $this->encoding = $encoding;
        return $this;
    }

    /**
     * Get the header encoding
     *
     * @return string
     */
    public function getEncoding()
    {
        return $this->encoding;
    }

    /**
     * Set address list for this header
     *
     * @param AddressList $addressList
     */
    public function setAddressList(AddressList $addressList)
    {
        $this->addressList = $addressList;
    }

    /**
     * Get address list managed by this header
     *
     * Lazily instantiates an empty AddressList on first access.
     *
     * @return AddressList
     */
    public function getAddressList()
    {
        if (null === $this->addressList) {
            $this->setAddressList(new AddressList());
        }
        return $this->addressList;
    }

    /**
     * Serialize the full header line ("Name: value"), or "" when the
     * address list renders empty
     *
     * @return string
     */
    public function toString()
    {
        $name  = $this->getFieldName();
        $value = $this->getFieldValue(HeaderInterface::FORMAT_ENCODED);

        return (empty($value)) ? '' : sprintf('%s: %s', $name, $value);
    }
}
| {
"pile_set_name": "Github"
} |
@HD VN:1.6 SO:queryname
@RG ID:HiMom.1 SM:SA_ATTATCAA LB:LN_ATTATCAA PL:ILLUMINA PU:HiMom.1.ATTATCAA CN:BI
HiMom:1:1101:1100:2207 77 * 0 0 * * 0 0 ACGACAGACGTTCTTTCTTTGCTGC CCCFFFFFHHFHHJIJJJJJHIJJH RG:Z:HiMom.1
HiMom:1:1101:1100:2207 141 * 0 0 * * 0 0 AGGCT............G....... ######################### RG:Z:HiMom.1
HiMom:1:1101:1157:2135 77 * 0 0 * * 0 0 .GGACATTGTAATCATTTCTTACAA #1=DD?DDHHHHHGGHIIIIIIIII RG:Z:HiMom.1
HiMom:1:1101:1157:2135 141 * 0 0 * * 0 0 TTTAAAGTCTTAATCAAAGATGATA CCCFFFFFHHHHHJJJJJJJJJJJJ RG:Z:HiMom.1
HiMom:1:1101:1269:2170 77 * 0 0 * * 0 0 ACAGTGTGGGAGGCAGACGAAGAGA @@@DDDDDFA:C@EGA?FD<FFHII RG:Z:HiMom.1
HiMom:1:1101:1269:2170 141 * 0 0 * * 0 0 TTCCAAGCCTGTGCTTTAAGGAAAA @@<ADBDBDF8DDCFH@GIE@@GGH RG:Z:HiMom.1
HiMom:1:1201:1018:2217 589 * 0 0 * * 0 0 .TTTCTCTGGGCGCAAAGATGTTCA #07;8=8<<99(:=@@/@7>>6=?> RG:Z:HiMom.1
HiMom:1:1201:1018:2217 653 * 0 0 * * 0 0 ......................... ######################### RG:Z:HiMom.1 XN:i:1
HiMom:1:1201:1118:2198 77 * 0 0 * * 0 0 CAAGTGTACAGGATTAGACTGGGTT BCCFDEBDHHHHHIJJJGIIIJJGH RG:Z:HiMom.1
HiMom:1:1201:1118:2198 141 * 0 0 * * 0 0 AATAAACTTTATTAAAGCAGTTAAA C@CFFFFFHDHHHGIIIJJJIJJJJ RG:Z:HiMom.1
| {
"pile_set_name": "Github"
} |
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright 2018, loli10K <[email protected]>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/events/events_common.kshlib
. $STF_SUITE/tests/functional/fault/fault.cfg
#
# DESCRIPTION:
# Testing Fault Management Agent ZED Logic - Physically removed device is
# made unavail and onlined when reattached
#
# STRATEGY:
# 1. Create a pool
# 2. Simulate physical removal of one device
# 3. Verify the device is unvailable
# 4. Reattach the device
# 5. Verify the device is onlined
# 6. Repeat the same tests with a spare device:
# zed will use the spare to handle the removed data device
# 7. Repeat the same tests again with a faulted spare device:
# the removed data device should be unavailable
#
# NOTE: the use of 'block_device_wait' throughout the test helps avoid race
# conditions caused by mixing creation/removal events from partitioning the
# disk (zpool create) and events from physically removing it (remove_disk).
#
# NOTE: the test relies on 'zpool sync' to prompt the kmods to transition a
# vdev to the unavailable state. The ZED does receive a removal notification
# but only relies on it to activate a hot spare. Additional work is planned
# to extend an existing ioctl interface to allow the ZED to transition the
# vdev in to a removed state.
#
verify_runnable "both"
# scsi_debug is used below to emulate a physically removable disk; it is
# only available on Linux, so the test is skipped elsewhere.
if is_linux; then
	# Add one 512b scsi_debug device (4Kn would generate IO errors)
	# NOTE: must be larger than other "file" vdevs and minimum SPA devsize:
	# add 32m of fudge
	load_scsi_debug $(($SPA_MINDEVSIZE/1024/1024+32)) 1 1 1 '512b'
else
	log_unsupported "scsi debug module unsupported"
fi

# Tear down everything the test created: the pool, the file-backed vdevs
# and the emulated scsi_debug device. Registered with log_onexit below so it
# also runs on failure.
function cleanup
{
	destroy_pool $TESTPOOL
	rm -f $filedev1
	rm -f $filedev2
	rm -f $filedev3
	rm -f $sparedev
	unload_scsi_debug
}
log_assert "ZED detects physically removed devices"
log_onexit cleanup

# Backing files for the pool's data vdevs and the hot spare; $removedev is
# the emulated scsi_debug disk that gets physically removed/reattached.
filedev1="$TEST_BASE_DIR/file-vdev-1"
filedev2="$TEST_BASE_DIR/file-vdev-2"
filedev3="$TEST_BASE_DIR/file-vdev-3"
sparedev="$TEST_BASE_DIR/file-vdev-spare"
removedev=$(get_debug_device)

# Pool layouts exercised by each test phase; every layout includes the
# removable device.
typeset poolconfs=(
    "mirror $filedev1 $removedev"
    "raidz3 $filedev1 $filedev2 $filedev3 $removedev"
    "mirror $filedev1 $filedev2 special mirror $filedev3 $removedev"
)

log_must truncate -s $SPA_MINDEVSIZE $filedev1
log_must truncate -s $SPA_MINDEVSIZE $filedev2
log_must truncate -s $SPA_MINDEVSIZE $filedev3
log_must truncate -s $SPA_MINDEVSIZE $sparedev

for conf in "${poolconfs[@]}"
do
	# 1. Create a pool
	log_must zpool create -f $TESTPOOL $conf
	block_device_wait ${DEV_DSKDIR}/${removedev}

	mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
	    log_fail "get_prop mountpoint /$TESTPOOL"

	# 2. Simulate physical removal of one device
	remove_disk $removedev
	log_must mkfile 1m $mntpnt/file
	log_must zpool sync $TESTPOOL

	# 3. Verify the device is unavailable.
	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"

	# 4. Reattach the device
	insert_disk $removedev

	# 5. Verify the device is onlined
	log_must wait_vdev_state $TESTPOOL $removedev "ONLINE"

	# cleanup: destroy the pool and relabel the emulated disk so the next
	# loop iteration starts from a blank device
	destroy_pool $TESTPOOL
	log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
	block_device_wait ${DEV_DSKDIR}/${removedev}
done

# 6. Repeat the same tests with a spare device: zed will use the spare to handle
# the removed data device
for conf in "${poolconfs[@]}"
do
	# 1. Create a pool with a spare
	log_must zpool create -f $TESTPOOL $conf
	block_device_wait ${DEV_DSKDIR}/${removedev}
	log_must zpool add $TESTPOOL spare $sparedev

	mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
	    log_fail "get_prop mountpoint /$TESTPOOL"

	# 2. Simulate physical removal of one device
	remove_disk $removedev
	log_must mkfile 1m $mntpnt/file
	log_must zpool sync $TESTPOOL

	# 3. Verify the device is handled by the spare.
	log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"

	# 4. Reattach the device
	insert_disk $removedev

	# 5. Verify the device is onlined
	log_must wait_vdev_state $TESTPOOL $removedev "ONLINE"

	# cleanup: destroy the pool and relabel the emulated disk so the next
	# loop iteration starts from a blank device
	destroy_pool $TESTPOOL
	log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
	block_device_wait ${DEV_DSKDIR}/${removedev}
done

# 7. Repeat the same tests again with a faulted spare device: zed should offline
# the removed data device if no spare is available
for conf in "${poolconfs[@]}"
do
	# 1. Create a pool with a spare
	log_must zpool create -f $TESTPOOL $conf
	block_device_wait ${DEV_DSKDIR}/${removedev}
	log_must zpool add $TESTPOOL spare $sparedev

	mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
	    log_fail "get_prop mountpoint /$TESTPOOL"

	# 2. Fault the spare device making it unavailable
	log_must zpool offline -f $TESTPOOL $sparedev
	log_must wait_hotspare_state $TESTPOOL $sparedev "FAULTED"

	# 3. Simulate physical removal of one device
	remove_disk $removedev
	log_must mkfile 1m $mntpnt/file
	log_must zpool sync $TESTPOOL

	# 4. Verify the device is unavailable
	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"

	# 5. Reattach the device
	insert_disk $removedev

	# 6. Verify the device is onlined
	log_must wait_vdev_state $TESTPOOL $removedev "ONLINE"

	# cleanup: destroy the pool and relabel the emulated disk so the next
	# loop iteration starts from a blank device
	destroy_pool $TESTPOOL
	log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
	block_device_wait ${DEV_DSKDIR}/${removedev}
done

log_pass "ZED detects physically removed devices"
| {
"pile_set_name": "Github"
} |
(module D_SOD123_axial (layer F.Cu) (tedit 561B6A12)
(attr smd)
(fp_text reference D** (at 0 1.925) (layer F.SilkS)
(effects (font (size 0.8 0.8) (thickness 0.15)))
)
(fp_text value D (at 0 -1.925) (layer F.SilkS) hide
(effects (font (size 0.8 0.8) (thickness 0.15)))
)
(fp_line (start -2.275 -1.2) (end -2.275 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -2.45 -1.2) (end -2.45 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -2.625 -1.2) (end -2.625 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -3.025 1.2) (end -3.025 -1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -2.8 -1.2) (end -2.8 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -2.925 -1.2) (end -2.925 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start -3 -1.2) (end 2.8 -1.2) (layer F.SilkS) (width 0.2))
(fp_line (start 2.8 -1.2) (end 2.8 1.2) (layer F.SilkS) (width 0.2))
(fp_line (start 2.8 1.2) (end -3 1.2) (layer F.SilkS) (width 0.2))
(pad 2 smd rect (at 1.575 0) (size 1.2 1.2) (layers F.Cu F.Paste F.Mask))
(pad 1 smd rect (at -1.575 0) (size 1.2 1.2) (layers F.Cu F.Paste F.Mask))
(pad 1 thru_hole rect (at -3.9 0) (size 1.6 1.6) (drill 0.7) (layers *.Cu *.Mask F.SilkS))
(pad 2 thru_hole circle (at 3.9 0) (size 1.6 1.6) (drill 0.7) (layers *.Cu *.Mask F.SilkS))
(pad 1 smd rect (at -2.7 0) (size 2.5 0.5) (layers F.Cu)
(solder_mask_margin -999))
(pad 2 smd rect (at 2.7 0) (size 2.5 0.5) (layers F.Cu)
(solder_mask_margin -999))
)
| {
"pile_set_name": "Github"
} |
invisible = 1
col_layer = -1
repeat = 2
on ground_collision()
shoot_particles ( geminstaticfat.obj , 1)
create_explosion ( dragonexps.exp )
remove()
on timer(0)
shoot_particles ( settingtheblurofflame.obj , 1)
create_explosion ( dragonexp.exp )
on timer(25)
shoot_particles ( geminstaticfat.obj , 1)
create_explosion ( dragonexps.exp )
remove()
| {
"pile_set_name": "Github"
} |
---
cve: 2013-2160
title: "Denial of Service Attacks on Apache CXF"
references:
- http://cxf.apache.org/security-advisories.data/CVE-2013-2160.txt.asc
- https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2013-2160
affected:
- groupId: "org.apache.cxf"
artifactId: "cxf-api"
version:
- "<=2.5.9,2.5"
- "<=2.6.6,2.6"
- "<=2.7.3,2.7"
fixedin:
- ">=2.5.10,2.5"
- "==2.6.6-jbossorg-1"
- ">=2.6.7,2.6"
- ">=2.7.4,2.7"
- groupId: "org.apache.cxf"
artifactId: "cxf-parent"
version:
- "<=2.5.9,2.5"
- "<=2.6.6,2.6"
- "<=2.7.3,2.7"
fixedin:
- ">=2.5.10,2.5"
- "==2.6.6-jbossorg-1"
- ">=2.6.7,2.6"
- ">=2.7.4,2.7"
- groupId: "org.apache.cxf"
artifactId: "apache-cxf"
version:
- "<=2.5.9,2.5"
- "<=2.6.6,2.6"
- "<=2.7.3,2.7"
fixedin:
- ">=2.5.10,2.5"
- "==2.6.6-jbossorg-1"
- ">=2.6.7,2.6"
- ">=2.7.4,2.7"
package_urls:
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-api/2.6.0.redhat-60092/cxf-api-2.6.0.redhat-60092.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-api/2.6.0.redhat-60024/cxf-api-2.6.0.redhat-60024.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-api/2.6.0.redhat-60083/cxf-api-2.6.0.redhat-60083.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.4/cxf-api-2.5.4.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-api/2.6.6-redhat-3/cxf-api-2.6.6-redhat-3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.5/cxf-api-2.5.5.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.0/cxf-api-2.6.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.6/cxf-api-2.6.6.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.1/cxf-api-2.6.1.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-api/2.6.0.redhat-60065/cxf-api-2.6.0.redhat-60065.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.4/cxf-api-2.6.4.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.7.0/cxf-api-2.7.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.7.1/cxf-api-2.7.1.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.8/cxf-api-2.5.8.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.7.2/cxf-api-2.7.2.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.1/cxf-api-2.5.1.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.7.3/cxf-api-2.7.3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.6/cxf-api-2.5.6.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.0/cxf-api-2.5.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.7/cxf-api-2.5.7.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.2/cxf-api-2.5.2.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.3/cxf-api-2.5.3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.3/cxf-api-2.6.3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.5.9/cxf-api-2.5.9.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.2/cxf-api-2.6.2.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-api/2.6.5/cxf-api-2.6.5.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.0/cxf-parent-2.6.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.3/cxf-parent-2.5.3.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-parent/2.6.0.redhat-60065/cxf-parent-2.6.0.redhat-60065.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.6/cxf-parent-2.5.6.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.7.2/cxf-parent-2.7.2.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-parent/2.6.6-redhat-3/cxf-parent-2.6.6-redhat-3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.9/cxf-parent-2.5.9.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.7/cxf-parent-2.5.7.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.7.3/cxf-parent-2.7.3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.7.1/cxf-parent-2.7.1.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.4/cxf-parent-2.5.4.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.5/cxf-parent-2.5.5.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.7.0/cxf-parent-2.7.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.1/cxf-parent-2.5.1.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.0/cxf-parent-2.5.0.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.2/cxf-parent-2.5.2.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.5.8/cxf-parent-2.5.8.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-parent/2.6.0.redhat-60092/cxf-parent-2.6.0.redhat-60092.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-parent/2.6.0.redhat-60024/cxf-parent-2.6.0.redhat-60024.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.6/cxf-parent-2.6.6.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.2/cxf-parent-2.6.2.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.5/cxf-parent-2.6.5.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.1/cxf-parent-2.6.1.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.3/cxf-parent-2.6.3.jar
- http://central.maven.org/maven2/org/apache/cxf/cxf-parent/2.6.4/cxf-parent-2.6.4.jar
- https://maven.repository.redhat.com/ga/org/apache/cxf/cxf-parent/2.6.0.redhat-60083/cxf-parent-2.6.0.redhat-60083.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.7.0/apache-cxf-2.7.0.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.7.3/apache-cxf-2.7.3.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.4/apache-cxf-2.5.4.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.3/apache-cxf-2.5.3.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.7.1/apache-cxf-2.7.1.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.7.2/apache-cxf-2.7.2.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.6/apache-cxf-2.6.6.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.3/apache-cxf-2.6.3.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.6/apache-cxf-2.5.6.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.1/apache-cxf-2.6.1.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.9/apache-cxf-2.5.9.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.4/apache-cxf-2.6.4.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.2/apache-cxf-2.6.2.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.0/apache-cxf-2.6.0.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.7/apache-cxf-2.5.7.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.1/apache-cxf-2.5.1.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.6.5/apache-cxf-2.6.5.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.0/apache-cxf-2.5.0.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.2/apache-cxf-2.5.2.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.8/apache-cxf-2.5.8.jar
- http://central.maven.org/maven2/org/apache/cxf/apache-cxf/2.5.5/apache-cxf-2.5.5.jar
| {
"pile_set_name": "Github"
} |
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
// Walks every character of its first argument, calling String.prototype.charCodeAt
// on each index. The computed code units are deliberately discarded; the function
// exists only to exercise the string/char-code path and always returns undefined.
function Tm()
{
    var str = arguments[0];
    var len = str.length;
    for (var i = 0; i < len; i++)
    {
        str.charCodeAt(i);
    }
}
// Invoke Tm repeatedly with distinct strings (the random suffix makes each
// argument unique — presumably to exercise repeated compilation/specialization
// of the charCodeAt loop; TODO confirm against the test harness's intent).
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
Tm("reallyLongTestString" + Math.random());
// Reaching this line without throwing is the pass condition (WScript implies a
// Windows Script Host / jscript-style runner).
WScript.Echo("pass");
| {
"pile_set_name": "Github"
} |
{
"created_at": "2015-02-27T22:28:30.880888",
"description": "This project is DEACTIVE",
"fork": false,
"full_name": "lepture/Vealous",
"language": "Python",
"updated_at": "2015-02-27T23:42:54.740715"
} | {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2015, Google Inc. and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.annotations;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.LOCAL_VARIABLE;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.PARAMETER;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Marks types that can hold the value {@code null} at run time.
* <p>
* Unlike {@code org.eclipse.jdt.annotation.Nullable}, this has run-time
* retention, allowing the annotation to be recognized by
* <a href="https://github.com/google/guice/wiki/UseNullable">Guice</a>. Unlike
* {@code javax.annotation.Nullable}, this does not involve importing new classes
* to a standard (Java EE) package, so it can be deployed in an OSGi container
* without running into
* <a href="http://wiki.osgi.org/wiki/Split_Packages">split-package</a>
* <a href="https://gerrit-review.googlesource.com/50112">problems</a>.
* <p>
* You can use this annotation to qualify a type in a method signature or local
* variable declaration. The entity whose type has this annotation is allowed to
* hold the value {@code null} at run time. This allows annotation based null
* analysis to infer that
* <ul>
* <li>Binding a {@code null} value to the entity is legal.
* <li>Dereferencing the entity is unsafe and can trigger a
* {@code NullPointerException}.
* </ul>
* <p>
* To avoid a dependency on Java 8, this annotation does not use
* {@link Target @Target} {@code TYPE_USE}. That may change when JGit starts
* requiring Java 8.
* <p>
* <b>Warning:</b> Please do not use this annotation on arrays. Different
* annotation processors treat {@code @Nullable Object[]} differently: some
* treat it as an array of nullable objects, for consistency with versions of
* {@code Nullable} defined with {@code @Target} {@code TYPE_USE}, while others
* treat it as a nullable array of objects. JGit therefore avoids using this
* annotation on arrays altogether.
*
* @see <a href=
* "http://types.cs.washington.edu/checker-framework/current/checker-framework-manual.html#faq-array-syntax-meaning">
* The checker-framework manual</a>
* @since 4.2
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target({ FIELD, METHOD, PARAMETER, LOCAL_VARIABLE })
public @interface Nullable {
	// Marker annotation only: it declares no members, since its mere presence
	// on a field, method, parameter or local variable conveys nullability.
}
| {
"pile_set_name": "Github"
} |
eclipse.preferences.version=1
showIntro=false
| {
"pile_set_name": "Github"
} |
<?php
namespace bandwidthThrottle\tokenBucket\util;
/**
 * Unit tests for the DoublePacker helper.
 *
 * @author Markus Malkusch <[email protected]>
 * @link bitcoin:1335STSwu9hST4vcMRppEPgENMHD2r1REK Donations
 * @license WTFPL
 * @see DoublePacker
 */
class DoublePackerTest extends \PHPUnit_Framework_TestCase
{
    /**
     * pack() should produce the same binary string as PHP's pack("d", ...).
     *
     * @param string $expected The expected string.
     * @param double $input    The input double.
     *
     * @test
     * @dataProvider provideTestPack
     */
    public function testPack($expected, $input)
    {
        $this->assertEquals($expected, DoublePacker::pack($input));
    }

    /**
     * Provides test cases for testPack().
     *
     * @return array Test cases.
     */
    public function provideTestPack()
    {
        $cases = [];

        foreach ([0, 0.1, 1] as $double) {
            $cases[] = [pack("d", $double), $double];
        }

        return $cases;
    }

    /**
     * unpack() should reject strings that are not exactly 8 bytes long.
     *
     * @param string $input The input string.
     *
     * @test
     * @dataProvider provideTestUnpackFails
     * @expectedException \bandwidthThrottle\tokenBucket\storage\StorageException
     */
    public function testUnpackFails($input)
    {
        DoublePacker::unpack($input);
    }

    /**
     * Provides test cases for testUnpackFails().
     *
     * @return array Test cases.
     */
    public function provideTestUnpackFails()
    {
        $malformed = ["", "1234567", "123456789"];

        return array_map(
            function ($input) {
                return [$input];
            },
            $malformed
        );
    }

    /**
     * unpack() should restore the double that pack("d", ...) encoded.
     *
     * @param double $expected The expected double.
     * @param string $input    The input string.
     *
     * @test
     * @dataProvider provideTestUnpack
     */
    public function testUnpack($expected, $input)
    {
        $this->assertEquals($expected, DoublePacker::unpack($input));
    }

    /**
     * Provides test cases for testUnpack().
     *
     * @return array Test cases.
     */
    public function provideTestUnpack()
    {
        $cases = [];

        foreach ([0, 0.1, 1, 1.1] as $double) {
            $cases[] = [$double, pack("d", $double)];
        }

        return $cases;
    }
}
| {
"pile_set_name": "Github"
} |
<?php
/**
* @package Joomla.Libraries
* @subpackage Module
*
* @copyright Copyright (C) 2005 - 2016 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE
*/
defined('JPATH_PLATFORM') or die;
use Joomla\Registry\Registry;
/**
* Module helper class
*
* @since 1.5
*/
abstract class JModuleHelper
{
/**
* Get module by name (real, eg 'Breadcrumbs' or folder, eg 'mod_breadcrumbs')
*
* @param string $name The name of the module
* @param string $title The title of the module, optional
*
* @return stdClass The Module object
*
* @since 1.5
*/
public static function &getModule($name, $title = null)
{
$result = null;
$modules =& static::load();
$total = count($modules);
for ($i = 0; $i < $total; $i++)
{
// Match the name of the module
if ($modules[$i]->name == $name || $modules[$i]->module == $name)
{
// Match the title if we're looking for a specific instance of the module
if (!$title || $modules[$i]->title == $title)
{
// Found it
$result = &$modules[$i];
break;
}
}
}
// If we didn't find it, and the name is mod_something, create a dummy object
if (is_null($result) && substr($name, 0, 4) == 'mod_')
{
$result = new stdClass;
$result->id = 0;
$result->title = '';
$result->module = $name;
$result->position = '';
$result->content = '';
$result->showtitle = 0;
$result->control = '';
$result->params = '';
}
return $result;
}
/**
* Get modules by position
*
* @param string $position The position of the module
*
* @return array An array of module objects
*
* @since 1.5
*/
public static function &getModules($position)
{
$position = strtolower($position);
$result = array();
$input = JFactory::getApplication()->input;
$modules =& static::load();
$total = count($modules);
for ($i = 0; $i < $total; $i++)
{
if ($modules[$i]->position == $position)
{
$result[] = &$modules[$i];
}
}
if (count($result) == 0)
{
if ($input->getBool('tp') && JComponentHelper::getParams('com_templates')->get('template_positions_display'))
{
$result[0] = static::getModule('mod_' . $position);
$result[0]->title = $position;
$result[0]->content = $position;
$result[0]->position = $position;
}
}
return $result;
}
/**
* Checks if a module is enabled. A given module will only be returned
* if it meets the following criteria: it is enabled, it is assigned to
* the current menu item or all items, and the user meets the access level
* requirements.
*
* @param string $module The module name
*
* @return boolean See description for conditions.
*
* @since 1.5
*/
public static function isEnabled($module)
{
$result = static::getModule($module);
return !is_null($result) && $result->id !== 0;
}
/**
* Render the module.
*
* @param object $module A module object.
* @param array $attribs An array of attributes for the module (probably from the XML).
*
* @return string The HTML content of the module output.
*
* @since 1.5
*/
public static function renderModule($module, $attribs = array())
{
static $chrome;
// Check that $module is a valid module object
if (!is_object($module) || !isset($module->module) || !isset($module->params))
{
if (JDEBUG)
{
JLog::addLogger(array('text_file' => 'jmodulehelper.log.php'), JLog::ALL, array('modulehelper'));
JLog::add('JModuleHelper::renderModule($module) expects a module object', JLog::DEBUG, 'modulehelper');
}
return;
}
if (JDEBUG)
{
JProfiler::getInstance('Application')->mark('beforeRenderModule ' . $module->module . ' (' . $module->title . ')');
}
$app = JFactory::getApplication();
// Record the scope.
$scope = $app->scope;
// Set scope to component name
$app->scope = $module->module;
// Get module parameters
if (class_exists('Registry')) {
$params = new Registry;
} else {
$params = new JRegistry;
}
$params->loadString($module->params);
// Get the template
$template = $app->getTemplate();
// Get module path
$module->module = preg_replace('/[^A-Z0-9_\.-]/i', '', $module->module);
$path = JPATH_BASE . '/modules/' . $module->module . '/' . $module->module . '.php';
// Load the module
if (file_exists($path))
{
$lang = JFactory::getLanguage();
$coreLanguageDirectory = JPATH_BASE;
$extensionLanguageDirectory = dirname($path);
$langPaths = $lang->getPaths();
// Only load the module's language file if it hasn't been already
if (!$langPaths || (!isset($langPaths[$coreLanguageDirectory]) && !isset($langPaths[$extensionLanguageDirectory])))
{
// 1.5 or Core then 1.6 3PD
$lang->load($module->module, $coreLanguageDirectory, null, false, true) ||
$lang->load($module->module, $extensionLanguageDirectory, null, false, true);
}
$content = '';
ob_start();
include $path;
$module->content = ob_get_contents() . $content;
ob_end_clean();
}
// Load the module chrome functions
if (!$chrome)
{
$chrome = array();
}
include_once JPATH_THEMES . '/system/html/modules.php';
$chromePath = JPATH_THEMES . '/' . $template . '/html/modules.php';
if (!isset($chrome[$chromePath]))
{
if (file_exists($chromePath))
{
include_once $chromePath;
}
$chrome[$chromePath] = true;
}
// Check if the current module has a style param to override template module style
$paramsChromeStyle = $params->get('style');
if ($paramsChromeStyle)
{
$attribs['style'] = preg_replace('/^(system|' . $template . ')\-/i', '', $paramsChromeStyle);
}
// Make sure a style is set
if (!isset($attribs['style']))
{
$attribs['style'] = 'none';
}
// Dynamically add outline style
if ($app->input->getBool('tp') && JComponentHelper::getParams('com_templates')->get('template_positions_display'))
{
$attribs['style'] .= ' outline';
}
// If the $module is nulled it will return an empty content, otherwise it will render the module normally.
$app->triggerEvent('onRenderModule', array(&$module, &$attribs));
if (is_null($module) || !isset($module->content))
{
return '';
}
foreach (explode(' ', $attribs['style']) as $style)
{
$chromeMethod = 'modChrome_' . $style;
// Apply chrome and render module
if (function_exists($chromeMethod))
{
$module->style = $attribs['style'];
ob_start();
$chromeMethod($module, $params, $attribs);
$module->content = ob_get_contents();
ob_end_clean();
}
}
// Revert the scope
$app->scope = $scope;
$app->triggerEvent('onAfterRenderModule', array(&$module, &$attribs));
if (JDEBUG)
{
JProfiler::getInstance('Application')->mark('afterRenderModule ' . $module->module . ' (' . $module->title . ')');
}
return $module->content;
}
/**
* Get the path to a layout for a module
*
* @param string $module The name of the module
* @param string $layout The name of the module layout. If alternative layout, in the form template:filename.
*
* @return string The path to the module layout
*
* @since 1.5
*/
public static function getLayoutPath($module, $layout = 'default')
{
$template = JFactory::getApplication()->getTemplate();
$defaultLayout = $layout;
if (strpos($layout, ':') !== false)
{
// Get the template and file name from the string
$temp = explode(':', $layout);
$template = ($temp[0] == '_') ? $template : $temp[0];
$layout = $temp[1];
$defaultLayout = ($temp[1]) ? $temp[1] : 'default';
}
// Do 3rd party stuff to detect layout path for the module
// onGetLayoutPath should return the path to the $layout of $module or false
// $results holds an array of results returned from plugins, 1 from each plugin.
// if a path to the $layout is found and it is a file, return that path
$app = JFactory::getApplication();
$result = $app->triggerEvent( 'onGetLayoutPath', array( $module, $layout ) );
if (is_array($result))
{
foreach ($result as $path)
{
if ($path !== false && is_file ($path))
{
return $path;
}
}
}
// Build the template and base path for the layout
$tPath = JPATH_THEMES . '/' . $template . '/html/' . $module . '/' . $layout . '.php';
$bPath = JPATH_BASE . '/modules/' . $module . '/tmpl/' . $defaultLayout . '.php';
$dPath = JPATH_BASE . '/modules/' . $module . '/tmpl/default.php';
// If the template has a layout override use it
if (file_exists($tPath))
{
return $tPath;
}
if (file_exists($bPath))
{
return $bPath;
}
return $dPath;
}
	/**
	 * Load published modules.
	 *
	 * @return  array
	 *
	 * @since   1.5
	 * @deprecated  4.0  Use JModuleHelper::load() instead
	 */
	protected static function &_load()
	{
		// Backward-compatible alias kept for subclasses; delegates to load().
		return static::load();
	}
	/**
	 * Load published modules.
	 *
	 * The result is memoised in a function-static variable, so the database
	 * query and plugin events run at most once per request.
	 *
	 * @return  array
	 *
	 * @since   3.2
	 */
	protected static function &load()
	{
		static $modules;
		if (isset($modules))
		{
			return $modules;
		}
		$app = JFactory::getApplication();
		$modules = null;
		// Let plugins supply the module list wholesale before the default query runs.
		$app->triggerEvent('onPrepareModuleList', array(&$modules));
		// If the onPrepareModuleList event returns an array of modules, then ignore the default module list creation
		if (!is_array($modules))
		{
			$modules = static::getModuleList();
		}
		$app->triggerEvent('onAfterModuleList', array(&$modules));
		$modules = static::cleanModuleList($modules);
		$app->triggerEvent('onAfterCleanModuleList', array(&$modules));
		return $modules;
	}
	/**
	 * Module list
	 *
	 * Queries #__modules joined against #__modules_menu and #__extensions,
	 * restricted by publishing state and dates, the user's authorised view
	 * levels, the client id, the current menu item and (on the site client
	 * with language filtering) the active language. Results are served from
	 * the com_modules callback cache keyed on those same inputs.
	 *
	 * @return  array
	 */
	public static function getModuleList()
	{
		$app = JFactory::getApplication();
		$Itemid = $app->input->getInt('Itemid');
		$groups = implode(',', JFactory::getUser()->getAuthorisedViewLevels());
		$lang = JFactory::getLanguage()->getTag();
		$clientId = (int) $app->getClientId();
		// Build a cache ID for the resulting data object
		$cacheId = $groups . $clientId . (int) $Itemid;
		$db = JFactory::getDbo();
		$query = $db->getQuery(true)
			->select('m.id, m.title, m.module, m.position, m.content, m.showtitle, m.params, mm.menuid')
			->from('#__modules AS m')
			->join('LEFT', '#__modules_menu AS mm ON mm.moduleid = m.id')
			->where('m.published = 1')
			->join('LEFT', '#__extensions AS e ON e.element = m.module AND e.client_id = m.client_id')
			->where('e.enabled = 1');
		// Only modules whose publishing window covers the current time.
		$date = JFactory::getDate();
		$now = $date->toSql();
		$nullDate = $db->getNullDate();
		$query->where('(m.publish_up = ' . $db->quote($nullDate) . ' OR m.publish_up <= ' . $db->quote($now) . ')')
			->where('(m.publish_down = ' . $db->quote($nullDate) . ' OR m.publish_down >= ' . $db->quote($now) . ')')
			->where('m.access IN (' . $groups . ')')
			->where('m.client_id = ' . $clientId)
			->where('(mm.menuid = ' . (int) $Itemid . ' OR mm.menuid <= 0)')
		// Filter by language
		if ($app->isSite() && $app->getLanguageFilter())
		{
			$query->where('m.language IN (' . $db->quote($lang) . ',' . $db->quote('*') . ')');
			$cacheId .= $lang . '*';
		}
		$query->order('m.position, m.ordering');
		// Set the query
		$db->setQuery($query);
		try
		{
			/** @var JCacheControllerCallback $cache */
			$cache = JFactory::getCache('com_modules', 'callback');
			// Cache the raw DB fetch; negative menuid rows are filtered later.
			$modules = $cache->get(array($db, 'loadObjectList'), array(), md5($cacheId), false);
		}
		catch (RuntimeException $e)
		{
			JLog::add(JText::sprintf('JLIB_APPLICATION_ERROR_MODULE_LOAD', $e->getMessage()), JLog::WARNING, 'jerror');
			return array();
		}
		return $modules;
	}
	/**
	 * Clean the module list
	 *
	 * Applies negative menu assignments (a menuid equal to -Itemid excludes
	 * the module), removes duplicate rows produced by the menu join, derives
	 * $module->name from the "mod_" folder name and lowercases the position.
	 *
	 * @param   array  $modules  Array with module objects
	 *
	 * @return  array
	 */
	public static function cleanModuleList($modules)
	{
		// Apply negative selections and eliminate duplicates
		$Itemid = JFactory::getApplication()->input->getInt('Itemid');
		$negId = $Itemid ? -(int) $Itemid : false;
		$clean = array();
		$dupes = array();
		foreach ($modules as $i => $module)
		{
			// The module is excluded if there is an explicit prohibition
			$negHit = ($negId === (int) $module->menuid);
			if (isset($dupes[$module->id]))
			{
				// If this item has been excluded, keep the duplicate flag set,
				// but remove any item from the modules array.
				if ($negHit)
				{
					unset($clean[$module->id]);
				}
				continue;
			}
			$dupes[$module->id] = true;
			// Only accept modules without explicit exclusions.
			if ($negHit)
			{
				continue;
			}
			// Strip the leading "mod_" prefix to get the bare module name.
			$module->name = substr($module->module, 4);
			$module->style = null;
			$module->position = strtolower($module->position);
			$clean[$module->id] = $module;
		}
		unset($dupes);
		// Return to simple indexing that matches the query order.
		return array_values($clean);
	}
/**
* Module cache helper
*
* Caching modes:
* To be set in XML:
* 'static' One cache file for all pages with the same module parameters
* 'oldstatic' 1.5 definition of module caching, one cache file for all pages
* with the same module id and user aid,
* 'itemid' Changes on itemid change, to be called from inside the module:
* 'safeuri' Id created from $cacheparams->modeparams array,
* 'id' Module sets own cache id's
*
* @param object $module Module object
* @param object $moduleparams Module parameters
* @param object $cacheparams Module cache parameters - id or url parameters, depending on the module cache mode
*
* @return string
*
* @see JFilterInput::clean()
* @since 1.6
*/
public static function moduleCache($module, $moduleparams, $cacheparams)
{
if (!isset($cacheparams->modeparams))
{
$cacheparams->modeparams = null;
}
if (!isset($cacheparams->cachegroup))
{
$cacheparams->cachegroup = $module->module;
}
$user = JFactory::getUser();
$conf = JFactory::getConfig();
/** @var JCacheControllerCallback $cache */
$cache = JFactory::getCache($cacheparams->cachegroup, 'callback');
// Turn cache off for internal callers if parameters are set to off and for all logged in users
if ($moduleparams->get('owncache', null) === '0' || $conf->get('caching') == 0 || $user->get('id'))
{
$cache->setCaching(false);
}
// Module cache is set in seconds, global cache in minutes, setLifeTime works in minutes
$cache->setLifeTime($moduleparams->get('cache_time', $conf->get('cachetime') * 60) / 60);
$wrkaroundoptions = array('nopathway' => 1, 'nohead' => 0, 'nomodules' => 1, 'modulemode' => 1, 'mergehead' => 1);
$wrkarounds = true;
$view_levels = md5(serialize($user->getAuthorisedViewLevels()));
switch ($cacheparams->cachemode)
{
case 'id':
$ret = $cache->get(
array($cacheparams->class, $cacheparams->method),
$cacheparams->methodparams,
$cacheparams->modeparams,
$wrkarounds,
$wrkaroundoptions
);
break;
case 'safeuri':
$secureid = null;
if (is_array($cacheparams->modeparams))
{
$input = JFactory::getApplication()->input;
$uri = $input->getArray();
$safeuri = new stdClass;
$noHtmlFilter = JFilterInput::getInstance();
foreach ($cacheparams->modeparams as $key => $value)
{
// Use int filter for id/catid to clean out spamy slugs
if (isset($uri[$key]))
{
$safeuri->$key = $noHtmlFilter->clean($uri[$key], $value);
}
}
}
$secureid = md5(serialize(array($safeuri, $cacheparams->method, $moduleparams)));
$ret = $cache->get(
array($cacheparams->class, $cacheparams->method),
$cacheparams->methodparams,
$module->id . $view_levels . $secureid,
$wrkarounds,
$wrkaroundoptions
);
break;
case 'static':
$ret = $cache->get(
array($cacheparams->class, $cacheparams->method),
$cacheparams->methodparams,
$module->module . md5(serialize($cacheparams->methodparams)),
$wrkarounds,
$wrkaroundoptions
);
break;
// Provided for backward compatibility, not really useful.
case 'oldstatic':
$ret = $cache->get(
array($cacheparams->class, $cacheparams->method),
$cacheparams->methodparams,
$module->id . $view_levels,
$wrkarounds,
$wrkaroundoptions
);
break;
case 'itemid':
default:
$ret = $cache->get(
array($cacheparams->class, $cacheparams->method),
$cacheparams->methodparams,
$module->id . $view_levels . JFactory::getApplication()->input->getInt('Itemid', null),
$wrkarounds,
$wrkaroundoptions
);
break;
}
return $ret;
}
}
| {
"pile_set_name": "Github"
} |
/*
* Miscelaneous DaVinci functions.
*
* Copyright (C) 2009 Nick Thompson, GE Fanuc Ltd, <[email protected]>
* Copyright (C) 2007 Sergey Kubushyn <[email protected]>
* Copyright (C) 2008 Lyrtech <www.lyrtech.com>
* Copyright (C) 2004 Texas Instruments.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <common.h>
#include <i2c.h>
#include <net.h>
#include <asm/arch/hardware.h>
#include <asm/io.h>
#include <asm/arch/davinci_misc.h>
DECLARE_GLOBAL_DATA_PTR;
#ifndef CONFIG_SPL_BUILD
/*
 * Probe the size of the external SDRAM starting at CONFIG_SYS_SDRAM_BASE
 * and record it in the global data struct. Always returns 0 (success).
 */
int dram_init(void)
{
	/* dram_init must store complete ramsize in gd->ram_size */
	gd->ram_size = get_ram_size(
			(void *)CONFIG_SYS_SDRAM_BASE,
			CONFIG_MAX_RAM_BANK_SIZE);
	return 0;
}
/* Publish the single SDRAM bank (base address and probed size) in bd_info. */
void dram_init_banksize(void)
{
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	gd->bd->bi_dram[0].size = gd->ram_size;
}
#endif
#ifdef CONFIG_DRIVER_TI_EMAC
/*
 * Read ethernet MAC address from EEPROM for DVEVM compatible boards.
 * Returns 1 if found, 0 otherwise.
 *
 * The 6-byte address is read over I2C from EEPROM offset 0x7F00 into buf.
 * Compiled to a stub returning 0 when CONFIG_SYS_I2C_EEPROM_ADDR is unset.
 */
int dvevm_read_mac_address(uint8_t *buf)
{
#ifdef CONFIG_SYS_I2C_EEPROM_ADDR
	/* Read MAC address. */
	if (i2c_read(CONFIG_SYS_I2C_EEPROM_ADDR, 0x7F00,
		CONFIG_SYS_I2C_EEPROM_ADDR_LEN, (uint8_t *) &buf[0], 6))
		goto i2cerr;
	/* Check that MAC address is valid (not all-zero/multicast). */
	if (!is_valid_ether_addr(buf))
		goto err;
	return 1; /* Found */
i2cerr:
	printf("Read from EEPROM @ 0x%02x failed\n",
		CONFIG_SYS_I2C_EEPROM_ADDR);
err:
#endif /* CONFIG_SYS_I2C_EEPROM_ADDR */
	return 0;
}
/*
 * Set the mii mode as MII or RMII
 *
 * Read-modify-write of bit 8 in the SYSCFG CFGCHIP3 register:
 * mode_sel == 0 clears the bit (MII), any other value sets it (RMII).
 * NOTE(review): bit-8-is-RMII-select matches the caller convention implied by
 * the comment above — confirm against the da8xx SYSCFG reference manual.
 */
void davinci_emac_mii_mode_sel(int mode_sel)
{
	int val;
	val = readl(&davinci_syscfg_regs->cfgchip3);
	if (mode_sel == 0)
		val &= ~(1 << 8);
	else
		val |= (1 << 8);
	writel(val, &davinci_syscfg_regs->cfgchip3);
}
#endif
/*
 * If there is no MAC address in the environment, then it will be initialized
 * (silently) from the value in the EEPROM.
 *
 * Fixes: the condition was inverted (`if (ret)`), so the EEPROM value was
 * written precisely when the environment ALREADY had a valid address, and a
 * spurious failure message was printed when it did not. The debug trace also
 * printed the (invalid) env buffer instead of the ROM address actually set.
 */
void davinci_sync_env_enetaddr(uint8_t *rom_enetaddr)
{
	uint8_t env_enetaddr[6];
	int ret;
	ret = eth_getenv_enetaddr_by_index("eth", 0, env_enetaddr);
	if (!ret) {
		/*
		 * There is no MAC address in the environment, so we
		 * initialize it from the value in the EEPROM.
		 */
		debug("### Setting environment from EEPROM MAC address = "
			"\"%pM\"\n",
			rom_enetaddr);
		/* eth_setenv_enetaddr() returns 0 on success; ret == 1 means OK */
		ret = !eth_setenv_enetaddr("ethaddr", rom_enetaddr);
	}
	if (!ret)
		printf("Failed to set mac address from EEPROM\n");
}
#endif /* CONFIG_DRIVER_TI_EMAC */
#if defined(CONFIG_SOC_DA8XX)
#ifndef CONFIG_USE_IRQ
/* Quiesce the AINTC when U-Boot is built without interrupt support. */
void irq_init(void)
{
	/*
	 * Mask all IRQs by clearing the global enable and setting
	 * the enable clear for all the 90 interrupts.
	 */
	writel(0, &davinci_aintc_regs->ger);
	writel(0, &davinci_aintc_regs->hier);
	writel(0xffffffff, &davinci_aintc_regs->ecr1);
	writel(0xffffffff, &davinci_aintc_regs->ecr2);
	writel(0xffffffff, &davinci_aintc_regs->ecr3);
}
#endif
/*
 * Enable PSC for various peripherals.
 *
 * Walks the board-supplied table of n_items LPSC resources and powers each
 * module on via lpsc_on(). Always returns 0.
 */
int da8xx_configure_lpsc_items(const struct lpsc_resource *item,
			const int n_items)
{
	int i;
	for (i = 0; i < n_items; i++)
		lpsc_on(item[i].lpsc_no);
	return 0;
}
#endif
| {
"pile_set_name": "Github"
} |
<?php
class Text_Wiki_Render_Plain_Break extends Text_Wiki_Render {
    /**
     *
     * Renders a line-break token as plain text.
     *
     * @access public
     *
     * @param array $options The "options" portion of the token (second
     * element); unused here, since a plain-text break is always a newline.
     *
     * @return string A single newline character.
     *
     */
    function token($options)
    {
        return "\n";
    }
}
?>
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2007 Scott Lembcke
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <math.h>
#include "chipmunk_private.h"
#include "constraints/util.h"
/* Default force callback: linear (Hooke's law) force proportional to the
 * displacement of the current anchor distance from the rest length. */
static cpFloat
defaultSpringForce(struct cpConstraint * constraint, cpFloat dist){
	cpDampedSpring* spring = (cpDampedSpring*)constraint;
	return (spring->restLength - dist)*spring->stiffness;
}
/* Default force function, exposed as a global function pointer. */
cpDampedSpringForceFunc cpDampedSpringUpdateForceDefault = defaultSpringForce;
/* Per-step setup: compute world-space anchors, the spring axis, the effective
 * mass along that axis and the damping coefficient, then apply the spring
 * force for this timestep as an impulse (force * dt). */
static void
preStep(cpDampedSpring *spring, cpFloat dt, cpFloat dt_inv)
{
	CONSTRAINT_BEGIN(spring, a, b);
	/* Rotate the local anchor points by each body's rotation. */
	spring->r1 = cpvrotate(spring->anchr1, a->rot);
	spring->r2 = cpvrotate(spring->anchr2, b->rot);
	cpVect delta = cpvsub(cpvadd(b->p, spring->r2), cpvadd(a->p, spring->r1));
	cpFloat dist = cpvlength(delta);
	/* Guard the zero-length case: dividing by INFINITY yields a zero axis. */
	spring->n = cpvmult(delta, 1.0f/(dist ? dist : INFINITY));
	cpFloat k = k_scalar(a, b, spring->r1, spring->r2, spring->n);
	spring->nMass = 1.0f/k;
	spring->target_vrn = 0.0f;
	/* Exponential velocity decay factor for the damping applied in applyImpulse. */
	spring->v_coef = 1.0f - cpfexp(-spring->damping*dt*k);
	// apply spring force
	cpFloat f_spring = spring->springForceFunc((cpConstraint*)spring, dist);
	apply_impulses(a, b, spring->r1, spring->r2, cpvmult(spring->n, f_spring*dt));
}
/* Solver iteration: apply the damping impulse along the spring axis, tracking
 * the already-damped velocity in target_vrn so repeated iterations converge
 * instead of over-damping. */
static void
applyImpulse(cpDampedSpring *spring)
{
	CONSTRAINT_BEGIN(spring, a, b);
	cpVect n = spring->n;
	cpVect r1 = spring->r1;
	cpVect r2 = spring->r2;
	// compute relative velocity (minus the portion damped in earlier iterations)
	cpFloat vrn = normal_relative_velocity(a, b, r1, r2, n) - spring->target_vrn;
	// compute velocity loss from drag
	// not 100% certain this is derived correctly, though it makes sense
	cpFloat v_damp = -vrn*spring->v_coef;
	spring->target_vrn = vrn + v_damp;
	apply_impulses(a, b, spring->r1, spring->r2, cpvmult(spring->n, v_damp*spring->nMass));
}
/* Damped springs apply their force directly in preStep rather than
 * accumulating a clamped constraint impulse, so there is nothing to report. */
static cpFloat
getImpulse(cpConstraint *constraint)
{
	return 0.0f;
}
/* Virtual method table wiring this constraint type into the generic solver. */
static const cpConstraintClass klass = {
	(cpConstraintPreStepFunction)preStep,
	(cpConstraintApplyImpulseFunction)applyImpulse,
	(cpConstraintGetImpulseFunction)getImpulse,
};
CP_DefineClassGetter(cpDampedSpring)
/* Allocate uninitialized storage for a damped spring; pair with Init. */
cpDampedSpring *
cpDampedSpringAlloc(void)
{
	return (cpDampedSpring *)cpmalloc(sizeof(cpDampedSpring));
}
/* Initialize a damped spring between bodies a and b, anchored at the given
 * body-local points, with the supplied rest length, stiffness and damping.
 * The force callback starts as the linear default; returns spring. */
cpDampedSpring *
cpDampedSpringInit(cpDampedSpring *spring, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat restLength, cpFloat stiffness, cpFloat damping)
{
	cpConstraintInit((cpConstraint *)spring, cpDampedSpringGetClass(), a, b);
	spring->anchr1 = anchr1;
	spring->anchr2 = anchr2;
	spring->restLength = restLength;
	spring->stiffness = stiffness;
	spring->damping = damping;
	spring->springForceFunc = (cpDampedSpringForceFunc)defaultSpringForce;
	return spring;
}
/* Convenience constructor: allocate and initialize in one call. */
cpConstraint *
cpDampedSpringNew(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat restLength, cpFloat stiffness, cpFloat damping)
{
	return (cpConstraint *)cpDampedSpringInit(cpDampedSpringAlloc(), a, b, anchr1, anchr2, restLength, stiffness, damping);
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"io"
)
// fixedWriter implements the io.Writer interface and intentially allows
// testing of error paths by forcing short writes.
type fixedWriter struct {
b []byte
pos int
}
// Write writes the contents of p to w. When the contents of p would cause
// the writer to exceed the maximum allowed size of the fixed writer,
// io.ErrShortWrite is returned and the writer is left unchanged.
//
// This satisfies the io.Writer interface.
func (w *fixedWriter) Write(p []byte) (n int, err error) {
lenp := len(p)
if w.pos+lenp > cap(w.b) {
return 0, io.ErrShortWrite
}
n = lenp
w.pos += copy(w.b[w.pos:], p)
return
}
// Bytes returns the bytes already written to the fixed writer.
func (w *fixedWriter) Bytes() []byte {
return w.b
}
// newFixedWriter returns a new io.Writer that will error once more bytes than
// the specified max have been written.
func newFixedWriter(max int) io.Writer {
b := make([]byte, max, max)
fw := fixedWriter{b, 0}
return &fw
}
// fixedReader implements the io.Reader interface and intentially allows
// testing of error paths by forcing short reads.
type fixedReader struct {
buf []byte
pos int
iobuf *bytes.Buffer
}
// Read reads the next len(p) bytes from the fixed reader. When the number of
// bytes read would exceed the maximum number of allowed bytes to be read from
// the fixed writer, an error is returned.
//
// This satisfies the io.Reader interface.
func (fr *fixedReader) Read(p []byte) (n int, err error) {
n, err = fr.iobuf.Read(p)
fr.pos += n
return
}
// newFixedReader returns a new io.Reader that will error once more bytes than
// the specified max have been read.
func newFixedReader(max int, buf []byte) io.Reader {
b := make([]byte, max, max)
if buf != nil {
copy(b[:], buf)
}
iobuf := bytes.NewBuffer(b)
fr := fixedReader{b, 0, iobuf}
return &fr
}
| {
"pile_set_name": "Github"
} |
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - Control"]
pub ctrla: CTRLA,
_reserved1: [u8; 3usize],
#[doc = "0x04 - Software Event"]
pub swevt: SWEVT,
#[doc = "0x08 - Priority Control"]
pub prictrl: PRICTRL,
_reserved3: [u8; 7usize],
#[doc = "0x10 - Channel Pending Interrupt"]
pub intpend: INTPEND,
_reserved4: [u8; 2usize],
#[doc = "0x14 - Interrupt Status"]
pub intstatus: INTSTATUS,
#[doc = "0x18 - Busy Channels"]
pub busych: BUSYCH,
#[doc = "0x1c - Ready Users"]
pub readyusr: READYUSR,
#[doc = "0x20 - CHANNEL\\[%s\\]"]
pub channel: [CHANNEL; 32],
#[doc = "0x120 - User Multiplexer n"]
pub user: [USER; 67],
}
#[doc = r"Register block"]
#[repr(C)]
pub struct CHANNEL {
#[doc = "0x00 - Channel n Control"]
pub channel: self::channel::CHANNEL,
#[doc = "0x04 - Channel n Interrupt Enable Clear"]
pub chintenclr: self::channel::CHINTENCLR,
#[doc = "0x05 - Channel n Interrupt Enable Set"]
pub chintenset: self::channel::CHINTENSET,
#[doc = "0x06 - Channel n Interrupt Flag Status and Clear"]
pub chintflag: self::channel::CHINTFLAG,
#[doc = "0x07 - Channel n Status"]
pub chstatus: self::channel::CHSTATUS,
}
#[doc = r"Register block"]
#[doc = "CHANNEL\\[%s\\]"]
pub mod channel;
#[doc = "Control\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctrla](ctrla) module"]
pub type CTRLA = crate::Reg<u8, _CTRLA>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _CTRLA;
#[doc = "`read()` method returns [ctrla::R](ctrla::R) reader structure"]
impl crate::Readable for CTRLA {}
#[doc = "`write(|w| ..)` method takes [ctrla::W](ctrla::W) writer structure"]
impl crate::Writable for CTRLA {}
#[doc = "Control"]
pub mod ctrla;
#[doc = "Software Event\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [swevt](swevt) module"]
pub type SWEVT = crate::Reg<u32, _SWEVT>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SWEVT;
#[doc = "`write(|w| ..)` method takes [swevt::W](swevt::W) writer structure"]
impl crate::Writable for SWEVT {}
#[doc = "Software Event"]
pub mod swevt;
#[doc = "Priority Control\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [prictrl](prictrl) module"]
pub type PRICTRL = crate::Reg<u8, _PRICTRL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PRICTRL;
#[doc = "`read()` method returns [prictrl::R](prictrl::R) reader structure"]
impl crate::Readable for PRICTRL {}
#[doc = "`write(|w| ..)` method takes [prictrl::W](prictrl::W) writer structure"]
impl crate::Writable for PRICTRL {}
#[doc = "Priority Control"]
pub mod prictrl;
#[doc = "Channel Pending Interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intpend](intpend) module"]
pub type INTPEND = crate::Reg<u16, _INTPEND>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTPEND;
#[doc = "`read()` method returns [intpend::R](intpend::R) reader structure"]
impl crate::Readable for INTPEND {}
#[doc = "`write(|w| ..)` method takes [intpend::W](intpend::W) writer structure"]
impl crate::Writable for INTPEND {}
#[doc = "Channel Pending Interrupt"]
pub mod intpend;
#[doc = "Interrupt Status\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intstatus](intstatus) module"]
pub type INTSTATUS = crate::Reg<u32, _INTSTATUS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTSTATUS;
#[doc = "`read()` method returns [intstatus::R](intstatus::R) reader structure"]
impl crate::Readable for INTSTATUS {}
#[doc = "Interrupt Status"]
pub mod intstatus;
#[doc = "Busy Channels\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [busych](busych) module"]
pub type BUSYCH = crate::Reg<u32, _BUSYCH>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _BUSYCH;
#[doc = "`read()` method returns [busych::R](busych::R) reader structure"]
impl crate::Readable for BUSYCH {}
#[doc = "Busy Channels"]
pub mod busych;
#[doc = "Ready Users\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [readyusr](readyusr) module"]
pub type READYUSR = crate::Reg<u32, _READYUSR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _READYUSR;
#[doc = "`read()` method returns [readyusr::R](readyusr::R) reader structure"]
impl crate::Readable for READYUSR {}
#[doc = "Ready Users"]
pub mod readyusr;
#[doc = "User Multiplexer n\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [user](user) module"]
pub type USER = crate::Reg<u32, _USER>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _USER;
#[doc = "`read()` method returns [user::R](user::R) reader structure"]
impl crate::Readable for USER {}
#[doc = "`write(|w| ..)` method takes [user::W](user::W) writer structure"]
impl crate::Writable for USER {}
#[doc = "User Multiplexer n"]
pub mod user;
| {
"pile_set_name": "Github"
} |
RSpec.describe Readthis::Serializers do
CustomSerializer = Class.new
AnotherSerializer = Class.new
describe '#<<' do
it 'appends new serializers' do
serializers = Readthis::Serializers.new
serializers << CustomSerializer
expect(serializers.marshals).to include(CustomSerializer)
expect(serializers.flags).to eq([1, 2, 3, 4])
end
it 'increments flags' do
serializers = Readthis::Serializers.new
serializers << CustomSerializer
serializers << AnotherSerializer
expect(serializers.flags).to eq((1..5).to_a)
end
it 'prevents more than seven serializers' do
serializers = Readthis::Serializers.new
serializers << Class.new until serializers.flags.length >= 7
expect do
serializers << Class.new
end.to raise_error(Readthis::SerializersLimitError)
end
end
describe '#assoc' do
it 'looks up serializers by module' do
serializers = Readthis::Serializers.new
expect(serializers.assoc(Marshal)).to eq(0x1)
end
it 'raises a helpful error when the serializer is unknown' do
serializers = Readthis::Serializers.new
expect do
serializers.assoc(CustomSerializer)
end.to raise_error(Readthis::UnknownSerializerError)
end
end
describe '#rassoc' do
let(:serializers) { Readthis::Serializers.new }
it 'inverts the current set of serializers' do
expect(serializers.rassoc(1)).to eq(Marshal)
end
it 'returns custom serializers' do
serializers << CustomSerializer
expect(serializers.rassoc(4)).to eq(CustomSerializer)
end
it 'inverts default serializers after adding custom one' do
serializers << CustomSerializer
expect(serializers.rassoc(1)).to eq(Marshal)
expect(serializers.rassoc(3)).to eq(JSON)
end
it 'takes into account only first 3 bytes of passed integer' do
expect(serializers.rassoc(1)).to eq(Marshal)
expect(serializers.rassoc(11)).to eq(JSON)
serializers << CustomSerializer
expect(serializers.rassoc(12)).to eq(CustomSerializer)
end
end
describe '#freeze!' do
it 'does now allow appending after freeze' do
serializers = Readthis::Serializers.new
serializers.freeze!
expect do
serializers << CustomSerializer
end.to raise_error(Readthis::SerializersFrozenError)
end
end
describe '#reset!' do
it 'reverts back to the original set of serializers' do
serializers = Readthis::Serializers.new
serializers << Class.new
serializers.reset!
expect(serializers.serializers.length).to eq(3)
end
end
end
| {
"pile_set_name": "Github"
} |
module ApprovalGroup {
import 0x1.Signature;
resource T {
// we do not have collection support in Move now, so illustrate
// using 2 out-of 3
// for simplicity just use the plain public key here. We could
// also use the hash here as how auth key works
pk1: vector<u8>,
pk2: vector<u8>,
pk3: vector<u8>,
// the threshold policy
threshold: u64,
// Recipient address allowlist policy ...
}
// create a new approval group
public create(pk1: vector<u8>,
pk2: vector<u8>,
pk3: vector<u8>): Self.T {
return T {
pk1: move(pk1),
pk2: move(pk2),
pk3: move(pk3),
threshold: 2
};
}
// evaluate whether the approval group can exercise its authority
// right now only the threshold policy is checked
public has_authority(group: &Self.T,
pk1: vector<u8>,
sig1: vector<u8>,
pk2: vector<u8>,
sig2: vector<u8>,
hash: vector<u8>): bool {
let result1: bool;
let result2: bool;
assert(copy(pk1) != copy(pk2), 1000);
result1 = Self.verify_sig(copy(group), move(pk1), move(sig1), copy(hash));
result2 = Self.verify_sig(move(group), move(pk2), move(sig2), move(hash));
return (move(result1) && move(result2));
}
// helper function to evaluate the threshold policy
verify_sig(group: &Self.T, pk: vector<u8>, sig: vector<u8>, hash: vector<u8>): bool {
let result: bool;
if ((copy(pk) == *& copy(group).pk1) ||
(copy(pk) == *& copy(group).pk2) ||
(copy(pk) == *& copy(group).pk3)) {
result = Signature.ed25519_verify(move(sig), move(pk), move(hash));
} else {
result = false;
}
_ = move(group);
return move(result);
}
}
//! new-transaction
module ColdWallet {
import 0x1.Hash;
import 0x1.LBR;
import 0x1.LCS;
import 0x1.Libra;
import 0x1.Vector;
import 0x1.Signer;
import {{default}}.ApprovalGroup;
resource T {
balance: Libra.Libra<LBR.LBR>,
sequence_num: u64,
genesis_group: ApprovalGroup.T,
}
// This struct is unused, only intended to define the format of a transaction
// the serialization of the transaction is the concatnation of all the fields
struct transaction {
// The address that is going to be paid
payee: address,
// The amount of Libra.Libra<LBR.LBR> sent
amount: u64
}
// create a new ColdWallet with a default genesis group
public create(account: &signer, genesis_group: ApprovalGroup.T) {
let zero_balance: Libra.Libra<LBR.LBR>;
let wallet: Self.T;
zero_balance = Libra.zero<LBR.LBR>();
wallet = T {
balance: move(zero_balance),
sequence_num: 0,
genesis_group: move(genesis_group)
};
move_to<T>(move(account), move(wallet));
return;
}
public publish(account: &signer, self: Self.T) {
move_to<T>(move(account), move(self));
return;
}
// deposit money into a payee's cold wallet
public deposit(payee: address, to_deposit: Libra.Libra<LBR.LBR>) acquires T {
let payee_wallet_ref: &mut Self.T;
// Load the payee's account
payee_wallet_ref = borrow_global_mut<T>(move(payee));
// Deposit the `to_deposit` coin
Libra.deposit<LBR.LBR>(&mut move(payee_wallet_ref).balance, move(to_deposit));
return;
}
// withdraw money from this wallet, and send to a payee account
// Note that this implementation moves the fund into the payee's Libra account, without assuming
// there's a cold wallet module under that account
public withdraw_from_payer(
payer_: &signer,
payee: address,
amount: u64,
pk1: vector<u8>,
sig1: vector<u8>,
pk2: vector<u8>,
sig2: vector<u8>
) acquires T {
let payer: address;
let payer_ref: &mut Self.T;
let transaction_bytes: vector<u8>;
let prefix: vector<u8>;
let hash: vector<u8>;
let seq: u64;
let withdraw_amount: Libra.Libra<LBR.LBR>;
let has_authority: bool;
let account_balance: u64;
payer = Signer.address_of(copy(payer_));
payer_ref = borrow_global_mut<T>(copy(payer));
account_balance = Libra.value<LBR.LBR>(©(payer_ref).balance);
assert(copy(amount) <= move(account_balance), 1001);
// obtain the expected serialization of the transaction struct
transaction_bytes = Self.get_transaction_bytes(copy(payer), copy(payee), copy(amount), copy(payer_ref));
hash = Hash.sha3_256(move(transaction_bytes));
has_authority = ApprovalGroup.has_authority(©(payer_ref).genesis_group,
move(pk1), move(sig1),
move(pk2), move(sig2), move(hash));
// check to see if genesis group has authority to approve
if (move(has_authority)) {
// bump the sequence number
seq = *©(payer_ref).sequence_num;
*(&mut copy(payer_ref).sequence_num) = move(seq) + 1;
withdraw_amount = Libra.withdraw<LBR.LBR>(&mut copy(payer_ref).balance, move(amount));
// LibraAccount no longer has this API
//LibraAccount.deposit<LBR.LBR>(copy(payer_), move(payee), move(withdraw_amount));
Libra.destroy_zero<LBR.LBR>(move(withdraw_amount));
} else {
// how to handle error?
}
_ = move(payer_ref);
return;
}
// helper to get the expected serialization of a transaction
// serialization format: 'prefix' || payee_address || amount || sequence_number
get_transaction_bytes(payer: address, payee: address, amount: u64, wallet: &mut Self.T): vector<u8> {
let constant: vector<u8>;
let payer_bytes: vector<u8>;
let payee_bytes: vector<u8>;
let amount_bytes: vector<u8>;
let seq_bytes: vector<u8>;
let first_two: vector<u8>;
let first_three: vector<u8>;
let transaction_bytes: vector<u8>;
// TODO: consider moving into resource
// TODO: Move doesn't support string now. As a workaround,
// TODO: the prefix is the hex encoding of "coldwallet.transaction"
constant = h"636F6C6477616C6C65742E7472616E73616374696F6E";
payer_bytes = LCS.to_bytes<address>(&payer);
transaction_bytes = move(payer_bytes);
Vector.append<u8>(&mut transaction_bytes, move(constant));
payee_bytes = LCS.to_bytes<address>(&payee);
amount_bytes = LCS.to_bytes<u64>(&amount);
seq_bytes = LCS.to_bytes<u64>(&move(wallet).sequence_num);
Vector.append<u8>(&mut transaction_bytes, move(payee_bytes));
Vector.append<u8>(&mut transaction_bytes, move(amount_bytes));
Vector.append<u8>(&mut transaction_bytes, move(seq_bytes));
return move(transaction_bytes);
}
}
//! new-transaction
import {{default}}.ApprovalGroup;
import {{default}}.ColdWallet;
main(account: &signer) {
let genesis_group: ApprovalGroup.T;
let pk1: vector<u8>;
let pk2: vector<u8>;
let pk3: vector<u8>;
let wallet : ColdWallet.T;
pk1 = h"1234";
pk2 = h"5678";
pk3 = h"abc123";
genesis_group = ApprovalGroup.create(move(pk1), move(pk2), move(pk3));
ColdWallet.create(move(account), move(genesis_group));
return;
}
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.waveprotocol.wave.model.adt.docbased;
import org.waveprotocol.wave.model.adt.ObservableBasicMapTestBase;
import org.waveprotocol.wave.model.document.util.DefaultDocumentEventRouter;
import org.waveprotocol.wave.model.testing.BasicFactories;
import org.waveprotocol.wave.model.util.Serializer;
import org.waveprotocol.wave.model.wave.data.impl.ObservablePluggableMutableDocument;
import java.util.Collections;
/**
* Tests for the {@link DocumentBasedBasicMap} class.
*
*/
public class ObservableBasicMapWithDocumentBasedBasicMapTest extends ObservableBasicMapTestBase {
private static final String ENTRY_TAG = "read";
private static final String KEY_ATTR = "blipId";
private static final String VALUE_ATTR = "version";
@Override
protected void createMap() {
ObservablePluggableMutableDocument doc = BasicFactories.observableDocumentProvider().create(
"data", Collections.<String, String> emptyMap());
map = DocumentBasedBasicMap.create(DefaultDocumentEventRouter.create(doc),
doc.getDocumentElement(), Serializer.STRING, Serializer.INTEGER, ENTRY_TAG,
KEY_ATTR, VALUE_ATTR);
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2017 Code Above Lab LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codeabovelab.dm.cluman.cluster.docker.management.argument;
import lombok.Data;
/**
*/
@Data
public class SwarmLeaveArg {
/**
* 'Force leave swarm, even if this is the last manager or that it will break the cluster.' <p/>
* Default 'false'.
*/
private Boolean force;
}
| {
"pile_set_name": "Github"
} |
import QtQuick 2.0
import QtQuick.Controls 1.0
import QtQuick.Layouts 1.0
import im.ricochet 1.0
MouseArea {
id: offlineState
acceptedButtons: Qt.LeftButton | Qt.RightButton
visible: opacity > 0
enabled: visible
opacity: 0
clip: true
Behavior on opacity { NumberAnimation { duration: 500 } }
Rectangle {
anchors.fill: parent
color: palette.base
}
Label {
id: label
anchors {
horizontalCenter: parent.horizontalCenter
verticalCenter: parent.verticalCenter
verticalCenterOffset: parent.height / -3
}
font.pointSize: 14
}
Rectangle {
id: indicator
width: label.width
anchors {
top: label.bottom
topMargin: 2
}
height: 2
x: label.x
onWidthChanged: if (indicatorAnimation.running) indicatorAnimation.restart()
property alias running: indicatorAnimation.running
SequentialAnimation {
id: indicatorAnimation
function restart() {
stop()
animation1.to = offlineState.width
animation2.from = -indicator.width
animation2.to = offlineState.width
start()
}
NumberAnimation {
id: animation1
target: indicator
property: "x"
to: offlineState.width
duration: 500
easing.type: Easing.InQuad
}
NumberAnimation {
id: animation2
loops: Animation.Infinite
target: indicator
property: "x"
from: -indicator.width
to: offlineState.width
duration: 1500
easing.type: Easing.OutInQuad
}
}
}
onWidthChanged: if (indicatorAnimation.running) indicatorAnimation.restart()
Label {
id: detailedLabel
anchors {
left: parent.left
right: parent.right
top: indicator.bottom
margins: 16
}
wrapMode: Text.Wrap
horizontalAlignment: Text.AlignHCenter
color: Qt.lighter(palette.text, 1.2)
font.pointSize: 11
text: torControl.errorMessage
}
GridLayout {
id: buttonRow
visible: false
anchors {
left: parent.left
right: parent.right
top: detailedLabel.bottom
margins: 16
topMargin: 32
}
Button {
Layout.alignment: Qt.AlignRight | Qt.AlignVCenter
text: qsTr("Configure")
onClicked: {
var object = createDialog("NetworkSetupWizard.qml", { }, window)
object.visible = true
}
}
Button {
Layout.alignment: Qt.AlignLeft | Qt.AlignVCenter
text: qsTr("Details")
onClicked: {
openPreferences("TorPreferences.qml")
}
}
}
states: [
State {
name: "connected"
when: torControl.torStatus === TorControl.TorReady
PropertyChanges {
target: offlineState
opacity: 0
}
},
State {
name: "failed"
when: torControl.status === TorControl.Error
PropertyChanges {
target: offlineState
opacity: 1
}
PropertyChanges {
target: label
text: qsTr("Connection failed")
}
PropertyChanges {
target: indicator
color: "#ffdcc4"
running: false
}
PropertyChanges {
target: buttonRow
visible: true
}
},
State {
name: "connecting"
when: torControl.torStatus !== TorControl.TorReady
PropertyChanges {
target: offlineState
opacity: 1
}
PropertyChanges {
target: label
//: \u2026 is ellipsis
text: qsTr("Connecting\u2026")
}
PropertyChanges {
target: indicator
color: "#c4e7ff"
running: true
x: label.x
}
}
]
transitions: [
Transition {
to: "connecting"
SequentialAnimation {
PropertyAction {
target: label
property: "text"
}
PropertyAction {
target: indicator
property: "running"
}
ColorAnimation {
target: indicator
property: "color"
duration: 1000
}
}
},
Transition {
to: "failed"
SequentialAnimation {
PropertyAction {
target: indicator
property: "running"
}
PropertyAction {
target: label
property: "text"
}
ParallelAnimation {
NumberAnimation {
target: indicator
property: "x"
duration: 1000
easing.type: Easing.OutQuad
}
ColorAnimation {
target: indicator
property: "color"
duration: 1000
}
}
}
}
]
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<!--
Mark common families with their generics so we'll get
something reasonable
-->
<!--
Serif faces
-->
<alias>
<family>Nazli</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Lotoos</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Mitra</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Ferdosi</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Badr</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Zar</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Titr</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Jadid</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Kochi Mincho</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>AR PL SungtiL GB</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>AR PL Mingti2L Big5</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>MS 明朝</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>NanumMyeongjo</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>UnBatang</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Baekmuk Batang</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>MgOpen Canonica</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>Sazanami Mincho</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>AR PL ZenKai Uni</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>ZYSong18030</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>FreeSerif</family>
<default><family>serif</family></default>
</alias>
<alias>
<family>SimSun</family>
<default><family>serif</family></default>
</alias>
<!--
Sans-serif faces
-->
<alias>
<family>Arshia</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Elham</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Farnaz</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Nasim</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Sina</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Roya</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Koodak</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Terafik</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Kochi Gothic</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>AR PL KaitiM GB</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>AR PL KaitiM Big5</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>MS ゴシック</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>NanumGothic</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>UnDotum</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Baekmuk Dotum</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>MgOpen Modata</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>Sazanami Gothic</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>AR PL ShanHeiSun Uni</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>ZYSong18030</family>
<default><family>sans-serif</family></default>
</alias>
<alias>
<family>FreeSans</family>
<default><family>sans-serif</family></default>
</alias>
<!--
Monospace faces
-->
<alias>
<family>NSimSun</family>
<default><family>monospace</family></default>
</alias>
<alias>
<family>ZYSong18030</family>
<default><family>monospace</family></default>
</alias>
<alias>
<family>NanumGothicCoding</family>
<default><family>monospace</family></default>
</alias>
<alias>
<family>FreeMono</family>
<default><family>monospace</family></default>
</alias>
<!--
Fantasy faces
-->
<alias>
<family>Homa</family>
<default><family>fantasy</family></default>
</alias>
<alias>
<family>Kamran</family>
<default><family>fantasy</family></default>
</alias>
<alias>
<family>Fantezi</family>
<default><family>fantasy</family></default>
</alias>
<alias>
<family>Tabassom</family>
<default><family>fantasy</family></default>
</alias>
<!--
Cursive faces
-->
<alias>
<family>IranNastaliq</family>
<default><family>cursive</family></default>
</alias>
<alias>
<family>Nafees Nastaleeq</family>
<default><family>cursive</family></default>
</alias>
</fontconfig>
| {
"pile_set_name": "Github"
} |
import * as React from 'react'
import { storiesOf } from '@storybook/react'
import { Example, Stack } from '@auth0/cosmos/_helpers/story-helpers'
import { Button, ButtonGroup } from '../../../'
storiesOf('Button', module).add('with icon', () => (
<Example title="Button with icon" align="center">
<Stack>
<Button appearance="primary" icon="plus">
Create Client
</Button>
<Button appearance="secondary" icon="play-circle">
Tutorial
</Button>
<Button appearance="link" icon="copy" />
<Button icon="copy" label="Copy to Clipboard" />
<Button icon="copy" href="https://auth0.com" label="Copy to Clipboard" />
<Button icon="chevron-left">Previous page</Button>
<Button iconAlign="right" icon="chevron-right">
Next page
</Button>
</Stack>
</Example>
))
| {
"pile_set_name": "Github"
} |
/****************************************************************************
Copyright (c) 2015 Neo Kim ([email protected])
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "UIScrollViewBar.h"
#include "CCImage.h"
#include "2d/CCSprite.h"
#include "base/ccUtils.h"
NS_CC_BEGIN
namespace ui {
static const char* HALF_CIRCLE_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAAAwAAAAGCAMAAADAMI+zAAAAJ1BMVEX///////////////////////////////////////////////////9Ruv0SAAAADHRSTlMABgcbbW7Hz9Dz+PmlcJP5AAAAMElEQVR4AUXHwQ2AQAhFwYcLH1H6r1djzDK3ASxUpTBeK/uTCyz7dx54b44m4p5cD1MwAooEJyk3AAAAAElFTkSuQmCC";
static const char* BODY_IMAGE_1_PIXEL_HEIGHT = "iVBORw0KGgoAAAANSUhEUgAAAAwAAAABCAMAAADdNb8LAAAAA1BMVEX///+nxBvIAAAACklEQVR4AWNABgAADQABYc2cpAAAAABJRU5ErkJggg==";
static const Color3B DEFAULT_COLOR(52, 65, 87);
static const float DEFAULT_MARGIN = 20;
static const float DEFAULT_AUTO_HIDE_TIME = 0.2f;
static const float DEFAULT_SCROLLBAR_OPACITY = 0.4f;
ScrollViewBar::ScrollViewBar(ScrollView* parent, ScrollView::Direction direction):
_parent(parent),
_direction(direction),
_upperHalfCircle(nullptr),
_lowerHalfCircle(nullptr),
_body(nullptr),
_opacity(255 * DEFAULT_SCROLLBAR_OPACITY),
_marginFromBoundary(DEFAULT_MARGIN),
_marginForLength(DEFAULT_MARGIN),
_touching(false),
_autoHideEnabled(true),
_autoHideTime(DEFAULT_AUTO_HIDE_TIME),
_autoHideRemainingTime(0)
{
CCASSERT(parent != nullptr, "Parent scroll view must not be null!");
CCASSERT(direction != ScrollView::Direction::BOTH, "Illegal scroll direction for scroll bar!");
setCascadeColorEnabled(true);
setCascadeOpacityEnabled(true);
}
ScrollViewBar::~ScrollViewBar()
{
}
ScrollViewBar* ScrollViewBar::create(ScrollView* parent, ScrollView::Direction direction)
{
ScrollViewBar* node = new (std::nothrow) ScrollViewBar(parent, direction);
if (node && node->init())
{
node->autorelease();
return node;
}
CC_SAFE_DELETE(node);
return nullptr;
}
bool ScrollViewBar::init()
{
if (!ProtectedNode::init())
{
return false;
}
_upperHalfCircle = utils::createSpriteFromBase64(HALF_CIRCLE_IMAGE);
_upperHalfCircle->setAnchorPoint(Vec2::ANCHOR_MIDDLE_BOTTOM);
addProtectedChild(_upperHalfCircle);
_lowerHalfCircle = Sprite::createWithTexture(_upperHalfCircle->getTexture(), _upperHalfCircle->getTextureRect(), _upperHalfCircle->isTextureRectRotated());
_lowerHalfCircle->setScaleY(-1);
_lowerHalfCircle->setAnchorPoint(Vec2::ANCHOR_MIDDLE_BOTTOM);
addProtectedChild(_lowerHalfCircle);
_body = utils::createSpriteFromBase64(BODY_IMAGE_1_PIXEL_HEIGHT);
_body->setAnchorPoint(Vec2::ANCHOR_MIDDLE_BOTTOM);
addProtectedChild(_body);
setColor(DEFAULT_COLOR);
if(_direction == ScrollView::Direction::HORIZONTAL)
{
setRotation(90);
}
if(_autoHideEnabled)
{
ProtectedNode::setOpacity(0);
}
return true;
}
void ScrollViewBar::setPositionFromCorner(const Vec2& positionFromCorner)
{
if(_direction == ScrollView::Direction::VERTICAL)
{
_marginForLength = positionFromCorner.y;
_marginFromBoundary = positionFromCorner.x;
}
else
{
_marginForLength = positionFromCorner.x;
_marginFromBoundary = positionFromCorner.y;
}
}
Vec2 ScrollViewBar::getPositionFromCorner() const
{
if(_direction == ScrollView::Direction::VERTICAL)
{
return Vec2(_marginFromBoundary, _marginForLength);
}
else
{
return Vec2(_marginForLength, _marginFromBoundary);
}
}
void ScrollViewBar::setWidth(float width)
{
float scale = width / _body->getContentSize().width;
_body->setScaleX(scale);
_upperHalfCircle->setScale(scale);
_lowerHalfCircle->setScale(-scale);
}
void ScrollViewBar::setAutoHideEnabled(bool autoHideEnabled)
{
_autoHideEnabled = autoHideEnabled;
ProtectedNode::setOpacity(_opacity);
}
float ScrollViewBar::getWidth() const
{
return _body->getBoundingBox().size.width;
}
void ScrollViewBar::updateLength(float length)
{
float ratio = length / _body->getTextureRect().size.height;
_body->setScaleY(ratio);
_upperHalfCircle->setPositionY(_body->getPositionY() + length);
}
void ScrollViewBar::onEnter()
{
#if CC_ENABLE_SCRIPT_BINDING
if (_scriptType == kScriptTypeJavascript)
{
if (ScriptEngineManager::sendNodeEventToJSExtended(this, kNodeOnEnter))
return;
}
#endif
ProtectedNode::onEnter();
scheduleUpdate();
}
void ScrollViewBar::update(float deltaTime)
{
processAutoHide(deltaTime);
}
void ScrollViewBar::processAutoHide(float deltaTime)
{
if(!_autoHideEnabled || _autoHideRemainingTime <= 0)
{
return;
}
else if(_touching)
{
// If it is touching, don't auto hide.
return;
}
_autoHideRemainingTime -= deltaTime;
if(_autoHideRemainingTime <= _autoHideTime)
{
_autoHideRemainingTime = MAX(0, _autoHideRemainingTime);
ProtectedNode::setOpacity(_opacity * (_autoHideRemainingTime / _autoHideTime));
}
}
void ScrollViewBar::onTouchBegan()
{
if(!_autoHideEnabled)
{
return;
}
_touching = true;
}
void ScrollViewBar::onTouchEnded()
{
if(!_autoHideEnabled)
{
return;
}
_touching = false;
if(_autoHideRemainingTime <= 0)
{
// If the remaining time is 0, it means that it didn't moved after touch started so scroll bar is not showing.
return;
}
_autoHideRemainingTime = _autoHideTime;
}
// Recomputes the bar's length and position from the scroll view's current
// inner-container geometry whenever a scroll happens.
void ScrollViewBar::onScrolled(const Vec2& outOfBoundary)
{
    // Any scroll makes the bar fully visible and restarts the hide timer.
    if(_autoHideEnabled)
    {
        _autoHideRemainingTime = _autoHideTime;
        ProtectedNode::setOpacity(_opacity);
    }

    Layout* innerContainer = _parent->getInnerContainer();

    float innerContainerMeasure = 0;
    float scrollViewMeasure = 0;
    float outOfBoundaryValue = 0;
    float innerContainerPosition = 0;

    // Project the 2-D geometry onto the axis this bar tracks.
    switch(_direction)
    {
        case ScrollView::Direction::VERTICAL:
            innerContainerMeasure = innerContainer->getContentSize().height;
            scrollViewMeasure = _parent->getContentSize().height;
            outOfBoundaryValue = outOfBoundary.y;
            innerContainerPosition = -innerContainer->getPositionY();
            break;
        case ScrollView::Direction::HORIZONTAL:
            innerContainerMeasure = innerContainer->getContentSize().width;
            scrollViewMeasure = _parent->getContentSize().width;
            outOfBoundaryValue = outOfBoundary.x;
            innerContainerPosition = -innerContainer->getPositionX();
            break;
        default:
            break;
    }

    float length = calculateLength(innerContainerMeasure, scrollViewMeasure, outOfBoundaryValue);
    Vec2 position = calculatePosition(innerContainerMeasure, scrollViewMeasure, innerContainerPosition, outOfBoundaryValue, length);
    updateLength(length);
    setPosition(position);
}
// Computes the on-screen bar length: proportional to the visible fraction
// of the inner container, shrinking rapidly while out of boundary.
float ScrollViewBar::calculateLength(float innerContainerMeasure, float scrollViewMeasure, float outOfBoundaryValue)
{
    float denominator = innerContainerMeasure;
    if(outOfBoundaryValue != 0)
    {
        // If it is out of boundary, the length of scroll bar gets shorter quickly.
        static const float GETTING_SHORTER_FACTOR = 20;
        denominator += fabsf(outOfBoundaryValue) * GETTING_SHORTER_FACTOR;
    }
    float lengthRatio = scrollViewMeasure / denominator;
    return fabsf(scrollViewMeasure - 2 * _marginForLength) * lengthRatio;
}
// Maps the inner container's scroll offset to the bar's on-screen position
// along the tracked axis, keeping the bar inside its margins.
Vec2 ScrollViewBar::calculatePosition(float innerContainerMeasure, float scrollViewMeasure, float innerContainerPosition, float outOfBoundaryValue, float length)
{
    float denominator = innerContainerMeasure - scrollViewMeasure;
    if(outOfBoundaryValue != 0)
    {
        denominator += fabs(outOfBoundaryValue);
    }

    float positionRatio = 0;
    if(denominator != 0)
    {
        // Clamp the scroll ratio into [0, 1].
        positionRatio = MIN(MAX(innerContainerPosition / denominator, 0), 1);
    }

    float position = (scrollViewMeasure - length - 2 * _marginForLength) * positionRatio + _marginForLength;

    return (_direction == ScrollView::Direction::VERTICAL)
        ? Vec2(_parent->getContentSize().width - _marginFromBoundary, position)
        : Vec2(position, _marginFromBoundary);
}
}
NS_CC_END
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSDocumentController.h"
// Category on NSDocumentController (recovered via class-dump; private
// API) exposing bulk enumeration of open documents.
@interface NSDocumentController (Enumeration)
// NOTE(review): presumably invokes arg1 once per currently open document
// (inferred from the selector name); the block's signature is unknown
// (CDUnknownBlockType) — confirm argument types before calling.
+ (void)enumerateOpenDocuments:(CDUnknownBlockType)arg1;
@end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6245" systemVersion="13F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="X5k-f2-b5h">
<dependencies>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6238"/>
</dependencies>
<scenes>
<!--View Controller-->
<scene sceneID="gAE-YM-kbH">
<objects>
<viewController id="X5k-f2-b5h" sceneMemberID="viewController">
<layoutGuides>
<viewControllerLayoutGuide type="top" id="Y8P-hJ-Z43"/>
<viewControllerLayoutGuide type="bottom" id="9ZL-r4-8FZ"/>
</layoutGuides>
<view key="view" contentMode="scaleToFill" id="yd7-JS-zBw">
<rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<subviews>
<imageView userInteractionEnabled="NO" contentMode="scaleToFill" misplaced="YES" image="Icon-60.png" translatesAutoresizingMaskIntoConstraints="NO" id="23">
<rect key="frame" x="270" y="270" width="60" height="60"/>
<rect key="contentStretch" x="0.0" y="0.0" width="0.0" height="0.0"/>
</imageView>
</subviews>
<color key="backgroundColor" red="0.20392156862745098" green="0.59607843137254901" blue="0.85882352941176465" alpha="1" colorSpace="calibratedRGB"/>
<constraints>
<constraint firstItem="23" firstAttribute="centerY" secondItem="yd7-JS-zBw" secondAttribute="centerY" priority="1" id="39"/>
<constraint firstItem="23" firstAttribute="centerX" secondItem="yd7-JS-zBw" secondAttribute="centerX" priority="1" id="41"/>
</constraints>
</view>
</viewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="XAI-xm-WK6" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="349" y="339"/>
</scene>
</scenes>
<resources>
<image name="Icon-60.png" width="180" height="180"/>
</resources>
</document>
| {
"pile_set_name": "Github"
} |
from tartiflette.language.validators.query.rule import (
June2018ReleaseValidationRule,
)
from tartiflette.utils.errors import graphql_error_from_nodes
class FragmentSpreadTypeExistence(June2018ReleaseValidationRule):
    """
    Checks that the type named by the type condition of a fragment
    definition or inline fragment spread is defined in the schema.

    More details @ https://graphql.github.io/graphql-spec/June2018/#sec-Fragment-Spread-Type-Existence
    """

    RULE_NAME = "fragment-spread-type-existence"
    RULE_LINK = "https://graphql.github.io/graphql-spec/June2018/#sec-Fragment-Spread-Type-Existence"
    RULE_NUMBER = "5.5.1.2"

    def validate(self, path, schema, fragment, **__):
        """
        Return the errors for this rule: a single error when the fragment's
        type condition names a type unknown to the schema, otherwise an
        empty list.
        """
        type_condition = fragment.type_condition
        if not type_condition or schema.has_type(type_condition.name.value):
            return []
        return [
            graphql_error_from_nodes(
                message=f"Unknown type {type_condition.name.value}.",
                nodes=fragment,
                path=path,
                extensions=self._extensions,
            )
        ]
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.server.services.taskassigning.planning.test.model;
import java.util.List;
import org.kie.server.services.taskassigning.user.system.api.User;
import org.kie.server.services.taskassigning.user.system.api.UserSystemService;
/**
* Base test class for the UserSystemServiceLoaderTest
*/
public abstract class AbstractUserSystemService implements UserSystemService {

    // Name reported by getName(); set once at construction time.
    protected String name;

    /**
     * @param name the name this user-system service will report.
     */
    public AbstractUserSystemService(String name) {
        this.name = name;
    }

    /** No-op lifecycle hook; subclasses may override. */
    @Override
    public void start() {
    }

    /** No-op connectivity check; subclasses may override. */
    @Override
    public void test() throws Exception {
    }

    @Override
    public String getName() {
        return name;
    }

    /** Always returns null in this test base class. */
    @Override
    public List<User> findAllUsers() {
        return null;
    }

    /** Always returns null in this test base class. */
    @Override
    public User findUser(String id) {
        return null;
    }
}
| {
"pile_set_name": "Github"
} |
sub hll-config($config) {
@include(main-version-common)@
$config<static-nqp-home> := '@static_nqp_home@';
}
| {
"pile_set_name": "Github"
} |
# [:zap: Zapsnap](http://zapsnap.io/)
[](https://travis-ci.org/twobucks/zapsnap)
Temporary peer to peer screenshot sharing from your browser.
## Links
* [zapsnap-desktop](https://github.com/twobucks/zapsnap-desktop) - MacOS app for taking screenshots
* [seedshot-cli](https://github.com/twobucks/seedshot-cli) - CLI tool for taking screenshots (Linux and MacOS)
## What rocks
* the files are temporary, so we don't waste resources on storing them
* powered by [WebTorrent](https://github.com/feross/webtorrent)
* browser is used for sharing images peer to peer
* when all browsers with the image are closed, the image is gone forever
## What sucks
* browser support, since it depends on [WebTorrent](https://github.com/feross/webtorrent), which doesn't support IE and probably lacks support for the majority
of mobile browsers
* each file depends on the torrent network, so it takes around 3 seconds to load the image
* no Windows support for taking screenshots
* once you as the owner of an image close the browser, the file might still be available if other peers keep their browsers open
## Development
```
npm start # will start the server
npm run watch # watch for CSS/JS file changes and build
npm run build # build CSS/JS for production
```
## Attributions
Logo created by il Capitano from [Noun Project](https://thenounproject.com/search/?q=zap&i=889349).
Design by [Benjamin Alijagić](https://twitter.com/benjam1n).
## License
MIT
## Sponsors
Two Bucks Ltd © 2017
[](https://twobucks.co)
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dromara.hmily.tac.sqlparser.model.segment.dml.assignment;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import org.dromara.hmily.tac.sqlparser.model.segment.HmilySegment;
import org.dromara.hmily.tac.sqlparser.model.segment.dml.expr.HmilyExpressionSegment;
import java.util.List;
/**
* Insert values segment.
*/
@RequiredArgsConstructor
@Getter
public final class HmilyInsertValuesSegment implements HmilySegment {

    // Start index of this segment within the original SQL text.
    private final int startIndex;

    // Stop index of this segment within the original SQL text.
    private final int stopIndex;

    // Value expressions contained in this insert-values segment.
    private final List<HmilyExpressionSegment> values;
}
| {
"pile_set_name": "Github"
} |
function Y = vl_nnsoftmax(X,temp,dzdY)
%VL_NNSOFTMAX CNN softmax with temperature.
%   Y = VL_NNSOFTMAX(X, TEMP) applies the softmax operator with
%   temperature TEMP to the data X. X has dimension H x W x D x N,
%   packing N arrays of W x H D-dimensional vectors. Larger TEMP
%   values produce a softer (more uniform) output distribution.
%
%   D can be thought of as the number of possible classes and the
%   function computes the softmax along the D dimension. Often W=H=1,
%   but this is not a requirement, as the operator is applied
%   convolutionally at all spatial locations.
%
%   DZDX = VL_NNSOFTMAX(X, TEMP, DZDY) computes the derivative of the
%   block projected onto DZDY. DZDX and DZDY have the same dimensions as
%   X and Y respectively.

% Copyright (C) 2014 Andrea Vedaldi.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

% Subtract the per-location maximum before exponentiating for numerical
% stability (the constant factor cancels in the ratio), and divide by the
% temperature to soften/sharpen the distribution.
% BUGFIX: the original code referenced an undefined variable `temper`
% instead of the parameter `temp`.
E = exp(bsxfun(@minus, X, max(X,[],3))./temp) ;
L = sum(E,3) ;
Y = bsxfun(@rdivide, E, L) ;

% Forward mode: only X and TEMP were supplied.
% BUGFIX: the original test was `nargin <= 1`, which sent two-argument
% forward calls into the backward branch with DZDY undefined.
if nargin <= 2, return ; end

% backward
% NOTE(review): the 1/temp factor of the temperature-softmax Jacobian is
% not applied here; confirm this is intentional (e.g. distillation-style
% gradient scaling) rather than an omission.
Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
| {
"pile_set_name": "Github"
} |
/*
* Trick
* 2016 (c) National Aeronautics and Space Administration (NASA)
*/
package trick;
import java.awt.Graphics2D;
import java.awt.Graphics;
import java.awt.Image;
import java.awt.geom.AffineTransform;
import java.awt.image.AffineTransformOp;
import java.awt.image.BufferedImage;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.FileReader;
import java.net.Socket;
import java.util.*;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JPanel;
import java.awt.Color;
/**
*
* @author penn
*/
/**
 * Swing panel that draws a 1 m grid, coordinate axes, and a single ball
 * at a world-coordinate position.
 */
class ArenaView extends JPanel {

    private double scale;       // Pixels per meter.
    private Color arenaColor;   // Background fill color.
    private Color ballColor;    // Ball fill color (semi-transparent red).
    private double[] ballPos;   // Ball position in world coordinates {x, y} (meters).

    // Origin of world coordinates in jpanel coordinates.
    private int worldOriginX;
    private int worldOriginY;

    /**
     * @param mapScale map scale in pixels per meter (clamped to a minimum
     *                 by setScale).
     */
    public ArenaView( double mapScale ) {
        // setScale() both validates (clamps) and stores the scale; the
        // direct assignment that preceded this call in the original code
        // was a redundant dead store that bypassed the clamping.
        setScale(mapScale);
        arenaColor = Color.WHITE;
        ballColor = new Color(1,0,0, 0.5f);
        ballPos = new double[] {0.0, 0.0};
    }

    /** Sets the ball position in world coordinates (meters). */
    public void setBallPos (double x, double y) {
        ballPos[0] = x;
        ballPos[1] = y;
    }

    /** Sets the map scale (pixels per meter), clamped to a small positive minimum. */
    public void setScale (double mapScale) {
        if (mapScale < 0.00005) {
            scale = 0.00005;
        } else {
            scale = mapScale;
        }
    }

    /** Fills a circle of diameter r centered at (x, y) in panel coordinates. */
    public void drawCenteredCircle(Graphics2D g, int x, int y, int r) {
        // fillOval takes the top-left corner, so shift by half the diameter.
        x = x-(r/2);
        y = y-(r/2);
        g.fillOval(x,y,r,r);
    }

    /** Renders background, grid, axes, and the ball. */
    private void doDrawing(Graphics g) {
        Graphics2D g2d = (Graphics2D) g;

        int width = getWidth();
        int height = getHeight();

        // Place the world origin at the center of the panel.
        worldOriginX = (width/2);
        worldOriginY = (height/2);

        // Background.
        g2d.setPaint(arenaColor);
        g2d.fillRect(0, 0, width, height);

        // Horizontal grid lines, spaced one meter (scale pixels) apart.
        g2d.setPaint(Color.LIGHT_GRAY);
        for (int Y = worldOriginY % (int)scale ; Y < height ; Y += (int)scale ) {
            g2d.drawLine(0, Y, width, Y);
        }

        // Vertical grid lines, spaced one meter (scale pixels) apart.
        g2d.setPaint(Color.LIGHT_GRAY);
        for (int X = worldOriginX % (int)scale ; X < width ; X += (int)scale ) {
            g2d.drawLine(X, 0, X, height);
        }

        // Coordinate axes through the origin.
        g2d.setPaint(Color.BLACK);
        g2d.drawLine(0, worldOriginY, width, worldOriginY);
        g2d.drawLine(worldOriginX, 0, worldOriginX, height);

        // Draw the ball (world -> screen: the y axis is flipped).
        g2d.setPaint(ballColor);
        int sx = (int)(worldOriginX + scale * ballPos[0]);
        int sy = (int)(worldOriginY - scale * ballPos[1]);
        drawCenteredCircle(g2d, sx, sy, (int)(scale));
    }

    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        doDrawing(g);
    }
}
/**
 * Frame that connects to a Trick variable server, subscribes to the ball
 * position, and displays it in an {@link ArenaView}.
 */
public class BallDisplay extends JFrame {

    private ArenaView arenaView;  // Panel that renders the arena and the ball.
    private BufferedReader in;    // Variable-server input stream (line oriented).
    private DataOutputStream out; // Variable-server command stream.

    /**
     * @param arena the arena view to embed in this 800x800 frame.
     */
    public BallDisplay(ArenaView arena) {
        arenaView = arena;
        add( arenaView);
        setTitle("Ball Display");
        setSize(800, 800);
        setLocationRelativeTo(null);
        setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    }

    /**
     * Opens a TCP connection to the variable server and wraps its streams.
     *
     * @param host server host name.
     * @param port server port number.
     * @throws IOException if the socket cannot be opened.
     */
    public void connectToServer(String host, int port ) throws IOException {
        Socket socket = new Socket(host, port);
        in = new BufferedReader( new InputStreamReader( socket.getInputStream()));
        out = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
    }

    /** Requests a repaint of the arena view. */
    public void drawArenaView() {
        arenaView.repaint();
    }

    // Prints the command-line usage banner.
    // BUGFIX: the usage text said "java jar"; the correct invocation is
    // "java -jar".
    private static void printHelpText() {
        System.out.println(
            "----------------------------------------------------------------------\n"
          + "usage: java -jar BallDisplay.jar <port-number>\n"
          + "----------------------------------------------------------------------\n"
          );
    }

    /**
     * Entry point: parses the port argument, connects to the variable
     * server, subscribes to the ball position variables, and repaints the
     * display for every sample received.
     */
    public static void main(String[] args) throws IOException {

        String host = "localHost";
        int port = 0;

        // Parse arguments; the only positional argument is the port.
        // (An unused 'vehicleImageFile' local was removed.)
        int ii = 0;
        while (ii < args.length) {
            switch (args[ii]) {
                case "-help" :
                case "--help" : {
                    printHelpText();
                    System.exit(0);
                } break;
                default : {
                    port = (Integer.parseInt(args[ii]));
                } break;
            }
            ++ii;
        }

        if (port == 0) {
            System.out.println("No variable server port specified.");
            printHelpText();
            System.exit(0);
        }

        double mapScale = 20; // 20 pixels per meter

        ArenaView arenaview = new ArenaView( mapScale);
        BallDisplay sd = new BallDisplay(arenaview);
        sd.setVisible(true);

        double ballX = 0.0;
        double ballY = 0.0;

        System.out.println("Connecting to: " + host + ":" + port);
        sd.connectToServer(host, port);

        // Identify this client, then subscribe to the two ball position
        // variables, sampled every 0.1 s in ASCII mode.
        sd.out.writeBytes("trick.var_set_client_tag(\"BallDisplay\") \n");
        sd.out.flush();

        sd.out.writeBytes("trick.var_pause()\n");
        sd.out.writeBytes("trick.var_add(\"ball.state.output.position[0]\") \n" +
                          "trick.var_add(\"ball.state.output.position[1]\") \n" );
        sd.out.flush();

        sd.out.writeBytes("trick.var_ascii() \n" +
                          "trick.var_cycle(0.1) \n" +
                          "trick.var_unpause() \n" );
        sd.out.flush();

        // Read tab-separated samples until the server closes the
        // connection (readLine() returning null triggers the NPE path).
        Boolean go = true;
        while (go) {
            String field[];
            try {
                String line;
                line = sd.in.readLine();
                field = line.split("\t");
                ballX = Double.parseDouble( field[1] );
                ballY = Double.parseDouble( field[2] );

                // Set the Ball position
                arenaview.setBallPos(ballX, ballY);

            } catch (IOException | NullPointerException e ) {
                go = false;
            }
            sd.drawArenaView();
        }
    }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.apple.security.app-sandbox</key>
<true/>
<key>com.apple.security.application-groups</key>
<array>
<string>group.Mortennn.FiScript</string>
</array>
<key>com.apple.security.temporary-exception.files.absolute-path.read-write</key>
<array>
<string>/</string>
</array>
<key>com.apple.security.temporary-exception.files.home-relative-path.read-write</key>
<array>
<string>/</string>
</array>
</dict>
</plist>
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* The "clientId" collection of methods.
* Typical usage is:
* <code>
* $analyticsService = new Google_Service_Analytics(...);
* $clientId = $analyticsService->clientId;
* </code>
*/
class Google_Service_Analytics_Resource_ManagementClientId extends Google_Service_Resource
{
  /**
   * Hashes the given Client ID. (clientId.hashClientId)
   *
   * @param Google_Service_Analytics_HashClientIdRequest $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Analytics_HashClientIdResponse
   */
  public function hashClientId(Google_Service_Analytics_HashClientIdRequest $postBody, $optParams = array())
  {
    // Merge the required request body with any caller-supplied options
    // before dispatching through the generated transport layer.
    $parameters = array_merge(array('postBody' => $postBody), $optParams);
    return $this->call('hashClientId', array($parameters), "Google_Service_Analytics_HashClientIdResponse");
  }
}
| {
"pile_set_name": "Github"
} |
// Classes and structures being serialized
// Generated by ProtocolBuffer
// - a pure c# code generation implementation of protocol buffers
// Report bugs to: https://silentorbit.com/protobuf/
// DO NOT EDIT
// This file will be overwritten when CodeGenerator is run.
// To make custom modifications, edit the .proto file and add //:external before the message line
// then write the code and the changes in a separate file.
using System;
using System.Collections.Generic;
namespace Personal
{
    /// <summary> A single address-book entry. Generated from the .proto
    /// source; edit the .proto rather than this file.</summary>
    public partial class Person
    {
        /// <summary> Classifies a phone number entry.</summary>
        public enum PhoneType
        {
            MOBILE = 0,
            HOME = 1,
            WORK = 2,
        }

        /// <summary> The person's name.</summary>
        public string Name { get; set; }

        /// <summary> Numeric id of this person.</summary>
        public int Id { get; set; }

        /// <summary> E-mail address.</summary>
        public string Email { get; set; }

        /// <summary> Phone numbers associated with this person.</summary>
        public List<Personal.Person.PhoneNumber> Phone { get; set; }

        /// <summary> A phone number together with its type.</summary>
        public partial class PhoneNumber
        {
            public PhoneNumber()
            {
                // Mirrors the .proto default: a number is HOME unless specified.
                Type = Personal.Person.PhoneType.HOME;
            }

            /// <summary> The phone number text.</summary>
            public string Number { get; set; }

            /// <summary> Kind of number; defaults to HOME.</summary>
            public Personal.Person.PhoneType Type { get; set; }
        }
    }
    /// <summary> Container for all person entries in the address book.</summary>
    public partial class AddressBook
    {
        /// <summary> All person entries.</summary>
        public List<Personal.Person> List { get; set; }
    }
}
namespace Google.Protobuf
{
/// <summary>
/// <para> Protocol Buffers - Google's data interchange format</para>
/// <para> Copyright 2008 Google Inc. All rights reserved.</para>
/// <para> http://code.google.com/p/protobuf/</para>
/// <para></para>
/// <para> Redistribution and use in source and binary forms, with or without</para>
/// <para> modification, are permitted provided that the following conditions are</para>
/// <para> met:</para>
/// <para></para>
/// <para> * Redistributions of source code must retain the above copyright</para>
/// <para> notice, this list of conditions and the following disclaimer.</para>
/// <para> * Redistributions in binary form must reproduce the above</para>
/// <para> copyright notice, this list of conditions and the following disclaimer</para>
/// <para> in the documentation and/or other materials provided with the</para>
/// <para> distribution.</para>
/// <para> * Neither the name of Google Inc. nor the names of its</para>
/// <para> contributors may be used to endorse or promote products derived from</para>
/// <para> this software without specific prior written permission.</para>
/// <para></para>
/// <para> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS</para>
/// <para> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT</para>
/// <para> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR</para>
/// <para> A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT</para>
/// <para> OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,</para>
/// <para> SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT</para>
/// <para> LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,</para>
/// <para> DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY</para>
/// <para> THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT</para>
/// <para> (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE</para>
/// <para> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.</para>
/// <para> Author: [email protected] (Kenton Varda)</para>
/// <para> Based on original Protocol Buffers design by</para>
/// <para> Sanjay Ghemawat, Jeff Dean, and others.</para>
/// <para></para>
/// <para> The messages in this file describe the definitions found in .proto files.</para>
/// <para> A valid .proto file can be translated directly to a FileDescriptorProto</para>
/// <para> without any other information (e.g. without reading its imports).</para>
/// <para> descriptor.proto must be optimized for speed because reflection-based</para>
/// <para> algorithms don't work during bootstrapping.</para>
/// <para> The protocol compiler can output a FileDescriptorSet containing the .proto</para>
/// <para> files it parses.</para>
/// </summary>
public partial class FileDescriptorSet
{
public List<Google.Protobuf.FileDescriptorProto> File { get; set; }
}
/// <summary> Describes a complete .proto file.</summary>
public partial class FileDescriptorProto
{
public string Name { get; set; }
/// <summary> file name, relative to root of source tree</summary>
public string Package { get; set; }
/// <summary>
/// <para> e.g. "foo", "foo.bar", etc.</para>
/// <para> Names of files imported by this file.</para>
/// </summary>
public List<string> Dependency { get; set; }
/// <summary> Indexes of the public imported files in the dependency list above.</summary>
public List<int> PublicDependency { get; set; }
/// <summary>
/// <para> Indexes of the weak imported files in the dependency list.</para>
/// <para> For Google-internal migration only. Do not use.</para>
/// </summary>
public List<int> WeakDependency { get; set; }
/// <summary> All top-level definitions in this file.</summary>
public List<Google.Protobuf.DescriptorProto> MessageType { get; set; }
public List<Google.Protobuf.EnumDescriptorProto> EnumType { get; set; }
public List<Google.Protobuf.ServiceDescriptorProto> Service { get; set; }
public List<Google.Protobuf.FieldDescriptorProto> Extension { get; set; }
public Google.Protobuf.FileOptions Options { get; set; }
/// <summary>
/// <para> This field contains optional information about the original source code.</para>
/// <para> You may safely remove this entire field whithout harming runtime</para>
/// <para> functionality of the descriptors -- the information is needed only by</para>
/// <para> development tools.</para>
/// </summary>
public Google.Protobuf.SourceCodeInfo SourceCodeInfo { get; set; }
}
/// <summary> Describes a message type.</summary>
public partial class DescriptorProto
{
public string Name { get; set; }
public List<Google.Protobuf.FieldDescriptorProto> Field { get; set; }
public List<Google.Protobuf.FieldDescriptorProto> Extension { get; set; }
public List<Google.Protobuf.DescriptorProto> NestedType { get; set; }
public List<Google.Protobuf.EnumDescriptorProto> EnumType { get; set; }
public List<Google.Protobuf.DescriptorProto.ExtensionRange> ExtensionRangeField { get; set; }
public Google.Protobuf.MessageOptions Options { get; set; }
public partial class ExtensionRange
{
public int Start { get; set; }
public int End { get; set; }
}
}
/// <summary> Describes a field within a message.</summary>
public partial class FieldDescriptorProto
{
public enum Type
{
/// <summary>
/// <para> 0 is reserved for errors.</para>
/// <para> Order is weird for historical reasons.</para>
/// </summary>
TYPE_DOUBLE = 1,
TYPE_FLOAT = 2,
/// <summary>
/// <para> Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if</para>
/// <para> negative values are likely.</para>
/// </summary>
TYPE_INT64 = 3,
TYPE_UINT64 = 4,
/// <summary>
/// <para> Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if</para>
/// <para> negative values are likely.</para>
/// </summary>
TYPE_INT32 = 5,
TYPE_FIXED64 = 6,
TYPE_FIXED32 = 7,
TYPE_BOOL = 8,
TYPE_STRING = 9,
TYPE_GROUP = 10,
/// <summary> Tag-delimited aggregate.</summary>
TYPE_MESSAGE = 11,
/// <summary>
/// <para> Length-delimited aggregate.</para>
/// <para> New in version 2.</para>
/// </summary>
TYPE_BYTES = 12,
TYPE_UINT32 = 13,
TYPE_ENUM = 14,
TYPE_SFIXED32 = 15,
TYPE_SFIXED64 = 16,
TYPE_SINT32 = 17,
/// <summary> Uses ZigZag encoding.</summary>
TYPE_SINT64 = 18,
}
public enum Label
{
/// <summary> 0 is reserved for errors</summary>
LABEL_OPTIONAL = 1,
LABEL_REQUIRED = 2,
LABEL_REPEATED = 3,
}
public string Name { get; set; }
public int Number { get; set; }
public Google.Protobuf.FieldDescriptorProto.Label label { get; set; }
/// <summary>
/// <para> If type_name is set, this need not be set. If both this and type_name</para>
/// <para> are set, this must be either TYPE_ENUM or TYPE_MESSAGE.</para>
/// </summary>
public Google.Protobuf.FieldDescriptorProto.Type type { get; set; }
/// <summary>
/// <para> For message and enum types, this is the name of the type. If the name</para>
/// <para> starts with a '.', it is fully-qualified. Otherwise, C++-like scoping</para>
/// <para> rules are used to find the type (i.e. first the nested types within this</para>
/// <para> message are searched, then within the parent, on up to the root</para>
/// <para> namespace).</para>
/// </summary>
public string TypeName { get; set; }
/// <summary>
/// <para> For extensions, this is the name of the type being extended. It is</para>
/// <para> resolved in the same manner as type_name.</para>
/// </summary>
public string Extendee { get; set; }
/// <summary>
/// <para> For numeric types, contains the original text representation of the value.</para>
/// <para> For booleans, "true" or "false".</para>
/// <para> For strings, contains the default text contents (not escaped in any way).</para>
/// <para> For bytes, contains the C escaped value. All bytes >= 128 are escaped.</para>
/// <para> TODO(kenton): Base-64 encode?</para>
/// </summary>
public string DefaultValue { get; set; }
public Google.Protobuf.FieldOptions Options { get; set; }
}
/// <summary> Describes an enum type.</summary>
public partial class EnumDescriptorProto
{
public string Name { get; set; }
public List<Google.Protobuf.EnumValueDescriptorProto> Value { get; set; }
public Google.Protobuf.EnumOptions Options { get; set; }
}
/// <summary> Describes a value within an enum.</summary>
public partial class EnumValueDescriptorProto
{
public string Name { get; set; }
public int Number { get; set; }
public Google.Protobuf.EnumValueOptions Options { get; set; }
}
/// <summary> Describes a service.</summary>
public partial class ServiceDescriptorProto
{
public string Name { get; set; }
public List<Google.Protobuf.MethodDescriptorProto> Method { get; set; }
public Google.Protobuf.ServiceOptions Options { get; set; }
}
/// <summary> Describes a method of a service.</summary>
public partial class MethodDescriptorProto
{
public string Name { get; set; }
/// <summary>
/// <para> Input and output type names. These are resolved in the same way as</para>
/// <para> FieldDescriptorProto.type_name, but must refer to a message type.</para>
/// </summary>
public string InputType { get; set; }
public string OutputType { get; set; }
public Google.Protobuf.MethodOptions Options { get; set; }
}
/// <summary>
/// <para> ===================================================================</para>
/// <para> Options</para>
/// <para> Each of the definitions above may have "options" attached. These are</para>
/// <para> just annotations which may cause code to be generated slightly differently</para>
/// <para> or may contain hints for code that manipulates protocol messages.</para>
/// <para></para>
/// <para> Clients may define custom options as extensions of the *Options messages.</para>
/// <para> These extensions may not yet be known at parsing time, so the parser cannot</para>
/// <para> store the values in them. Instead it stores them in a field in the *Options</para>
/// <para> message called uninterpreted_option. This field must have the same name</para>
/// <para> across all *Options messages. We then use this field to populate the</para>
/// <para> extensions when we build a descriptor, at which point all protos have been</para>
/// <para> parsed and so all extensions are known.</para>
/// <para></para>
/// <para> Extension numbers for custom options may be chosen as follows:</para>
/// <para> * For options which will only be used within a single application or</para>
/// <para> organization, or for experimental options, use field numbers 50000</para>
/// <para> through 99999. It is up to you to ensure that you do not use the</para>
/// <para> same number for multiple options.</para>
/// <para> * For options which will be published and used publicly by multiple</para>
/// <para> independent entities, e-mail [email protected]</para>
/// <para> to reserve extension numbers. Simply provide your project name (e.g.</para>
/// <para> Object-C plugin) and your porject website (if available) -- there's no need</para>
/// <para> to explain how you intend to use them. Usually you only need one extension</para>
/// <para> number. You can declare multiple options with only one extension number by</para>
/// <para> putting them in a sub-message. See the Custom Options section of the docs</para>
/// <para> for examples:</para>
/// <para> http://code.google.com/apis/protocolbuffers/docs/proto.html#options</para>
/// <para> If this turns out to be popular, a web service will be set up</para>
/// <para> to automatically assign option numbers.</para>
/// </summary>
public partial class FileOptions
{
public FileOptions()
{
JavaMultipleFiles = false;
JavaGenerateEqualsAndHash = false;
OptimizeFor = Google.Protobuf.FileOptions.OptimizeMode.SPEED;
CcGenericServices = false;
JavaGenericServices = false;
PyGenericServices = false;
}
public enum OptimizeMode
{
SPEED = 1,
/// <summary>
/// <para> Generate complete code for parsing, serialization,</para>
/// <para> etc.</para>
/// </summary>
CODE_SIZE = 2,
/// <summary> Use ReflectionOps to implement these methods.</summary>
LITE_RUNTIME = 3,
}
/// <summary>
/// <para> Sets the Java package where classes generated from this .proto will be</para>
/// <para> placed. By default, the proto package is used, but this is often</para>
/// <para> inappropriate because proto packages do not normally start with backwards</para>
/// <para> domain names.</para>
/// </summary>
public string JavaPackage { get; set; }
/// <summary>
/// <para> If set, all the classes from the .proto file are wrapped in a single</para>
/// <para> outer class with the given name. This applies to both Proto1</para>
/// <para> (equivalent to the old "--one_java_file" option) and Proto2 (where</para>
/// <para> a .proto always translates to a single class, but you may want to</para>
/// <para> explicitly choose the class name).</para>
/// </summary>
public string JavaOuterClassname { get; set; }
/// <summary>
/// <para> If set true, then the Java code generator will generate a separate .java</para>
/// <para> file for each top-level message, enum, and service defined in the .proto</para>
/// <para> file. Thus, these types will *not* be nested inside the outer class</para>
/// <para> named by java_outer_classname. However, the outer class will still be</para>
/// <para> generated to contain the file's getDescriptor() method as well as any</para>
/// <para> top-level extensions defined in the file.</para>
/// </summary>
public bool JavaMultipleFiles { get; set; }
/// <summary>
/// <para> If set true, then the Java code generator will generate equals() and</para>
/// <para> hashCode() methods for all messages defined in the .proto file. This is</para>
/// <para> purely a speed optimization, as the AbstractMessage base class includes</para>
/// <para> reflection-based implementations of these methods.</para>
/// </summary>
public bool JavaGenerateEqualsAndHash { get; set; }
/// <summary>
/// <para> Selects the code-generation strategy for this file.</para>
/// <para> See OptimizeMode for the meaning of each value.</para>
/// </summary>
public Google.Protobuf.FileOptions.OptimizeMode OptimizeFor { get; set; }
/// <summary>
/// <para> Sets the Go package where structs generated from this .proto will be</para>
/// <para> placed. There is no default.</para>
/// </summary>
public string GoPackage { get; set; }
/// <summary>
/// <para> Should generic services be generated in each language? "Generic" services</para>
/// <para> are not specific to any particular RPC system. They are generated by the</para>
/// <para> main code generators in each language (without additional plugins).</para>
/// <para> Generic services were the only kind of service generation supported by</para>
/// <para> early versions of proto2.</para>
/// <para></para>
/// <para> Generic services are now considered deprecated in favor of using plugins</para>
/// <para> that generate code specific to your particular RPC system. Therefore,</para>
/// <para> these default to false. Old code which depends on generic services should</para>
/// <para> explicitly set them to true.</para>
/// </summary>
public bool CcGenericServices { get; set; }
/// <summary> Java counterpart of CcGenericServices; see that comment.</summary>
public bool JavaGenericServices { get; set; }
/// <summary> Python counterpart of CcGenericServices; see that comment.</summary>
public bool PyGenericServices { get; set; }
/// <summary> The parser stores options it doesn't recognize here. See above.</summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
public partial class MessageOptions
{
/// <summary> Initializes each option to its proto-declared default.</summary>
public MessageOptions()
{
MessageSetWireFormat = false;
NoStandardDescriptorAccessor = false;
}
/// <summary>
/// <para> Set true to use the old proto1 MessageSet wire format for extensions.</para>
/// <para> This is provided for backwards-compatibility with the MessageSet wire</para>
/// <para> format. You should not use this for any other reason: It's less</para>
/// <para> efficient, has fewer features, and is more complicated.</para>
/// <para></para>
/// <para> The message must be defined exactly as follows:</para>
/// <para> message Foo {</para>
/// <para> option message_set_wire_format = true;</para>
/// <para> extensions 4 to max;</para>
/// <para> }</para>
/// <para> Note that the message cannot have any defined fields; MessageSets only</para>
/// <para> have extensions.</para>
/// <para></para>
/// <para> All extensions of your type must be singular messages; e.g. they cannot</para>
/// <para> be int32s, enums, or repeated messages.</para>
/// <para></para>
/// <para> Because this is an option, the above two restrictions are not enforced by</para>
/// <para> the protocol compiler.</para>
/// </summary>
public bool MessageSetWireFormat { get; set; }
/// <summary>
/// <para> Disables the generation of the standard "descriptor()" accessor, which can</para>
/// <para> conflict with a field of the same name. This is meant to make migration</para>
/// <para> from proto1 easier; new code should avoid fields named "descriptor".</para>
/// </summary>
public bool NoStandardDescriptorAccessor { get; set; }
/// <summary> The parser stores options it doesn't recognize here. See above.</summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
public partial class FieldOptions
{
/// <summary> Initializes each option to its proto-declared default.</summary>
public FieldOptions()
{
Ctype = Google.Protobuf.FieldOptions.CType.STRING;
Lazy = false;
Deprecated = false;
Weak = false;
}
public enum CType
{
/// <summary> Default mode.</summary>
STRING = 0,
CORD = 1,
STRING_PIECE = 2,
}
/// <summary>
/// <para> The ctype option instructs the C++ code generator to use a different</para>
/// <para> representation of the field than it normally would. See the specific</para>
/// <para> options below. This option is not yet implemented in the open source</para>
/// <para> release -- sorry, we'll try to include it in a future version!</para>
/// </summary>
public Google.Protobuf.FieldOptions.CType Ctype { get; set; }
/// <summary>
/// <para> The packed option can be enabled for repeated primitive fields to enable</para>
/// <para> a more efficient representation on the wire. Rather than repeatedly</para>
/// <para> writing the tag and type for each element, the entire array is encoded as</para>
/// <para> a single length-delimited blob.</para>
/// </summary>
public bool Packed { get; set; }
/// <summary>
/// <para> Should this field be parsed lazily? Lazy applies only to message-type</para>
/// <para> fields. It means that when the outer message is initially parsed, the</para>
/// <para> inner message's contents will not be parsed but instead stored in encoded</para>
/// <para> form. The inner message will actually be parsed when it is first accessed.</para>
/// <para></para>
/// <para> This is only a hint. Implementations are free to choose whether to use</para>
/// <para> eager or lazy parsing regardless of the value of this option. However,</para>
/// <para> setting this option true suggests that the protocol author believes that</para>
/// <para> using lazy parsing on this field is worth the additional bookkeeping</para>
/// <para> overhead typically needed to implement it.</para>
/// <para></para>
/// <para> This option does not affect the public interface of any generated code;</para>
/// <para> all method signatures remain the same. Furthermore, thread-safety of the</para>
/// <para> interface is not affected by this option; const methods remain safe to</para>
/// <para> call from multiple threads concurrently, while non-const methods continue</para>
/// <para> to require exclusive access.</para>
/// <para></para>
/// <para></para>
/// <para> Note that implementations may choose not to check required fields within</para>
/// <para> a lazy sub-message. That is, calling IsInitialized() on the outer message</para>
/// <para> may return true even if the inner message has missing required fields.</para>
/// <para> This is necessary because otherwise the inner message would have to be</para>
/// <para> parsed in order to perform the check, defeating the purpose of lazy</para>
/// <para> parsing. An implementation which chooses not to check required fields</para>
/// <para> must be consistent about it. That is, for any particular sub-message, the</para>
/// <para> implementation must either *always* check its required fields, or *never*</para>
/// <para> check its required fields, regardless of whether or not the message has</para>
/// <para> been parsed.</para>
/// </summary>
public bool Lazy { get; set; }
/// <summary>
/// <para> Is this field deprecated?</para>
/// <para> Depending on the target platform, this can emit Deprecated annotations</para>
/// <para> for accessors, or it will be completely ignored; in the very least, this</para>
/// <para> is a formalization for deprecating fields.</para>
/// </summary>
public bool Deprecated { get; set; }
/// <summary>
/// <para> EXPERIMENTAL. DO NOT USE.</para>
/// <para> For "map" fields, the name of the field in the enclosed type that</para>
/// <para> is the key for this map. For example, suppose we have:</para>
/// <para> message Item {</para>
/// <para> required string name = 1;</para>
/// <para> required string value = 2;</para>
/// <para> }</para>
/// <para> message Config {</para>
/// <para> repeated Item items = 1 [experimental_map_key="name"];</para>
/// <para> }</para>
/// <para> In this situation, the map key for Item will be set to "name".</para>
/// <para> TODO: Fully-implement this, then remove the "experimental_" prefix.</para>
/// </summary>
public string ExperimentalMapKey { get; set; }
/// <summary> For Google-internal migration only. Do not use.</summary>
public bool Weak { get; set; }
/// <summary> The parser stores options it doesn't recognize here. See above.</summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
public partial class EnumOptions
{
/// <summary> Initializes AllowAlias to its proto-declared default (true).</summary>
public EnumOptions()
{
AllowAlias = true;
}
/// <summary>
/// <para> Set this option to false to disallow mapping different tag names to a same</para>
/// <para> value.</para>
/// </summary>
public bool AllowAlias { get; set; }
/// <summary> The parser stores options it doesn't recognize here. See above.</summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
/// <summary> Options attached to a single enum value; only unrecognized options are stored.</summary>
public partial class EnumValueOptions
{
/// <summary> The parser stores options it doesn't recognize here. See above.</summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
/// <summary> Options attached to a service definition.</summary>
public partial class ServiceOptions
{
/// <summary>
/// <para> Note: Field numbers 1 through 32 are reserved for Google's internal RPC</para>
/// <para> framework. We apologize for hoarding these numbers to ourselves, but</para>
/// <para> we were already using them long before we decided to release Protocol</para>
/// <para> Buffers.</para>
/// <para> The parser stores options it doesn't recognize here. See above.</para>
/// </summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
/// <summary> Options attached to a service method definition.</summary>
public partial class MethodOptions
{
/// <summary>
/// <para> Note: Field numbers 1 through 32 are reserved for Google's internal RPC</para>
/// <para> framework. We apologize for hoarding these numbers to ourselves, but</para>
/// <para> we were already using them long before we decided to release Protocol</para>
/// <para> Buffers.</para>
/// <para> The parser stores options it doesn't recognize here. See above.</para>
/// </summary>
public List<Google.Protobuf.UninterpretedOption> UninterpretedOption { get; set; }
}
/// <summary>
/// <para> A message representing a option the parser does not recognize. This only</para>
/// <para> appears in options protos created by the compiler::Parser class.</para>
/// <para> DescriptorPool resolves these when building Descriptor objects. Therefore,</para>
/// <para> options protos in descriptor objects (e.g. returned by Descriptor::options(),</para>
/// <para> or produced by Descriptor::CopyTo()) will never have UninterpretedOptions</para>
/// <para> in them.</para>
/// </summary>
public partial class UninterpretedOption
{
/// <summary> The (possibly multi-segment) name of the option; see NamePart below.</summary>
public List<Google.Protobuf.UninterpretedOption.NamePart> Name { get; set; }
/// <summary>
/// <para> The value of the uninterpreted option, in whatever type the tokenizer</para>
/// <para> identified it as during parsing. Exactly one of these should be set.</para>
/// </summary>
public string IdentifierValue { get; set; }
public ulong PositiveIntValue { get; set; }
public long NegativeIntValue { get; set; }
public double DoubleValue { get; set; }
public byte[] StringValue { get; set; }
public string AggregateValue { get; set; }
/// <summary>
/// <para> The name of the uninterpreted option. Each string represents a segment in</para>
/// <para> a dot-separated name. is_extension is true iff a segment represents an</para>
/// <para> extension (denoted with parentheses in options specs in .proto files).</para>
/// <para> E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents</para>
/// <para> "foo.(bar.baz).qux".</para>
/// </summary>
public partial class NamePart
{
/// <summary> One dot-separated segment of the option name (e.g. "foo" or "bar.baz").</summary>
public string NamePartField { get; set; }
/// <summary> True when this segment names an extension (written in parentheses in .proto).</summary>
public bool IsExtension { get; set; }
}
}
/// <summary>
/// <para> ===================================================================</para>
/// <para> Optional source code info</para>
/// <para> Encapsulates information about the original source file from which a</para>
/// <para> FileDescriptorProto was generated.</para>
/// </summary>
public partial class SourceCodeInfo
{
/// <summary>
/// <para> A Location identifies a piece of source code in a .proto file which</para>
/// <para> corresponds to a particular definition. This information is intended</para>
/// <para> to be useful to IDEs, code indexers, documentation generators, and similar</para>
/// <para> tools.</para>
/// <para></para>
/// <para> For example, say we have a file like:</para>
/// <para> message Foo {</para>
/// <para> optional string foo = 1;</para>
/// <para> }</para>
/// <para> Let's look at just the field definition:</para>
/// <para> optional string foo = 1;</para>
/// <para> ^ ^^ ^^ ^ ^^^</para>
/// <para> a bc de f ghi</para>
/// <para> We have the following locations:</para>
/// <para> span path represents</para>
/// <para> [a,i) [ 4, 0, 2, 0 ] The whole field definition.</para>
/// <para> [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).</para>
/// <para> [c,d) [ 4, 0, 2, 0, 5 ] The type (string).</para>
/// <para> [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).</para>
/// <para> [g,h) [ 4, 0, 2, 0, 3 ] The number (1).</para>
/// <para></para>
/// <para> Notes:</para>
/// <para> - A location may refer to a repeated field itself (i.e. not to any</para>
/// <para> particular index within it). This is used whenever a set of elements are</para>
/// <para> logically enclosed in a single code segment. For example, an entire</para>
/// <para> extend block (possibly containing multiple extension definitions) will</para>
/// <para> have an outer location whose path refers to the "extensions" repeated</para>
/// <para> field without an index.</para>
/// <para> - Multiple locations may have the same path. This happens when a single</para>
/// <para> logical declaration is spread out across multiple places. The most</para>
/// <para> obvious example is the "extend" block again -- there may be multiple</para>
/// <para> extend blocks in the same scope, each of which will have the same path.</para>
/// <para> - A location's span is not always a subset of its parent's span. For</para>
/// <para> example, the "extendee" of an extension declaration appears at the</para>
/// <para> beginning of the "extend" block and is shared by all extensions within</para>
/// <para> the block.</para>
/// <para> - Just because a location's span is a subset of some other location's span</para>
/// <para> does not mean that it is a descendent. For example, a "group" defines</para>
/// <para> both a type and a field in a single declaration. Thus, the locations</para>
/// <para> corresponding to the type and field and their components will overlap.</para>
/// <para> - Code which tries to interpret locations should probably be designed to</para>
/// <para> ignore those that it doesn't understand, as more types of locations could</para>
/// <para> be recorded in the future.</para>
/// </summary>
public List<Google.Protobuf.SourceCodeInfo.Location> LocationField { get; set; }
/// <summary> See the LocationField documentation above for the full semantics.</summary>
public partial class Location
{
/// <summary>
/// <para> Identifies which part of the FileDescriptorProto was defined at this</para>
/// <para> location.</para>
/// <para></para>
/// <para> Each element is a field number or an index. They form a path from</para>
/// <para> the root FileDescriptorProto to the place where the definition. For</para>
/// <para> example, this path:</para>
/// <para> [ 4, 3, 2, 7, 1 ]</para>
/// <para> refers to:</para>
/// <para> file.message_type(3) // 4, 3</para>
/// <para> .field(7) // 2, 7</para>
/// <para> .name() // 1</para>
/// <para> This is because FileDescriptorProto.message_type has field number 4:</para>
/// <para> repeated DescriptorProto message_type = 4;</para>
/// <para> and DescriptorProto.field has field number 2:</para>
/// <para> repeated FieldDescriptorProto field = 2;</para>
/// <para> and FieldDescriptorProto.name has field number 1:</para>
/// <para> optional string name = 1;</para>
/// <para></para>
/// <para> Thus, the above path gives the location of a field name. If we removed</para>
/// <para> the last element:</para>
/// <para> [ 4, 3, 2, 7 ]</para>
/// <para> this path refers to the whole field declaration (from the beginning</para>
/// <para> of the label to the terminating semicolon).</para>
/// </summary>
public List<int> Path { get; set; }
/// <summary>
/// <para> Always has exactly three or four elements: start line, start column,</para>
/// <para> end line (optional, otherwise assumed same as start line), end column.</para>
/// <para> These are packed into a single field for efficiency. Note that line</para>
/// <para> and column numbers are zero-based -- typically you will want to add</para>
/// <para> 1 to each before displaying to a user.</para>
/// </summary>
public List<int> Span { get; set; }
/// <summary>
/// <para> If this SourceCodeInfo represents a complete declaration, these are any</para>
/// <para> comments appearing before and after the declaration which appear to be</para>
/// <para> attached to the declaration.</para>
/// <para></para>
/// <para> A series of line comments appearing on consecutive lines, with no other</para>
/// <para> tokens appearing on those lines, will be treated as a single comment.</para>
/// <para></para>
/// <para> Only the comment content is provided; comment markers (e.g. //) are</para>
/// <para> stripped out. For block comments, leading whitespace and an asterisk</para>
/// <para> will be stripped from the beginning of each line other than the first.</para>
/// <para> Newlines are included in the output.</para>
/// <para></para>
/// <para> Examples:</para>
/// <para></para>
/// <para> optional int32 foo = 1; // Comment attached to foo.</para>
/// <para> // Comment attached to bar.</para>
/// <para> optional int32 bar = 2;</para>
/// <para></para>
/// <para> optional string baz = 3;</para>
/// <para> // Comment attached to baz.</para>
/// <para> // Another line attached to baz.</para>
/// <para></para>
/// <para> // Comment attached to qux.</para>
/// <para> //</para>
/// <para> // Another line attached to qux.</para>
/// <para> optional double qux = 4;</para>
/// <para></para>
/// <para> optional string corge = 5;</para>
/// <para> /* Block comment attached</para>
/// <para> * to corge. Leading asterisks</para>
/// <para> * will be removed. */</para>
/// <para> /* Block comment attached to</para>
/// <para> * grault. */</para>
/// <para> optional int32 grault = 6;</para>
/// </summary>
public string LeadingComments { get; set; }
/// <summary> Comments following the declaration; same stripped format as LeadingComments.</summary>
public string TrailingComments { get; set; }
}
}
}
namespace Local
{
/// <summary>This is a demonstration of features only present in ProtoBuf Code Generator</summary>
internal partial class LocalFeatures
{
/// <summary>Make class field of type TimeSpan, serialized to Ticks</summary>
public TimeSpan Uptime { get; set; }
/// <summary>Make class field of type DateTime, serialized to Ticks</summary>
public DateTime DueDate { get; set; }
//public double Amount { get; set; } // Implemented by user elsewhere
/// <summary>Custom field access types. Default: public</summary>
private string Denial { get; set; }
protected string Secret { get; set; }
internal string Internal { get; set; }
public string PR { get; set; }
/// <summary>Generate a c# readonly field</summary>
public readonly Mine.MyMessageV1 TestingReadOnly = new Mine.MyMessageV1();
/// <summary>When deserializing this one must be set to a class before</summary>
public LocalFeatureTest.InterfaceTest MyInterface { get; set; }
// NOTE(review): the next three are public fields, not properties — presumably
// a generator requirement for struct-typed members; confirm against the generator docs.
public LocalFeatureTest.StructTest MyStruct;
public TestB.ExternalStruct MyExtStruct;
public TestB.ExternalClass MyExtClass { get; set; }
public LocalFeatureTest.TopEnum MyEnum { get; set; }
// Optional serialization hooks a partial-class author may implement:
// protected virtual void BeforeSerialize() {}
// protected virtual void AfterDeserialize() {}
}
}
namespace LocalFeatureTest
{
/// <summary>Testing local interface serialization</summary>
public partial interface InterfaceTest
{
}
/// <summary>Testing local struct serialization</summary>
public partial struct StructTest
{
}
}
namespace TestB
{
// Placeholder namespace: the two types below must be supplied by user code elsewhere.
// Written elsewhere
// public struct ExternalStruct {}
// Written elsewhere
// public class ExternalClass {}
}
namespace Mine
{
/// <summary>
/// <para>This class is documented here:</para>
/// <para>With multiple lines</para>
/// </summary>
public partial class MyMessageV1
{
/// <summary>This field is important to comment as we just did here</summary>
public int FieldA { get; set; }
/// <summary>Values for unknown fields.</summary>
public List<global::SilentOrbit.ProtocolBuffers.KeyValue> PreservedFields;
}
}
namespace Yours
{
public partial class MyMessageV2
{
/// <summary> Initializes every defaulted field to its proto-declared default value.</summary>
public MyMessageV2()
{
FieldA = -1;
FieldB = 4.5;
FieldC = 5.4f;
FieldD = -2;
FieldE = -3;
FieldF = 4;
FieldG = 5;
FieldH = -6;
FieldI = -7;
FieldJ = 8;
FieldK = 9;
FieldL = -10;
FieldM = -11;
FieldN = false;
FieldO = "test";
FieldR = Yours.MyMessageV2.MyEnum.ETest2;
}
public enum MyEnum
{
/// <summary>First test</summary>
ETest1 = 0,
/// <summary>Second test</summary>
ETest2 = 3,
ETest3 = 2,
}
/// <summary>Demonstrates enum aliasing: Nothing, Zero and Nada all share the value 0.</summary>
public enum AliasedEnum
{
Nothing = 0,
Zero = 0,
Nada = 0,
Some = 1,
ALot = 2,
}
public int FieldA { get; set; }
public double FieldB { get; set; }
public float FieldC { get; set; }
public int FieldD { get; set; }
public long FieldE { get; set; }
public uint FieldF { get; set; }
public ulong FieldG { get; set; }
public int FieldH { get; set; }
public long FieldI { get; set; }
public uint FieldJ { get; set; }
public ulong FieldK { get; set; }
public int FieldL { get; set; }
public long FieldM { get; set; }
public bool FieldN { get; set; }
public string FieldO { get; set; }
public byte[] FieldP { get; set; }
public Yours.MyMessageV2.MyEnum FieldQ { get; set; }
public Yours.MyMessageV2.MyEnum FieldR { get; set; }
// Demonstrates a non-public generated accessor.
protected string Dummy { get; set; }
public List<uint> FieldT { get; set; }
public List<uint> FieldS { get; set; }
public Theirs.TheirMessage FieldU { get; set; }
public List<Theirs.TheirMessage> FieldV { get; set; }
}
}
namespace Theirs
{
/// <summary>Message referenced from Yours.MyMessageV2 (FieldU / FieldV).</summary>
public partial class TheirMessage
{
public int FieldA { get; set; }
}
}
namespace Proto.Test
{
/// <summary>Message without any low id (&lt; 16) fields</summary>
public partial class LongMessage
{
public int FieldX1 { get; set; }
public int FieldX2 { get; set; }
public int FieldX3 { get; set; }
public int FieldX4 { get; set; }
}
/// <summary>Nested testing</summary>
public partial class Data
{
public double Somefield { get; set; }
}
public partial class Container
{
public Proto.Test.Container.Nested MyNestedMessage { get; set; }
/// <summary>Name collision test: property shares the name of the Nested type below.</summary>
public Proto.Test.Container.Nested NestedField { get; set; }
public partial class Nested
{
public Proto.Test.Data NestedData { get; set; }
}
}
public partial class MyMessage
{
public int Foo { get; set; }
public string Bar { get; set; }
}
}
namespace LocalFeatureTest
{
/// <summary>Namespace-level (non-nested) enum with a wide value range.</summary>
public enum TopEnum
{
First = 1,
Last = 1000000,
}
}
namespace Proto.Test
{
/// <summary>Note: no zero-valued member; values start at 1.</summary>
public enum MyEnum
{
FOO = 1,
BAR = 2,
}
}
| {
"pile_set_name": "Github"
} |
'use strict';
var SymbolPoly = require('../polyfill');
module.exports = function (t, a) {
var symbol;
a.throws(function () { t(undefined); }, TypeError, "Undefined");
a.throws(function () { t(null); }, TypeError, "Null");
a.throws(function () { t(true); }, TypeError, "Primitive");
a.throws(function () { t('raz'); }, TypeError, "String");
a.throws(function () { t({}); }, TypeError, "Object");
a.throws(function () { t([]); }, TypeError, "Array");
if (typeof Symbol !== 'undefined') {
symbol = Symbol();
a(t(symbol), symbol, "Native");
}
symbol = SymbolPoly();
a(t(symbol), symbol, "Polyfill");
};
| {
"pile_set_name": "Github"
} |
-- Top 10 regions by unique-visitor count (ClickHouse uniq = approximate distinct)
-- for a single counter; secondary ORDER BY RegionID makes ties deterministic.
-- NOTE(review): trailing "-- nothing" presumably marks the expected (empty) result
-- of this benchmark query — confirm against the test harness.
SELECT RegionID, uniq(UserID) AS u FROM test.hits WHERE CounterID = 800784 GROUP BY RegionID ORDER BY u DESC, RegionID LIMIT 10 -- nothing
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_H
#define __HIREDIS_H
#include "read.h"
#include <stdarg.h> /* for va_list */
#include <sys/time.h> /* for struct timeval */
#include <stdint.h> /* uintXX_t, etc */
#include "sds.h" /* for sds */
/* Library version; HIREDIS_SONAME names the shared-object (ABI) version. */
#define HIREDIS_MAJOR 0
#define HIREDIS_MINOR 14
#define HIREDIS_PATCH 0
#define HIREDIS_SONAME 0.14
/* Connection type can be blocking or non-blocking and is set in the
 * least significant bit of the flags field in redisContext. */
#define REDIS_BLOCK 0x1
/* Connection may be disconnected before being free'd. The second bit
 * in the flags field is set when the context is connected. */
#define REDIS_CONNECTED 0x2
/* The async API might try to disconnect cleanly and flush the output
 * buffer and read all subsequent replies before disconnecting.
 * This flag means no new commands can come in and the connection
 * should be terminated once all replies have been read. */
#define REDIS_DISCONNECTING 0x4
/* Flag specific to the async API which means that the context should be clean
 * up as soon as possible. */
#define REDIS_FREEING 0x8
/* Flag that is set when an async callback is executed. */
#define REDIS_IN_CALLBACK 0x10
/* Flag that is set when the async context has one or more subscriptions. */
#define REDIS_SUBSCRIBED 0x20
/* Flag that is set when monitor mode is active */
#define REDIS_MONITORING 0x40
/* Flag that is set when we should set SO_REUSEADDR before calling bind() */
#define REDIS_REUSEADDR 0x80
#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */
/* number of times we retry to connect in the case of EADDRNOTAVAIL and
 * SO_REUSEADDR is being used. */
#define REDIS_CONNECT_RETRIES 10
#ifdef __cplusplus
extern "C" {
#endif
/* This is the reply object returned by redisCommand() */
typedef struct redisReply {
int type; /* REDIS_REPLY_* */
long long integer; /* The integer when type is REDIS_REPLY_INTEGER */
double dval; /* The double when type is REDIS_REPLY_DOUBLE */
size_t len; /* Length of string */
char *str; /* Used for REDIS_REPLY_ERROR, REDIS_REPLY_STRING
and REDIS_REPLY_DOUBLE (in addition to dval). */
size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */
struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */
} redisReply;
/* Create a protocol reader with default settings (see read.h). */
redisReader *redisReaderCreate(void);
/* Function to free the reply objects hiredis returns by default. */
void freeReplyObject(void *reply);
/* Functions to format a command according to the protocol. */
int redisvFormatCommand(char **target, const char *format, va_list ap);
int redisFormatCommand(char **target, const char *format, ...);
int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen);
int redisFormatSdsCommandArgv(sds *target, int argc, const char ** argv, const size_t *argvlen);
/* Release buffers produced by the redisFormat* functions above. */
void redisFreeCommand(char *cmd);
void redisFreeSdsCommand(sds cmd);
/* Transport used by a connection context. */
enum redisConnectionType {
REDIS_CONN_TCP,
REDIS_CONN_UNIX
};
/* Context for a connection to Redis */
typedef struct redisContext {
int err; /* Error flags, 0 when there is no error */
char errstr[128]; /* String representation of error when applicable */
int fd;
int flags;
char *obuf; /* Write buffer */
redisReader *reader; /* Protocol reader */
enum redisConnectionType connection_type;
struct timeval *timeout;
struct {
char *host;
char *source_addr;
int port;
} tcp;
struct {
char *path;
} unix_sock;
/* For non-blocking connect */
/* NOTE(review): "sockadr" looks like a typo for "sockaddr". Renaming the tag
 * would change the type the implementation file casts to, so confirm against
 * the .c file / upstream hiredis before fixing. */
struct sockadr *saddr;
size_t addrlen;
} redisContext;
/* Connect functions: each returns a context whose err field must be checked
 * by the caller before use. */
redisContext *redisConnect(const char *ip, int port);
redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv);
redisContext *redisConnectNonBlock(const char *ip, int port);
redisContext *redisConnectBindNonBlock(const char *ip, int port,
const char *source_addr);
redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port,
const char *source_addr);
redisContext *redisConnectUnix(const char *path);
redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv);
redisContext *redisConnectUnixNonBlock(const char *path);
/* Wrap an already-connected file descriptor in a context. */
redisContext *redisConnectFd(int fd);
/**
 * Reconnect the given context using the saved information.
 *
 * This re-uses the exact same connect options as in the initial connection.
 * host, ip (or path), timeout and bind address are reused,
 * flags are used unmodified from the existing context.
 *
 * Returns REDIS_OK on successful connect or REDIS_ERR otherwise.
 */
int redisReconnect(redisContext *c);
int redisSetTimeout(redisContext *c, const struct timeval tv);
int redisEnableKeepAlive(redisContext *c);
void redisFree(redisContext *c);
/* Free the context but keep the underlying fd open; returns that fd. */
int redisFreeKeepFd(redisContext *c);
int redisBufferRead(redisContext *c);
int redisBufferWrite(redisContext *c, int *done);
/* In a blocking context, this function first checks if there are unconsumed
 * replies to return and returns one if so. Otherwise, it flushes the output
 * buffer to the socket and reads until it has a reply. In a non-blocking
 * context, it will return unconsumed replies until there are no more. */
int redisGetReply(redisContext *c, void **reply);
int redisGetReplyFromReader(redisContext *c, void **reply);
/* Write a formatted command to the output buffer. Use these functions in blocking mode
 * to get a pipeline of commands. */
int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len);
/* Write a command to the output buffer. Use these functions in blocking mode
 * to get a pipeline of commands. */
int redisvAppendCommand(redisContext *c, const char *format, va_list ap);
int redisAppendCommand(redisContext *c, const char *format, ...);
int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen);
/* Issue a command to Redis. In a blocking context, it is identical to calling
 * redisAppendCommand, followed by redisGetReply. The function will return
 * NULL if there was an error in performing the request, otherwise it will
 * return the reply. In a non-blocking context, it is identical to calling
 * only redisAppendCommand and will always return NULL. */
void *redisvCommand(redisContext *c, const char *format, va_list ap);
void *redisCommand(redisContext *c, const char *format, ...);
void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen);
#ifdef __cplusplus
}
#endif
#endif
| {
"pile_set_name": "Github"
} |
// Static mock rows for the table-to-Excel demo: ten promotion campaigns
// with identical numeric metric keys (show/weak/signin/click/...).
// Values are arbitrary sample numbers; 'fav' is always 0 in this fixture.
export const table2excelData = [
  {
    'name': '推广名称1',
    'fav': 0,
    'show': 7302,
    'weak': 5627,
    'signin': 1563,
    'click': 4254,
    'active': 1438,
    'day7': 274,
    'day30': 285,
    'tomorrow': 1727,
    'day': 558,
    'week': 4440,
    'month': 5610
  },
  {
    'name': '推广名称2',
    'fav': 0,
    'show': 4720,
    'weak': 4086,
    'signin': 3792,
    'click': 8690,
    'active': 8470,
    'day7': 8172,
    'day30': 5197,
    'tomorrow': 1684,
    'day': 2593,
    'week': 2507,
    'month': 1537
  },
  {
    'name': '推广名称3',
    'fav': 0,
    'show': 7181,
    'weak': 8007,
    'signin': 8477,
    'click': 1879,
    'active': 16,
    'day7': 2249,
    'day30': 3450,
    'tomorrow': 377,
    'day': 1561,
    'week': 3219,
    'month': 1588
  },
  {
    'name': '推广名称4',
    'fav': 0,
    'show': 9911,
    'weak': 8976,
    'signin': 8807,
    'click': 8050,
    'active': 7668,
    'day7': 1547,
    'day30': 2357,
    'tomorrow': 7278,
    'day': 5309,
    'week': 1655,
    'month': 9043
  },
  {
    'name': '推广名称5',
    'fav': 0,
    'show': 934,
    'weak': 1394,
    'signin': 6463,
    'click': 5278,
    'active': 9256,
    'day7': 209,
    'day30': 3563,
    'tomorrow': 8285,
    'day': 1230,
    'week': 4840,
    'month': 9908
  },
  {
    'name': '推广名称6',
    'fav': 0,
    'show': 6856,
    'weak': 1608,
    'signin': 457,
    'click': 4949,
    'active': 2909,
    'day7': 4525,
    'day30': 6171,
    'tomorrow': 1920,
    'day': 1966,
    'week': 904,
    'month': 6851
  },
  {
    'name': '推广名称7',
    'fav': 0,
    'show': 5107,
    'weak': 6407,
    'signin': 4166,
    'click': 7970,
    'active': 1002,
    'day7': 8701,
    'day30': 9040,
    'tomorrow': 7632,
    'day': 4061,
    'week': 4359,
    'month': 3676
  },
  {
    'name': '推广名称8',
    'fav': 0,
    'show': 5107,
    'weak': 6407,
    'signin': 4166,
    'click': 7970,
    'active': 1002,
    'day7': 8701,
    'day30': 9040,
    'tomorrow': 7632,
    'day': 4061,
    'week': 4359,
    'month': 3676
  },
  {
    'name': '推广名称9',
    'fav': 0,
    'show': 5107,
    'weak': 6407,
    'signin': 4166,
    'click': 7970,
    'active': 1002,
    'day7': 8701,
    'day30': 9040,
    'tomorrow': 7632,
    'day': 4061,
    'week': 4359,
    'month': 3676
  },
  {
    'name': '推广名称10',
    'fav': 0,
    'show': 5107,
    'weak': 6407,
    'signin': 4166,
    'click': 7970,
    'active': 1002,
    'day7': 8701,
    'day30': 9040,
    'tomorrow': 7632,
    'day': 4061,
    'week': 4359,
    'month': 3676
  }
];
// Column definitions for the table-to-Excel demo grid. The '展示' (show)
// column demonstrates single-choice filtering around a 4000 threshold;
// the other numeric columns are plain sortable columns.
export const excelColumns = [
  {
    'title': '名称',
    'key': 'name'
  },
  {
    'title': '展示',
    'key': 'show',
    'sortable': true,
    filters: [
      {
        label: '大于4000',
        value: 1
      },
      {
        label: '小于4000',
        value: 2
      }
    ],
    filterMultiple: false,
    // Returns true when the row matches the selected filter value.
    // The original implementation fell through and returned undefined for
    // any value other than 1 or 2; an explicit `false` keeps the contract
    // boolean without changing which rows are kept (undefined is falsy).
    filterMethod (value, row) {
      if (value === 1) {
        return row.show > 4000;
      }
      if (value === 2) {
        return row.show < 4000;
      }
      return false;
    }
  },
  {
    'title': '唤醒',
    'key': 'weak',
    'sortable': true
  },
  {
    'title': '登录',
    'key': 'signin',
    'sortable': true
  },
  {
    'title': '点击',
    'key': 'click',
    'sortable': true
  },
  {
    'title': '激活',
    'key': 'active',
    'sortable': true
  },
  {
    'title': '30日留存',
    'key': 'day30',
    'sortable': true
  },
  {
    'title': '月活跃',
    'key': 'month',
    'sortable': true
  }
];
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Toolbox.Library
{
/// <summary>
/// Minimal read/write contract for a texture resource's basic dimensions.
/// </summary>
public interface ITexture
{
    /// <summary>Number of mipmap levels.</summary>
    uint MipCount { get; set; }
    /// <summary>Number of array layers (1 for a non-array texture — TODO confirm).</summary>
    uint ArrayCount { get; set; }
    /// <summary>Texture width.</summary>
    uint Width { get; set; }
    /// <summary>Texture height.</summary>
    uint Height { get; set; }
}
}
| {
"pile_set_name": "Github"
} |
// Babel runtime helper: a spec-compliant replacement for the `typeof`
// operator that also reports "symbol" when `Symbol` is polyfilled
// (where a native `typeof` on a symbol-like object yields "object").
function _typeof2(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof2 = function _typeof2(obj) { return typeof obj; }; } else { _typeof2 = function _typeof2(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof2(obj); }
// On the first call, picks an implementation based on whether native
// symbols exist, rebinds both the local `_typeof` and `module.exports`
// to it (so later calls skip the feature check), then evaluates it for
// the current argument.
function _typeof(obj) {
  if (typeof Symbol === "function" && _typeof2(Symbol.iterator) === "symbol") {
    // Native symbols: the simple helper is sufficient.
    module.exports = _typeof = function _typeof(obj) {
      return _typeof2(obj);
    };
  } else {
    // Polyfilled symbols: detect them via their constructor.
    module.exports = _typeof = function _typeof(obj) {
      return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : _typeof2(obj);
    };
  }
  return _typeof(obj);
}
module.exports = _typeof;
"pile_set_name": "Github"
} |
Accept-Ranges: bytes
Age: 281
Cache-Control: public, max-age=3600
Connection: keep-alive
Content-Language: en
Content-Length: 73757
Content-Type: text/html; charset=utf-8
Date: Thu, 05 Oct 2017 13:21:38 GMT
Etag: W/"1507209415-1"
Expires: Sun, 19 Nov 1978 05:00:00 GMT
Last-Modified: Thu, 05 Oct 2017 13:16:55 GMT
Link: <https://blog.pinterest.com/en>; rel="canonical",<https://blog.pinterest.com/en>; rel="shortlink"
Server: nginx
Set-Cookie: _ss_country_code=FR; Secure; Path=/
Strict-Transport-Security: max-age=31536000; includeSubDomains
Vary: Cookie,Accept-Encoding
Via: 1.1 varnish-v4 1.1 varnish
X-Cache: HIT, HIT
X-Cache-Hits: 11, 1
X-Content-Type-Options: nosniff
X-Drupal-Cache: MISS
X-Frame-Options: SameOrigin
X-Generator: Drupal 7 (http://drupal.org)
X-Request-Id: 5b78f275e3d8da06bef72b2387bcd630e29fc05e3a9b59283a899bcb753c2330
X-Served-By: cache-cdg8721-CDG
X-Timer: S1507209698.087227,VS0,VE1
| {
"pile_set_name": "Github"
} |
<template>
  <!-- Demo page for an ACL plugin: navigation plus buttons that switch
       the active permission set at runtime via this.$acl. -->
  <div id="app">
    <header>
      <router-link to="/">Public page</router-link> |
      <router-link to="/admin">Admin page</router-link>
    </header>
    <!-- Grant ['read', 'write'] (admin) or just 'read' (public). -->
    <section style="display: flex; padding: 10px">
      <button @click="$acl.change(['read', 'write'])">Turn admin</button>
      <button @click="$acl.change('read')">Turn public</button>
    </section>
    <!-- $acl.get exposes the currently active permission(s). -->
    <p style="padding: 10px">Current permission: {{ $acl.get }}</p>
    <hr>
    <p><small>Page content:</small></p>
    <router-view/>
  </div>
</template>
<script>
// Root component; all state lives in the router and the $acl plugin,
// so no local data or methods are needed.
export default {
  name: 'App'
}
</script>
package types
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"github.com/bytom/bytom/encoding/blockchain"
"github.com/bytom/bytom/errors"
"github.com/bytom/bytom/protocol/bc"
)
const serRequired = 0x7 // Bit mask accepted serialization flag.
// Tx holds a transaction along with its hash: the raw TxData plus the
// mapped entry form (*bc.Tx) derived from it via MapTx. The mapped form
// is excluded from JSON output.
type Tx struct {
	TxData
	*bc.Tx `json:"-"`
}

// NewTx returns a new Tx containing data and its hash. If you have already
// computed the hash, use struct literal notation to make a Tx object directly.
func NewTx(data TxData) *Tx {
	return &Tx{
		TxData: data,
		Tx:     MapTx(&data),
	}
}

// OutputID returns the ID of the output at outputIndex, as recorded in
// the mapped transaction's ResultIds.
func (tx *Tx) OutputID(outputIndex int) *bc.Hash {
	return tx.ResultIds[outputIndex]
}

// UnmarshalText fulfills the encoding.TextUnmarshaler interface.
// It decodes the hex text into TxData and then recomputes the mapped
// entry form so the two representations stay consistent.
func (tx *Tx) UnmarshalText(p []byte) error {
	if err := tx.TxData.UnmarshalText(p); err != nil {
		return err
	}
	tx.Tx = MapTx(&tx.TxData)
	return nil
}

// SetInputArguments sets the Arguments field in input n.
// Both the raw input and the corresponding mapped entry are updated;
// only issuance and spend entries carry witness arguments, so other
// entry types are left untouched.
func (tx *Tx) SetInputArguments(n uint32, args [][]byte) {
	tx.Inputs[n].SetArguments(args)
	id := tx.Tx.InputIDs[n]
	e := tx.Entries[id]
	switch e := e.(type) {
	case *bc.Issuance:
		e.WitnessArguments = args
	case *bc.Spend:
		e.WitnessArguments = args
	}
}
// TxData encodes a transaction in the blockchain.
type TxData struct {
	Version        uint64 // transaction version, serialized as a varint63
	SerializedSize uint64 // size in bytes of the serialized form (set by readFrom)
	TimeRange      uint64 // time bound; writeTo labels it "maxtime" in its error message
	Inputs         []*TxInput
	Outputs        []*TxOutput
}
// MarshalText fulfills the text-marshaling interface used by JSON
// encoding: the transaction is rendered as the hex encoding of its
// binary serialization.
func (tx *TxData) MarshalText() ([]byte, error) {
	var w bytes.Buffer
	if _, err := tx.WriteTo(&w); err != nil {
		return nil, err
	}
	return []byte(hex.EncodeToString(w.Bytes())), nil
}
// UnmarshalText fulfills the encoding.TextUnmarshaler interface.
// The input is the hex text produced by MarshalText; any bytes left
// over after decoding one transaction are treated as an error.
func (tx *TxData) UnmarshalText(p []byte) error {
	b := make([]byte, hex.DecodedLen(len(p)))
	if _, err := hex.Decode(b, p); err != nil {
		return err
	}
	r := blockchain.NewReader(b)
	if err := tx.readFrom(r); err != nil {
		return err
	}
	// Reject trailing bytes so a valid prefix followed by garbage fails.
	if trailing := r.Len(); trailing > 0 {
		return fmt.Errorf("trailing garbage (%d bytes)", trailing)
	}
	return nil
}

// readFrom decodes the binary serialization written by writeTo:
// one serialization-flags byte (must equal serRequired), then version,
// time range, a counted list of inputs, and a counted list of outputs.
// SerializedSize is recorded as the number of bytes consumed.
func (tx *TxData) readFrom(r *blockchain.Reader) (err error) {
	startSerializedSize := r.Len() // remember length to compute bytes consumed
	var serflags [1]byte
	if _, err = io.ReadFull(r, serflags[:]); err != nil {
		return errors.Wrap(err, "reading serialization flags")
	}
	if serflags[0] != serRequired {
		return fmt.Errorf("unsupported serflags %#x", serflags[0])
	}
	if tx.Version, err = blockchain.ReadVarint63(r); err != nil {
		return errors.Wrap(err, "reading transaction version")
	}
	if tx.TimeRange, err = blockchain.ReadVarint63(r); err != nil {
		return err
	}
	n, err := blockchain.ReadVarint31(r)
	if err != nil {
		return errors.Wrap(err, "reading number of transaction inputs")
	}
	for ; n > 0; n-- {
		ti := new(TxInput)
		if err = ti.readFrom(r); err != nil {
			return errors.Wrapf(err, "reading input %d", len(tx.Inputs))
		}
		tx.Inputs = append(tx.Inputs, ti)
	}
	n, err = blockchain.ReadVarint31(r)
	if err != nil {
		return errors.Wrap(err, "reading number of transaction outputs")
	}
	for ; n > 0; n-- {
		to := new(TxOutput)
		if err = to.readFrom(r); err != nil {
			return errors.Wrapf(err, "reading output %d", len(tx.Outputs))
		}
		tx.Outputs = append(tx.Outputs, to)
	}
	// Bytes consumed = initial remaining length minus what is left now.
	tx.SerializedSize = uint64(startSerializedSize - r.Len())
	return nil
}
// WriteTo writes tx to w, returning the number of bytes written.
// Errors from the underlying writer are accumulated via errors.NewWriter.
func (tx *TxData) WriteTo(w io.Writer) (int64, error) {
	ew := errors.NewWriter(w)
	if err := tx.writeTo(ew, serRequired); err != nil {
		return 0, err
	}
	return ew.Written(), ew.Err()
}

// writeTo emits the binary serialization consumed by readFrom, in the
// same field order: flags byte, version, time range, counted inputs,
// counted outputs.
func (tx *TxData) writeTo(w io.Writer, serflags byte) error {
	if _, err := w.Write([]byte{serflags}); err != nil {
		return errors.Wrap(err, "writing serialization flags")
	}
	if _, err := blockchain.WriteVarint63(w, tx.Version); err != nil {
		return errors.Wrap(err, "writing transaction version")
	}
	if _, err := blockchain.WriteVarint63(w, tx.TimeRange); err != nil {
		return errors.Wrap(err, "writing transaction maxtime")
	}
	if _, err := blockchain.WriteVarint31(w, uint64(len(tx.Inputs))); err != nil {
		return errors.Wrap(err, "writing tx input count")
	}
	for i, ti := range tx.Inputs {
		if err := ti.writeTo(w); err != nil {
			return errors.Wrapf(err, "writing tx input %d", i)
		}
	}
	if _, err := blockchain.WriteVarint31(w, uint64(len(tx.Outputs))); err != nil {
		return errors.Wrap(err, "writing tx output count")
	}
	for i, to := range tx.Outputs {
		if err := to.writeTo(w); err != nil {
			return errors.Wrapf(err, "writing tx output %d", i)
		}
	}
	return nil
}
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.axis2.wsdl.util;
import javax.xml.namespace.QName;
import java.util.ArrayList;
import java.util.List;
/**
 * Holder for the information that is required to be attached to an
 * AxisMessage during unwrapping: the operation's qualified name together
 * with the list of message parts for that operation.
 */
public class MessagePartInformationHolder {

    // Qualified name of the operation this part information belongs to.
    private QName operationName;

    // Message parts for the operation. Kept as a raw List for backward
    // compatibility with existing callers of the getter/setter.
    private List partsList = new ArrayList();

    /** @return the qualified name of the operation */
    public QName getOperationName() {
        return operationName;
    }

    /** @param operationName the qualified name of the operation */
    public void setOperationName(QName operationName) {
        this.operationName = operationName;
    }

    /** @return the (mutable) list of message parts */
    public List getPartsList() {
        return partsList;
    }

    /** @param partsList the list of message parts to hold */
    public void setPartsList(List partsList) {
        this.partsList = partsList;
    }
}
| {
"pile_set_name": "Github"
} |
export { default } from './tooltip.component';
| {
"pile_set_name": "Github"
} |
package org.springframework.webflow.samples.booking;
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
/**
 * Bean-validation constraint validator for {@code @BookingDateRange}:
 * a booking is valid unless both dates are present and the check-out
 * date falls before the check-in date. Missing dates are accepted here
 * (left to other constraints).
 */
public class BookingDateRangeValidator implements ConstraintValidator<BookingDateRange, Booking> {

    public void initialize(BookingDateRange bookingDateRange) {
        // No per-annotation configuration required.
    }

    public boolean isValid(Booking booking, ConstraintValidatorContext context) {
        boolean bothDatesPresent = booking.getCheckinDate() != null
                && booking.getCheckoutDate() != null;
        // Invalid only when a complete range is reversed.
        return !(bothDatesPresent
                && booking.getCheckoutDate().before(booking.getCheckinDate()));
    }
}
| {
"pile_set_name": "Github"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"context"
"io"
"os"
)
// readFrom implements the io.ReaderFrom fast path for TCPConn.
// On Plan 9 it simply delegates to the generic buffered copy; there is
// no platform-specific accelerated path here.
func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
	return genericReadFrom(c, r)
}

// dialTCP routes through the test hook when one is installed (so tests
// can intercept dials), otherwise performs the real dial.
func dialTCP(ctx context.Context, net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
	if testHookDialTCP != nil {
		return testHookDialTCP(ctx, net, laddr, raddr)
	}
	return doDialTCP(ctx, net, laddr, raddr)
}

// doDialTCP validates the network name and remote address, then dials
// via the Plan 9 file-server interface and wraps the result in a TCPConn.
func doDialTCP(ctx context.Context, net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
	switch net {
	case "tcp", "tcp4", "tcp6":
	default:
		return nil, UnknownNetworkError(net)
	}
	if raddr == nil {
		return nil, errMissingAddress
	}
	fd, err := dialPlan9(ctx, net, laddr, raddr)
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}
// ok reports whether the listener and its Plan 9 control file are usable.
func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil && ln.fd.ctl != nil }

// accept waits for the next incoming connection and wraps it in a TCPConn.
func (ln *TCPListener) accept() (*TCPConn, error) {
	fd, err := ln.fd.acceptPlan9()
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}

// close shuts the listener down: it closes the poll descriptor, writes
// "hangup" to the Plan 9 control file to tear down the announced
// connection, and finally closes the control file itself.
func (ln *TCPListener) close() error {
	if err := ln.fd.pfd.Close(); err != nil {
		return err
	}
	if _, err := ln.fd.ctl.WriteString("hangup"); err != nil {
		// Best effort: still close the control file before reporting.
		ln.fd.ctl.Close()
		return err
	}
	if err := ln.fd.ctl.Close(); err != nil {
		return err
	}
	return nil
}

// file returns a duplicated *os.File for the listener.
func (ln *TCPListener) file() (*os.File, error) {
	f, err := ln.dup()
	if err != nil {
		return nil, err
	}
	return f, nil
}

// listenTCP announces on laddr via the Plan 9 file-server interface and
// returns the wrapped listener.
func listenTCP(ctx context.Context, network string, laddr *TCPAddr) (*TCPListener, error) {
	fd, err := listenPlan9(ctx, network, laddr)
	if err != nil {
		return nil, err
	}
	return &TCPListener{fd}, nil
}
| {
"pile_set_name": "Github"
} |
<?php
namespace console\controllers;
use yii\console\Controller;
use eagle\modules\listing\models\OdOrder2;
use eagle\modules\listing\models\OdOrderItem2;
use eagle\modules\order\models\OdOrder;
use eagle\modules\order\models\OdOrderItem;
use eagle\modules\listing\models\OdOrderShipped2;
use eagle\modules\order\models\OdOrderShipped;
use eagle\models\UserBase;
use eagle\modules\platform\apihelpers\AliexpressAccountsApiHelper;
use eagle\models\AliexpressOrder;
use common\api\aliexpressinterface\AliexpressInterface_Helper;
use eagle\modules\listing\models\EbayItem;
use eagle\modules\listing\helpers\EbayitemHelper;
/**
* 后台处理某些逻辑的脚本
*/
class ScriptController extends Controller {

    /**
     * Migrate order data (od_order, od_order_item, od_order_shipped) from the
     * eagle1 schema to the eagle2 schema for every master user whose uid lies
     * in the half-open range (minUid, maxUid].
     *
     * The five actionOrderNdata1to2 entry points were previously five
     * byte-identical copies of this loop differing only in the uid range;
     * they now all delegate here.
     *
     * Fix over the original copies: the original wrote
     * $row['default_warehouse_id '] (note the trailing space in the key),
     * so the default_warehouse_id attribute was silently never set; the
     * key is corrected here.
     *
     * @param int $minUid exclusive lower bound of the uid range
     * @param int $maxUid inclusive upper bound of the uid range
     */
    private function migrateOrderData($minUid, $maxUid)
    {
        set_time_limit(0);
        $users = UserBase::find()
            ->where('puid = 0 and uid >' . $minUid . ' and uid <=' . $maxUid)
            ->select('uid')
            ->asArray()
            ->all();
        foreach ($users as $user) {
            $uid = $user['uid'];
            echo 'start connect subdb:--->' . $uid . "\n";
            try {
                // Migrate od_order rows for this user.
                echo 'start deal od_order for user:' . $uid . "\n";
                $db = OdOrder2::getDb();
                $q = $db->createCommand('select * from ' . OdOrder2::tableName() . ' order by order_id ASC')->query();
                while ($row = $q->read()) {
                    // Clamp any status beyond "paid" down to 200.
                    if ($row['order_status'] > 200) {
                        $row['order_status'] = 200;
                    }
                    // Drop eagle1-only columns that no longer exist in eagle2.
                    unset($row['order_manual_id']);
                    unset($row['default_shipping_method_code']);
                    $row['default_warehouse_id'] = 0; // key fixed: was 'default_warehouse_id ' (trailing space)
                    unset($row['default_carrier_code']);
                    $row['is_manual_order'] = '0';
                    $newOrder = new OdOrder();
                    $newOrder->setAttributes($row, false);
                    if ($newOrder->save(false)) {
                        echo 'od_order:' . $row['order_id'] . "save success!\n";
                    } else {
                        echo 'od_order:' . $row['order_id'] . "save failure!\n";
                        continue;
                    }
                    // Order saved: migrate its item rows.
                    $items = OdOrderItem2::find()->where(['order_id' => $row['order_id']])->asArray()->all();
                    if (count($items)) {
                        $failure = 0; // number of item rows that failed to save
                        foreach ($items as $itemRow) {
                            $item = new OdOrderItem();
                            $item->setAttributes($itemRow, false);
                            if ($item->save(false)) {
                                echo 'od_order_item for OrderID:' . $row['order_id'] . "save success!\n";
                            } else {
                                $failure++;
                                echo 'od_order_item for OrderID:' . $row['order_id'] . "save failure!\n";
                            }
                        }
                        // If any item failed, roll the whole order back and skip
                        // its shipment rows.
                        if ($failure > 0) {
                            OdOrder::deleteAll(['order_id' => $row['order_id']]);
                            OdOrderItem::deleteAll(['order_id' => $row['order_id']]);
                            continue;
                        }
                    }
                    // Migrate the order's shipment rows.
                    $shipped = OdOrderShipped2::find()->where(['order_id' => $row['order_id']])->asArray()->all();
                    if (count($shipped)) {
                        foreach ($shipped as $shippedRow) {
                            $ship = new OdOrderShipped();
                            $ship->setAttributes($shippedRow, false);
                            if ($ship->save(false)) {
                                echo 'od_order_shipped for OrderID:' . $row['order_id'] . "save success!\n";
                            } else {
                                echo 'od_order_shipped for OrderID:' . $row['order_id'] . "save failure!\n";
                            }
                        }
                    }
                }
            } catch (\Exception $e) {
                echo $e->getMessage() . "\n";
            }
        }
    }

    /**
     * Migrate eagle1 order data to eagle2 for master users with 0 < uid <= 240.
     * @author fanjs
     */
    public function actionOrder1data1to2()
    {
        $this->migrateOrderData(0, 240);
    }

    /** Migrate eagle1 order data to eagle2 for master users with 240 < uid <= 480. */
    public function actionOrder2data1to2()
    {
        $this->migrateOrderData(240, 480);
    }

    /** Migrate eagle1 order data to eagle2 for master users with 480 < uid <= 720. */
    public function actionOrder3data1to2()
    {
        $this->migrateOrderData(480, 720);
    }

    /** Migrate eagle1 order data to eagle2 for master users with 720 < uid <= 960. */
    public function actionOrder4data1to2()
    {
        $this->migrateOrderData(720, 960);
    }

    /** Migrate eagle1 order data to eagle2 for master users with 960 < uid <= 1200. */
    public function actionOrder5data1to2()
    {
        $this->migrateOrderData(960, 1200);
    }

    /**
     * Batch-check the status of historical orders (non-exceptional orders
     * created before timestamp 1437973200) in each sub-database and bring
     * the local order status back in sync with the source platform.
     * @author fanjs
     */
    public function actionDocheck()
    {
        set_time_limit(0);
        $users = UserBase::find()->where('puid = 0 and uid >0 and uid <=1300')->select('uid')->asArray()->all();
        foreach ($users as $user) {
            $uid = $user['uid'];
            echo 'start connect subdb:--->' . $uid . "\n";
            try {
                echo 'start deal od_order for user:' . $uid . "\n";
                $db = OdOrder2::getDb();
                $q = $db->createCommand('select * from ' . OdOrder::tableName() . ' where exception_status=0 and create_time <1437973200 order by order_id ASC')->query();
                while ($row = $q->read()) {
                    $order = OdOrder::findOne($row['order_id']);
                    switch ($row['order_source']) {
                        case 'ebay':
                            if (200 <= $row['order_status'] && $row['order_status'] <= 300) {
                                // Treat orders that have a delivery time, or that were
                                // paid more than 20 days ago without one, as shipped.
                                if ($order->delivery_time > 0 || ($order->delivery_time == 0 && $order->paid_time <= (time() - 20 * 24 * 3600))) {
                                    $order->order_status = OdOrder::STATUS_SHIPPED;
                                }
                            }
                            break;
                        case 'aliexpress':
                            $aliOrder = AliexpressOrder::find()->where(['id' => $row['order_source_order_id']])->select(['orderstatus', 'gmtpaysuccess', 'gmtcreate'])->asArray()->One();
                            // Map the AliExpress platform status onto the local code.
                            if (in_array($aliOrder['orderstatus'], array('PLACE_ORDER_SUCCESS'))) { // unpaid
                                $order_status = 100;
                            } elseif (in_array($aliOrder['orderstatus'], array('WAIT_SELLER_SEND_GOODS', 'RISK_CONTROL'))) { // paid
                                $order_status = 200;
                            } elseif (in_array($aliOrder['orderstatus'], array('IN_CANCEL'))) { // cancellation requested
                                $order_status = 600;
                            } elseif (in_array($aliOrder['orderstatus'], array('SELLER_PART_SEND_GOODS', 'WAIT_BUYER_ACCEPT_GOODS', 'FUND_PROCESSING', 'WAIT_SELLER_EXAMINE_MONEY'))) {
                                $order_status = 400;
                            } elseif (in_array($aliOrder['orderstatus'], array('FINISH'))) {
                                $order_status = 500;
                            } elseif (in_array($aliOrder['orderstatus'], array('IN_ISSUE', 'IN_FROZEN'))) { // orders to hold
                                // Infer whether it was ever paid from the payment time.
                                if (strlen($aliOrder['gmtpaysuccess']) > 10) {
                                    $order_status = 200;
                                } else {
                                    $order_status = 100;
                                }
                            }
                            // NOTE(review): for a platform status outside the lists
                            // above, $order_status stays unset (same as the original
                            // code) — confirm whether a default is wanted here.
                            $order->order_status = $order_status;
                            $order->order_source_status = $aliOrder['orderstatus'];
                            $order->order_source_create_time = AliexpressInterface_Helper::transLaStrTimetoTimestamp($aliOrder['gmtcreate']);
                            $order->paid_time = AliexpressInterface_Helper::transLaStrTimetoTimestamp($aliOrder['gmtpaysuccess']);
                            echo $row['order_source_order_id'] . "\n";
                            print_r($aliOrder);
                            break;
                        default:
                            break;
                    }
                    $order->save(false);
                }
            } catch (\Exception $e) {
                echo $e->getMessage() . "\n";
            }
        }
    }

    /**
     * Split the variations of active multi-variation eBay items into the
     * variation mapping table.
     * @author fanjs
     */
    public function actionVariationmajor()
    {
        $users = UserBase::find()->where('puid = 0 and uid >0')->select('uid')->asArray()->all();
        foreach ($users as $user) {
            $uid = $user['uid'];
            echo 'start connect subdb:--->' . $uid . "\n";
            try {
                echo 'start deal ebayitem for user:' . $uid . "\n";
                $db = EbayItem::getDb();
                $q = $db->createCommand('select * from ' . EbayItem::tableName() . ' where listingstatus = "Active" and isvariation = 1')->query();
                $count = 0;
                while ($row = $q->read()) {
                    $itemid = $row['itemid'];
                    if (strlen($itemid) == 0) {
                        continue;
                    }
                    echo '正在处理数据' . $itemid . "\n";
                    $ei = EbayItem::findOne(['itemid' => $itemid]);
                    $detail = $ei->detail;
                    EbayitemHelper::SaveVariation($itemid, $detail->variation);
                    $count++;
                }
                echo '总共处理' . $count . '条数据';
            } catch (\Exception $e) {
                echo $e->getMessage() . "\n";
            }
        }
    }
}
"pile_set_name": "Github"
} |
open OUnit2
open Expr
(* Outcome of feeding one source string to the parser. *)
type result =
| OK of expr  (* parsed successfully into this AST *)
| Fail        (* the parser raised Parsing.Parse_error *)

(* Build a RecordExtend node from an association list of (label, exprs). *)
let record label_expr_list record = RecordExtend(label_map_from_list label_expr_list, record)

(* (source, expected result) pairs: variables, calls, functions, lets,
   syntax errors, and record construction/selection/restriction. *)
let test_cases = [
  ("", Fail);
  ("a", OK (Var "a"));
  ("f(x, y)", OK (Call(Var "f", [Var "x"; Var "y"])));
  ("f(x)(y)", OK (Call(Call(Var "f", [Var "x"]), [Var "y"])));
  ("let f = fun x y -> g(x, y) in f(a, b)",
    OK (Let("f", Fun(["x"; "y"], Call(Var "g", [Var "x"; Var "y"])),
      Call(Var "f", [Var "a"; Var "b"]))));
  ("let x = a in " ^
   "let y = b in " ^
   "f(x, y)", OK (Let("x", Var "a", Let("y", Var "b", Call(Var "f", [Var "x"; Var "y"])))));
  ("f x", Fail);
  ("let a = one", Fail);
  ("a, b", Fail);
  ("a = b", Fail);
  ("()", Fail);
  ("fun x, y -> y", Fail);
  (* records *)
  ("{}", OK RecordEmpty);
  ("{ }", OK RecordEmpty);
  ("{", Fail);
  ("a.x", OK (RecordSelect(Var "a", "x")));
  ("{m - a}", OK (RecordRestrict(Var "m", "a")));
  ("{m - a", Fail);
  ("m - a", Fail);
  ("{a = x}", OK (record [("a", [Var "x"])] RecordEmpty));
  ("{a = x", Fail);
  ("{a=x, b = y}", OK (record [("a", [Var "x"]); ("b", [Var "y"])] RecordEmpty));
  ("{b = y ,a=x}", OK (record [("a", [Var "x"]); ("b", [Var "y"])] RecordEmpty));
  ("{a=x,h=w,d=y,b=q,g=z,c=t,e=s,f=r}",
    OK (record [("a", [Var "x"]); ("b", [Var "q"]); ("c", [Var "t"]); ("d", [Var "y"]);
      ("e", [Var "s"]); ("f", [Var "r"]); ("g", [Var "z"]); ("h", [Var "w"])] RecordEmpty));
  ("{a = x|m}", OK (record [("a", [Var "x"])] (Var "m")));
  ("{a | m}", Fail);
  ("{ a = x, b = y | m}", OK (record [("a", [Var "x"]); ("b", [Var "y"])] (Var "m")));
  ("{ a = x, b = y | {m - a} }",
    OK (record [("a", [Var "x"]); ("b", [Var "y"])] (RecordRestrict(Var "m", "a"))));
  ("{ b = y | m - a }", Fail);
  ("let x = {a = f(x), b = y.b} in { a = fun z -> z | {x - a} }",
    OK (Let("x", record [("a", [Call(Var "f", [Var "x"])]); ("b", [RecordSelect(Var "y", "b")])] RecordEmpty, record [("a", [Fun(["z"], Var "z")])]
      (RecordRestrict (Var "x", "a")))));
]
(* Render a parse result for use in test-failure messages. *)
let string_of_result result =
  match result with
  | Fail -> "Fail"
  | OK expr -> Printf.sprintf "OK (%s)" (string_of_expr expr)
let rec cmp_expr expr1 expr2 = match (expr1, expr2) with
| Var name1, Var name2 -> name1 = name2
| Call(fn1, args1), Call(fn2, args2) ->
cmp_expr fn1 fn2 && List.for_all2 cmp_expr args1 args2
| Fun(params1, body1), Fun(params2, body2) ->
params1 = params2 && cmp_expr body1 body2
| Let(name1, expr1, body1), Let(name2, expr2, body2) ->
name1 = name2 && cmp_expr expr1 expr2 && cmp_expr body1 body2
| RecordSelect(r1, label1), RecordSelect(r2, label2) ->
label1 = label2 && cmp_expr r1 r2
| RecordExtend(label_expr_map1, r1), RecordExtend(label_expr_map2, r2) ->
LabelMap.equal (List.for_all2 cmp_expr) label_expr_map1 label_expr_map2 && cmp_expr r1 r2
| RecordRestrict(r1, label1), RecordRestrict(r2, label2) ->
label1 = label2 && cmp_expr r1 r2
| RecordEmpty, RecordEmpty -> true
| _, _ -> false
let cmp_result result1 result2 = match (result1, result2) with
| Fail, Fail -> true
| OK expr1, OK expr2 -> cmp_expr expr1 expr2
| _ -> false
let make_single_test_case (code, expected_result) =
String.escaped code >:: fun _ ->
let result =
try
OK (Parser.expr_eof Lexer.token (Lexing.from_string code))
with Parsing.Parse_error ->
Fail
in
assert_equal ~printer:string_of_result ~cmp:cmp_result expected_result result
(* Top-level OUnit suite: one test case per (input, expected) pair in
   test_cases, labelled "test_parser". *)
let suite =
	"test_parser" >::: List.map make_single_test_case test_cases
| {
"pile_set_name": "Github"
} |
# Tagalog translation for transmission
# Copyright (c) 2010 Rosetta Contributors and Canonical Ltd 2010
# This file is distributed under the same license as the transmission package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2010.
#
msgid ""
msgstr ""
"Project-Id-Version: transmission\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2012-02-03 15:22-0600\n"
"PO-Revision-Date: 2010-09-19 14:43+0000\n"
"Last-Translator: Launchpad Translations Administrators <Unknown>\n"
"Language-Team: Tagalog <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=n > 1;\n"
"X-Launchpad-Export-Date: 2013-06-26 02:13+0000\n"
"X-Generator: Launchpad (build 16681)\n"
#: ../gtk/actions.c:45
msgid "Sort by _Activity"
msgstr ""
#: ../gtk/actions.c:46
msgid "Sort by _Name"
msgstr ""
#: ../gtk/actions.c:47
msgid "Sort by _Progress"
msgstr ""
#: ../gtk/actions.c:48
msgid "Sort by _Queue"
msgstr ""
#: ../gtk/actions.c:49
msgid "Sort by Rati_o"
msgstr ""
#: ../gtk/actions.c:50
msgid "Sort by Stat_e"
msgstr ""
#: ../gtk/actions.c:51
msgid "Sort by A_ge"
msgstr ""
#: ../gtk/actions.c:52
msgid "Sort by Time _Left"
msgstr ""
#: ../gtk/actions.c:53
msgid "Sort by Si_ze"
msgstr ""
#: ../gtk/actions.c:70
msgid "_Show Transmission"
msgstr ""
#: ../gtk/actions.c:71
msgid "Message _Log"
msgstr ""
#: ../gtk/actions.c:86
msgid "Enable Alternative Speed _Limits"
msgstr ""
#: ../gtk/actions.c:87
msgid "_Compact View"
msgstr ""
#: ../gtk/actions.c:88
msgid "Re_verse Sort Order"
msgstr ""
#: ../gtk/actions.c:89
msgid "_Filterbar"
msgstr ""
#: ../gtk/actions.c:90
msgid "_Statusbar"
msgstr ""
#: ../gtk/actions.c:91
msgid "_Toolbar"
msgstr ""
#: ../gtk/actions.c:96
msgid "_File"
msgstr ""
#: ../gtk/actions.c:97
msgid "_Torrent"
msgstr ""
#: ../gtk/actions.c:98
msgid "_View"
msgstr ""
#: ../gtk/actions.c:99
msgid "_Sort Torrents By"
msgstr ""
#: ../gtk/actions.c:100
msgid "_Queue"
msgstr ""
#: ../gtk/actions.c:101 ../gtk/details.c:2426
msgid "_Edit"
msgstr ""
#: ../gtk/actions.c:102
msgid "_Help"
msgstr ""
#: ../gtk/actions.c:103
msgid "Copy _Magnet Link to Clipboard"
msgstr ""
#: ../gtk/actions.c:104
msgid "Open _URL…"
msgstr ""
#: ../gtk/actions.c:104
msgid "Open URL…"
msgstr ""
#: ../gtk/actions.c:105 ../gtk/actions.c:106
msgid "Open a torrent"
msgstr ""
#: ../gtk/actions.c:107
msgid "_Start"
msgstr ""
#: ../gtk/actions.c:107
msgid "Start torrent"
msgstr ""
#: ../gtk/actions.c:108
msgid "Start _Now"
msgstr ""
#: ../gtk/actions.c:108
msgid "Start torrent now"
msgstr ""
#: ../gtk/actions.c:109
msgid "_Statistics"
msgstr ""
#: ../gtk/actions.c:110
msgid "_Donate"
msgstr ""
#: ../gtk/actions.c:111
msgid "_Verify Local Data"
msgstr ""
#: ../gtk/actions.c:112
msgid "_Pause"
msgstr ""
#: ../gtk/actions.c:112
msgid "Pause torrent"
msgstr ""
#: ../gtk/actions.c:113
msgid "_Pause All"
msgstr ""
#: ../gtk/actions.c:113
msgid "Pause all torrents"
msgstr ""
#: ../gtk/actions.c:114
msgid "_Start All"
msgstr ""
#: ../gtk/actions.c:114
msgid "Start all torrents"
msgstr ""
#: ../gtk/actions.c:115
msgid "Set _Location…"
msgstr ""
#: ../gtk/actions.c:116
msgid "Remove torrent"
msgstr ""
#: ../gtk/actions.c:117
msgid "_Delete Files and Remove"
msgstr ""
#: ../gtk/actions.c:118
msgid "_New…"
msgstr ""
#: ../gtk/actions.c:118
msgid "Create a torrent"
msgstr ""
#: ../gtk/actions.c:119
msgid "_Quit"
msgstr ""
#: ../gtk/actions.c:120
msgid "Select _All"
msgstr ""
#: ../gtk/actions.c:121
msgid "Dese_lect All"
msgstr ""
#: ../gtk/actions.c:123
msgid "Torrent properties"
msgstr ""
#: ../gtk/actions.c:124
msgid "Open Fold_er"
msgstr ""
#: ../gtk/actions.c:126
msgid "_Contents"
msgstr ""
#: ../gtk/actions.c:127
msgid "Ask Tracker for _More Peers"
msgstr ""
#: ../gtk/actions.c:128
msgid "Move to _Top"
msgstr ""
#: ../gtk/actions.c:129
msgid "Move _Up"
msgstr ""
#: ../gtk/actions.c:130
msgid "Move _Down"
msgstr ""
#: ../gtk/actions.c:131
msgid "Move to _Bottom"
msgstr ""
#: ../gtk/actions.c:132
msgid "Present Main Window"
msgstr ""
#: ../gtk/conf.c:331 ../gtk/conf.c:336
#, c-format
msgid "Importing \"%s\""
msgstr ""
#: ../gtk/details.c:448 ../gtk/details.c:460
msgid "Use global settings"
msgstr ""
#: ../gtk/details.c:449
msgid "Seed regardless of ratio"
msgstr ""
#: ../gtk/details.c:450
msgid "Stop seeding at ratio:"
msgstr ""
#: ../gtk/details.c:461
msgid "Seed regardless of activity"
msgstr ""
#: ../gtk/details.c:462
msgid "Stop seeding if idle for N minutes:"
msgstr ""
#: ../gtk/details.c:478 ../gtk/tr-prefs.c:1282
msgid "Speed"
msgstr ""
#: ../gtk/details.c:480
msgid "Honor global _limits"
msgstr ""
#: ../gtk/details.c:485
#, c-format
msgid "Limit _download speed (%s):"
msgstr ""
#: ../gtk/details.c:498
#, c-format
msgid "Limit _upload speed (%s):"
msgstr ""
#: ../gtk/details.c:511 ../gtk/open-dialog.c:351
msgid "Torrent _priority:"
msgstr ""
#: ../gtk/details.c:515
msgid "Seeding Limits"
msgstr ""
#: ../gtk/details.c:525
msgid "_Ratio:"
msgstr ""
#: ../gtk/details.c:534
msgid "_Idle:"
msgstr ""
#: ../gtk/details.c:537
msgid "Peer Connections"
msgstr ""
#: ../gtk/details.c:540
msgid "_Maximum peers:"
msgstr ""
#: ../gtk/details.c:559 ../gtk/torrent-cell-renderer.c:201
#: ../libtransmission/verify.c:273
msgid "Queued for verification"
msgstr ""
#: ../gtk/details.c:560
msgid "Verifying local data"
msgstr ""
#: ../gtk/details.c:561 ../gtk/torrent-cell-renderer.c:204
msgid "Queued for download"
msgstr ""
#: ../gtk/details.c:562 ../gtk/filter.c:702
msgctxt "Verb"
msgid "Downloading"
msgstr ""
#: ../gtk/details.c:563 ../gtk/torrent-cell-renderer.c:207
msgid "Queued for seeding"
msgstr ""
#: ../gtk/details.c:564 ../gtk/filter.c:703
msgctxt "Verb"
msgid "Seeding"
msgstr ""
#: ../gtk/details.c:565 ../gtk/filter.c:705 ../gtk/torrent-cell-renderer.c:198
msgid "Finished"
msgstr ""
#: ../gtk/details.c:565 ../gtk/filter.c:704 ../gtk/torrent-cell-renderer.c:198
msgid "Paused"
msgstr ""
#: ../gtk/details.c:598
msgid "N/A"
msgstr ""
#: ../gtk/details.c:610 ../gtk/file-list.c:610
msgid "Mixed"
msgstr ""
#: ../gtk/details.c:611
msgid "No Torrents Selected"
msgstr ""
#: ../gtk/details.c:633
msgid "Private to this tracker -- DHT and PEX disabled"
msgstr ""
#: ../gtk/details.c:635
msgid "Public torrent"
msgstr ""
#: ../gtk/details.c:658
#, c-format
msgid "Created by %1$s"
msgstr ""
#: ../gtk/details.c:660
#, c-format
msgid "Created on %1$s"
msgstr ""
#: ../gtk/details.c:662
#, c-format
msgid "Created by %1$s on %2$s"
msgstr ""
#: ../gtk/details.c:748
msgid "Unknown"
msgstr ""
#: ../gtk/details.c:776
#, c-format
msgid "%1$s (%2$'d piece @ %3$s)"
msgid_plural "%1$s (%2$'d pieces @ %3$s)"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/details.c:782
#, c-format
msgid "%1$s (%2$'d piece)"
msgid_plural "%1$s (%2$'d pieces)"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/details.c:816
#, c-format
msgid "%1$s (%2$s%%)"
msgstr ""
#: ../gtk/details.c:818
#, c-format
msgid "%1$s (%2$s%% of %3$s%% Available)"
msgstr ""
#: ../gtk/details.c:820
#, c-format
msgid "%1$s (%2$s%% of %3$s%% Available); %4$s Unverified"
msgstr ""
#: ../gtk/details.c:839
#, c-format
msgid "%1$s (+%2$s corrupt)"
msgstr ""
#: ../gtk/details.c:861
#, c-format
msgid "%s (Ratio: %s)"
msgstr ""
#: ../gtk/details.c:889
msgid "No errors"
msgstr ""
#: ../gtk/details.c:902
msgid "Never"
msgstr ""
#: ../gtk/details.c:906
msgid "Active now"
msgstr ""
#: ../gtk/details.c:910
#, c-format
msgid "%1$s ago"
msgstr ""
#: ../gtk/details.c:929
msgid "Activity"
msgstr ""
#: ../gtk/details.c:934
msgid "Torrent size:"
msgstr ""
#: ../gtk/details.c:939
msgid "Have:"
msgstr ""
#: ../gtk/details.c:944 ../gtk/stats.c:155 ../gtk/stats.c:172
msgid "Downloaded:"
msgstr ""
#: ../gtk/details.c:949 ../gtk/stats.c:152 ../gtk/stats.c:169
msgid "Uploaded:"
msgstr ""
#: ../gtk/details.c:954
msgid "State:"
msgstr ""
#: ../gtk/details.c:959
msgid "Running time:"
msgstr ""
#: ../gtk/details.c:964
msgid "Remaining time:"
msgstr ""
#: ../gtk/details.c:969
msgid "Last activity:"
msgstr ""
#: ../gtk/details.c:975
msgid "Error:"
msgstr ""
#: ../gtk/details.c:980
msgid "Details"
msgstr ""
#: ../gtk/details.c:986
msgid "Location:"
msgstr ""
#: ../gtk/details.c:993
msgid "Hash:"
msgstr ""
#: ../gtk/details.c:999
msgid "Privacy:"
msgstr ""
#: ../gtk/details.c:1006
msgid "Origin:"
msgstr ""
#: ../gtk/details.c:1023
msgid "Comment:"
msgstr ""
#: ../gtk/details.c:1051
msgid "Webseeds"
msgstr ""
#: ../gtk/details.c:1053 ../gtk/details.c:1106
msgid "Down"
msgstr ""
#: ../gtk/details.c:1104
msgid "Address"
msgstr ""
#: ../gtk/details.c:1108
msgid "Up"
msgstr ""
#: ../gtk/details.c:1109
msgid "Client"
msgstr ""
#: ../gtk/details.c:1110
msgid "%"
msgstr ""
#: ../gtk/details.c:1112
msgid "Up Reqs"
msgstr ""
#: ../gtk/details.c:1114
msgid "Dn Reqs"
msgstr ""
#: ../gtk/details.c:1116
msgid "Dn Blocks"
msgstr ""
#: ../gtk/details.c:1118
msgid "Up Blocks"
msgstr ""
#: ../gtk/details.c:1120
msgid "We Cancelled"
msgstr ""
#: ../gtk/details.c:1122
msgid "They Cancelled"
msgstr ""
#: ../gtk/details.c:1123
msgid "Flags"
msgstr ""
#: ../gtk/details.c:1478
msgid "Optimistic unchoke"
msgstr ""
#: ../gtk/details.c:1479
msgid "Downloading from this peer"
msgstr ""
#: ../gtk/details.c:1480
msgid "We would download from this peer if they would let us"
msgstr ""
#: ../gtk/details.c:1481
msgid "Uploading to peer"
msgstr ""
#: ../gtk/details.c:1482
msgid "We would upload to this peer if they asked"
msgstr ""
#: ../gtk/details.c:1483
msgid "Peer has unchoked us, but we're not interested"
msgstr ""
#: ../gtk/details.c:1484
msgid "We unchoked this peer, but they're not interested"
msgstr ""
#: ../gtk/details.c:1485
msgid "Encrypted connection"
msgstr ""
#: ../gtk/details.c:1486
msgid "Peer was found through Peer Exchange (PEX)"
msgstr ""
#: ../gtk/details.c:1487
msgid "Peer was found through DHT"
msgstr ""
#: ../gtk/details.c:1488
msgid "Peer is an incoming connection"
msgstr ""
#: ../gtk/details.c:1489
msgid "Peer is connected over µTP"
msgstr ""
#: ../gtk/details.c:1734 ../gtk/details.c:2441
msgid "Show _more details"
msgstr ""
#: ../gtk/details.c:1805
#, c-format
msgid "Got a list of %1$s%2$'d peers%3$s %4$s ago"
msgstr ""
#: ../gtk/details.c:1809
#, c-format
msgid "Peer list request %1$stimed out%2$s %3$s ago; will retry"
msgstr ""
#: ../gtk/details.c:1812
#, c-format
msgid "Got an error %1$s\"%2$s\"%3$s %4$s ago"
msgstr ""
#: ../gtk/details.c:1820
msgid "No updates scheduled"
msgstr ""
#: ../gtk/details.c:1825
#, c-format
msgid "Asking for more peers in %s"
msgstr ""
#: ../gtk/details.c:1829
msgid "Queued to ask for more peers"
msgstr ""
#: ../gtk/details.c:1834
#, c-format
msgid "Asking for more peers now… <small>%s</small>"
msgstr ""
#: ../gtk/details.c:1844
#, c-format
msgid "Tracker had %s%'d seeders and %'d leechers%s %s ago"
msgstr ""
#: ../gtk/details.c:1848
#, c-format
msgid "Got a scrape error \"%s%s%s\" %s ago"
msgstr ""
#: ../gtk/details.c:1858
#, c-format
msgid "Asking for peer counts in %s"
msgstr ""
#: ../gtk/details.c:1862
msgid "Queued to ask for peer counts"
msgstr ""
#: ../gtk/details.c:1867
#, c-format
msgid "Asking for peer counts now… <small>%s</small>"
msgstr ""
#: ../gtk/details.c:2137
msgid "List contains invalid URLs"
msgstr ""
#: ../gtk/details.c:2142
msgid "Please correct the errors and try again."
msgstr ""
#: ../gtk/details.c:2192
#, c-format
msgid "%s - Edit Trackers"
msgstr ""
#: ../gtk/details.c:2202
msgid "Tracker Announce URLs"
msgstr ""
#: ../gtk/details.c:2205 ../gtk/makemeta-ui.c:490
msgid ""
"To add a backup URL, add it on the line after the primary URL.\n"
"To add another primary URL, add it after a blank line."
msgstr ""
#: ../gtk/details.c:2302
#, c-format
msgid "%s - Add Tracker"
msgstr ""
#: ../gtk/details.c:2316
msgid "Tracker"
msgstr ""
#: ../gtk/details.c:2322
msgid "_Announce URL:"
msgstr ""
#: ../gtk/details.c:2397 ../gtk/details.c:2541 ../gtk/filter.c:323
msgid "Trackers"
msgstr ""
#: ../gtk/details.c:2421
msgid "_Add"
msgstr ""
#: ../gtk/details.c:2432
msgid "_Remove"
msgstr ""
#: ../gtk/details.c:2448
msgid "Show _backup trackers"
msgstr ""
#: ../gtk/details.c:2533 ../gtk/msgwin.c:440
msgid "Information"
msgstr ""
#: ../gtk/details.c:2537
msgid "Peers"
msgstr ""
#: ../gtk/details.c:2546
msgid "File listing not available for combined torrent properties"
msgstr ""
#: ../gtk/details.c:2550 ../gtk/makemeta-ui.c:437
msgid "Files"
msgstr ""
#: ../gtk/details.c:2554 ../gtk/tr-prefs.c:1239 ../gtk/tr-window.c:658
msgid "Options"
msgstr ""
#: ../gtk/details.c:2578
#, c-format
msgid "%s Properties"
msgstr ""
#: ../gtk/details.c:2589
#, c-format
msgid "%'d Torrent Properties"
msgstr ""
#: ../gtk/dialogs.c:95
#, c-format
msgid "Remove torrent?"
msgid_plural "Remove %d torrents?"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:101
#, c-format
msgid "Delete this torrent's downloaded files?"
msgid_plural "Delete these %d torrents' downloaded files?"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:111
msgid ""
"Once removed, continuing the transfer will require the torrent file or "
"magnet link."
msgid_plural ""
"Once removed, continuing the transfers will require the torrent files or "
"magnet links."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:117
msgid "This torrent has not finished downloading."
msgid_plural "These torrents have not finished downloading."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:123
msgid "This torrent is connected to peers."
msgid_plural "These torrents are connected to peers."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:130
msgid "One of these torrents is connected to peers."
msgid_plural "Some of these torrents are connected to peers."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/dialogs.c:137
msgid "One of these torrents has not finished downloading."
msgid_plural "Some of these torrents have not finished downloading."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/file-list.c:607 ../gtk/filter.c:348 ../gtk/util.c:471
msgid "High"
msgstr ""
#: ../gtk/file-list.c:608 ../gtk/filter.c:352 ../gtk/util.c:472
msgid "Normal"
msgstr ""
#: ../gtk/file-list.c:609 ../gtk/filter.c:356 ../gtk/util.c:473
msgid "Low"
msgstr ""
#: ../gtk/file-list.c:827 ../gtk/msgwin.c:305
msgid "Name"
msgstr ""
#. add "size" column
#: ../gtk/file-list.c:842
msgid "Size"
msgstr ""
#. add "progress" column
#: ../gtk/file-list.c:857
msgid "Have"
msgstr ""
#. add "enabled" column
#: ../gtk/file-list.c:870
msgid "Download"
msgstr ""
#. add priority column
#: ../gtk/file-list.c:886 ../gtk/filter.c:343
msgid "Priority"
msgstr ""
#: ../gtk/filter.c:315 ../gtk/filter.c:699
msgid "All"
msgstr ""
#: ../gtk/filter.c:329 ../gtk/tr-prefs.c:567 ../gtk/tr-prefs.c:1285
msgid "Privacy"
msgstr ""
#: ../gtk/filter.c:334
msgid "Public"
msgstr ""
#: ../gtk/filter.c:338
msgid "Private"
msgstr ""
#: ../gtk/filter.c:701
msgid "Active"
msgstr ""
#: ../gtk/filter.c:706
msgctxt "Verb"
msgid "Verifying"
msgstr ""
#: ../gtk/filter.c:707 ../gtk/msgwin.c:439
msgid "Error"
msgstr ""
#. add the activity combobox
#: ../gtk/filter.c:996
msgid "_Show:"
msgstr ""
#: ../gtk/main.c:308
#, c-format
msgid "Error registering Transmission as x-scheme-handler/magnet handler: %s"
msgstr ""
#: ../gtk/main.c:477
#, c-format
msgid ""
"Got signal %d; trying to shut down cleanly. Do it again if it gets stuck."
msgstr ""
#: ../gtk/main.c:610
msgid "Where to look for configuration files"
msgstr ""
#: ../gtk/main.c:611
msgid "Start with all torrents paused"
msgstr ""
#: ../gtk/main.c:612
msgid "Start minimized in notification area"
msgstr ""
#: ../gtk/main.c:613
msgid "Show version number and exit"
msgstr ""
#: ../gtk/main.c:630 ../gtk/transmission-gtk.desktop.in.h:1
msgid "Transmission"
msgstr ""
#. parse the command line
#: ../gtk/main.c:634
msgid "[torrent files or urls]"
msgstr ""
#: ../gtk/main.c:639
#, c-format
msgid ""
"%s\n"
"Run '%s --help' to see a full list of available command line options.\n"
msgstr ""
#: ../gtk/main.c:739
msgid ""
"Transmission is a file-sharing program. When you run a torrent, its data "
"will be made available to others by means of upload. You and you alone are "
"fully responsible for exercising proper judgement and abiding by your local "
"laws."
msgstr ""
#: ../gtk/main.c:741
msgid "I _Accept"
msgstr ""
#: ../gtk/main.c:963
msgid "<b>Closing Connections</b>"
msgstr ""
#: ../gtk/main.c:967
msgid "Sending upload/download totals to tracker…"
msgstr ""
#: ../gtk/main.c:972
msgid "_Quit Now"
msgstr ""
#: ../gtk/main.c:1030
msgid "Couldn't add corrupt torrent"
msgid_plural "Couldn't add corrupt torrents"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/main.c:1037
msgid "Couldn't add duplicate torrent"
msgid_plural "Couldn't add duplicate torrents"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/main.c:1336
msgid "A fast and easy BitTorrent client"
msgstr ""
#: ../gtk/main.c:1337
msgid "Copyright (c) The Transmission Project"
msgstr ""
#. Translators: translate "translator-credits" as your name
#. to have it appear in the credits in the "About"
#. dialog
#: ../gtk/main.c:1343
msgid "translator-credits"
msgstr ""
"Launchpad Contributions:\n"
" James Randall G. Quizon https://launchpad.net/~james-quizon2000\n"
" Reli Ann Faye Rogado https://launchpad.net/~liannfaye"
#: ../gtk/makemeta-ui.c:75
#, c-format
msgid "Creating \"%s\""
msgstr ""
#: ../gtk/makemeta-ui.c:77
#, c-format
msgid "Created \"%s\"!"
msgstr ""
#: ../gtk/makemeta-ui.c:79
#, c-format
msgid "Error: invalid announce URL \"%s\""
msgstr ""
#: ../gtk/makemeta-ui.c:81
#, c-format
msgid "Cancelled"
msgstr ""
#: ../gtk/makemeta-ui.c:83
#, c-format
msgid "Error reading \"%s\": %s"
msgstr ""
#: ../gtk/makemeta-ui.c:85
#, c-format
msgid "Error writing \"%s\": %s"
msgstr ""
#. how much data we've scanned through to generate checksums
#: ../gtk/makemeta-ui.c:102
#, c-format
msgid "Scanned %s"
msgstr ""
#: ../gtk/makemeta-ui.c:167 ../gtk/makemeta-ui.c:425
msgid "New Torrent"
msgstr ""
#: ../gtk/makemeta-ui.c:183
msgid "Creating torrent…"
msgstr ""
#: ../gtk/makemeta-ui.c:292
msgid "No source selected"
msgstr ""
#: ../gtk/makemeta-ui.c:298
#, c-format
msgid "%1$s; %2$'d File"
msgid_plural "%1$s; %2$'d Files"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/makemeta-ui.c:305
#, c-format
msgid "%1$'d Piece @ %2$s"
msgid_plural "%1$'d Pieces @ %2$s"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/makemeta-ui.c:439
msgid "Sa_ve to:"
msgstr ""
#: ../gtk/makemeta-ui.c:445
msgid "Source F_older:"
msgstr ""
#: ../gtk/makemeta-ui.c:457
msgid "Source _File:"
msgstr ""
#: ../gtk/makemeta-ui.c:469
msgid "<i>No source selected</i>"
msgstr ""
#: ../gtk/makemeta-ui.c:473
msgid "Properties"
msgstr ""
#: ../gtk/makemeta-ui.c:475
msgid "_Trackers:"
msgstr ""
#: ../gtk/makemeta-ui.c:497
msgid "Co_mment:"
msgstr ""
#: ../gtk/makemeta-ui.c:506
msgid "_Private torrent"
msgstr ""
#: ../gtk/msgwin.c:144
#, c-format
msgid "Couldn't save \"%s\""
msgstr ""
#: ../gtk/msgwin.c:205
msgid "Save Log"
msgstr ""
#: ../gtk/msgwin.c:300
msgid "Time"
msgstr ""
#: ../gtk/msgwin.c:310
msgid "Message"
msgstr ""
#: ../gtk/msgwin.c:441
msgid "Debug"
msgstr ""
#: ../gtk/msgwin.c:467
msgid "Message Log"
msgstr ""
#: ../gtk/msgwin.c:502
msgid "Level"
msgstr ""
#: ../gtk/notify.c:219
msgid "Open File"
msgstr ""
#: ../gtk/notify.c:224
msgid "Open Folder"
msgstr ""
#: ../gtk/notify.c:232
msgid "Torrent Complete"
msgstr ""
#: ../gtk/notify.c:254
msgid "Torrent Added"
msgstr ""
#: ../gtk/open-dialog.c:240
msgid "Torrent files"
msgstr ""
#: ../gtk/open-dialog.c:245
msgid "All files"
msgstr ""
#. make the dialog
#: ../gtk/open-dialog.c:270
msgid "Torrent Options"
msgstr ""
#: ../gtk/open-dialog.c:292 ../gtk/tr-prefs.c:334
msgid "Mo_ve .torrent file to the trash"
msgstr ""
#: ../gtk/open-dialog.c:294 ../gtk/tr-prefs.c:326
msgid "_Start when added"
msgstr ""
#. "torrent file" row
#: ../gtk/open-dialog.c:310
msgid "_Torrent file:"
msgstr ""
#: ../gtk/open-dialog.c:313
msgid "Select Source File"
msgstr ""
#: ../gtk/open-dialog.c:325
msgid "_Destination folder:"
msgstr ""
#: ../gtk/open-dialog.c:328
msgid "Select Destination Folder"
msgstr ""
#: ../gtk/open-dialog.c:427
msgid "Open a Torrent"
msgstr ""
#: ../gtk/open-dialog.c:443 ../gtk/tr-prefs.c:330
msgid "Show _options dialog"
msgstr ""
#: ../gtk/open-dialog.c:491
msgid "Open URL"
msgstr ""
#: ../gtk/open-dialog.c:504
msgid "Open torrent from URL"
msgstr ""
#: ../gtk/open-dialog.c:509
msgid "_URL"
msgstr ""
#: ../gtk/relocate.c:62
#, c-format
msgid "Moving \"%s\""
msgstr ""
#: ../gtk/relocate.c:84
msgid "Couldn't move torrent"
msgstr ""
#: ../gtk/relocate.c:125
msgid "This may take a moment…"
msgstr ""
#: ../gtk/relocate.c:156 ../gtk/relocate.c:176
msgid "Set Torrent Location"
msgstr ""
#: ../gtk/relocate.c:172 ../gtk/tr-prefs.c:269
msgid "Location"
msgstr ""
#: ../gtk/relocate.c:179
msgid "Torrent _location:"
msgstr ""
#: ../gtk/relocate.c:180
msgid "_Move from the current folder"
msgstr ""
#: ../gtk/relocate.c:183
msgid "Local data is _already there"
msgstr ""
#: ../gtk/stats.c:72 ../gtk/stats.c:164
#, c-format
msgid "Started %'d time"
msgid_plural "Started %'d times"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/stats.c:97
msgid "Reset your statistics?"
msgstr ""
#: ../gtk/stats.c:98
msgid ""
"These statistics are for your information only. Resetting them doesn't "
"affect the statistics logged by your BitTorrent trackers."
msgstr ""
#: ../gtk/stats.c:109 ../gtk/stats.c:137
msgid "_Reset"
msgstr ""
#: ../gtk/stats.c:134 ../gtk/tr-window.c:699
msgid "Statistics"
msgstr ""
#: ../gtk/stats.c:149
msgid "Current Session"
msgstr ""
#: ../gtk/stats.c:158 ../gtk/stats.c:175
msgid "Ratio:"
msgstr ""
#: ../gtk/stats.c:161 ../gtk/stats.c:178
msgid "Duration:"
msgstr ""
#: ../gtk/stats.c:163
msgid "Total"
msgstr ""
#. %1$s is how much we've got,
#. %2$s is how much we'll have when done,
#. %3$s%% is a percentage of the two
#: ../gtk/torrent-cell-renderer.c:61
#, c-format
msgid "%1$s of %2$s (%3$s%%)"
msgstr ""
#. %1$s is how much we've got,
#. %2$s is the torrent's total size,
#. %3$s%% is a percentage of the two,
#. %4$s is how much we've uploaded,
#. %5$s is our upload-to-download ratio,
#. %6$s is the ratio we want to reach before we stop uploading
#: ../gtk/torrent-cell-renderer.c:77
#, c-format
msgid "%1$s of %2$s (%3$s%%), uploaded %4$s (Ratio: %5$s Goal: %6$s)"
msgstr ""
#. %1$s is how much we've got,
#. %2$s is the torrent's total size,
#. %3$s%% is a percentage of the two,
#. %4$s is how much we've uploaded,
#. %5$s is our upload-to-download ratio
#: ../gtk/torrent-cell-renderer.c:93
#, c-format
msgid "%1$s of %2$s (%3$s%%), uploaded %4$s (Ratio: %5$s)"
msgstr ""
#. %1$s is the torrent's total size,
#. %2$s is how much we've uploaded,
#. %3$s is our upload-to-download ratio,
#. %4$s is the ratio we want to reach before we stop uploading
#: ../gtk/torrent-cell-renderer.c:110
#, c-format
msgid "%1$s, uploaded %2$s (Ratio: %3$s Goal: %4$s)"
msgstr ""
#. %1$s is the torrent's total size,
#. %2$s is how much we've uploaded,
#. %3$s is our upload-to-download ratio
#: ../gtk/torrent-cell-renderer.c:122
#, c-format
msgid "%1$s, uploaded %2$s (Ratio: %3$s)"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:136
msgid "Remaining time unknown"
msgstr ""
#. time remaining
#: ../gtk/torrent-cell-renderer.c:142
#, c-format
msgid "%s remaining"
msgstr ""
#. 1==down arrow, 2==down speed, 3==up arrow, 4==down speed
#: ../gtk/torrent-cell-renderer.c:167
#, c-format
msgid "%1$s %2$s, %3$s %4$s"
msgstr ""
#. bandwidth speed + unicode arrow
#: ../gtk/torrent-cell-renderer.c:172 ../gtk/torrent-cell-renderer.c:176
#, c-format
msgid "%1$s %2$s"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:179
msgid "Stalled"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:181 ../gtk/tr-icon.c:69
msgid "Idle"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:211
#, c-format
msgid "Verifying local data (%.1f%% tested)"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:222
#, c-format
msgid "Ratio %s"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:244
#, c-format
msgid "Tracker gave a warning: \"%s\""
msgstr ""
#: ../gtk/torrent-cell-renderer.c:245
#, c-format
msgid "Tracker gave an error: \"%s\""
msgstr ""
#: ../gtk/torrent-cell-renderer.c:246
#, c-format
msgid "Error: %s"
msgstr ""
#: ../gtk/torrent-cell-renderer.c:266
#, c-format
msgid "Downloading from %1$'d of %2$'d connected peer"
msgid_plural "Downloading from %1$'d of %2$'d connected peers"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/torrent-cell-renderer.c:275
#, c-format
msgid "Downloading metadata from %1$'d peer (%2$d%% done)"
msgid_plural "Downloading metadata from %1$'d peers (%2$d%% done)"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/torrent-cell-renderer.c:301
#, c-format
msgid "Seeding to %1$'d of %2$'d connected peer"
msgid_plural "Seeding to %1$'d of %2$'d connected peers"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/transmission-gtk.desktop.in.h:2
msgid "BitTorrent Client"
msgstr ""
#: ../gtk/transmission-gtk.desktop.in.h:3
msgid "Transmission BitTorrent Client"
msgstr ""
#: ../gtk/transmission-gtk.desktop.in.h:4
msgid "Download and share files over BitTorrent"
msgstr ""
#: ../gtk/tr-core.c:1181
#, c-format
msgid "Couldn't read \"%s\": %s"
msgstr ""
#: ../gtk/tr-core.c:1277
#, c-format
msgid "Skipping unknown torrent \"%s\""
msgstr ""
#: ../gtk/tr-core.c:1521
msgid "Inhibiting desktop hibernation"
msgstr ""
#: ../gtk/tr-core.c:1525
#, c-format
msgid "Couldn't inhibit desktop hibernation: %s"
msgstr ""
#: ../gtk/tr-core.c:1559
msgid "Allowing desktop hibernation"
msgstr ""
#: ../gtk/tr-icon.c:86 ../gtk/tr-icon.c:102
#, c-format
msgid "(Limit: %s)"
msgstr ""
#. %1$s: current upload speed
#. * %2$s: current upload limit, if any
#. * %3$s: current download speed
#. * %4$s: current download limit, if any
#: ../gtk/tr-icon.c:109
#, c-format
msgid ""
"Transmission\n"
"Up: %1$s %2$s\n"
"Down: %3$s %4$s"
msgstr ""
#: ../gtk/tr-prefs.c:272
msgid "Save to _Location:"
msgstr ""
#: ../gtk/tr-prefs.c:275
msgid "Queue"
msgstr ""
#: ../gtk/tr-prefs.c:277
msgid "Maximum active _downloads:"
msgstr ""
#: ../gtk/tr-prefs.c:281
msgid "Downloads sharing data in the last N minutes are _active:"
msgstr ""
#: ../gtk/tr-prefs.c:286 ../libtransmission/torrent.c:1919
msgid "Incomplete"
msgstr ""
#: ../gtk/tr-prefs.c:288
msgid "Append \"._part\" to incomplete files' names"
msgstr ""
#: ../gtk/tr-prefs.c:292
msgid "Keep _incomplete torrents in:"
msgstr ""
#: ../gtk/tr-prefs.c:299
msgid "Call _script when torrent is completed:"
msgstr ""
#: ../gtk/tr-prefs.c:324
msgctxt "Gerund"
msgid "Adding"
msgstr ""
#: ../gtk/tr-prefs.c:338
msgid "Automatically _add torrents from:"
msgstr ""
#: ../gtk/tr-prefs.c:346
msgctxt "Gerund"
msgid "Seeding"
msgstr ""
#: ../gtk/tr-prefs.c:348
msgid "Stop seeding at _ratio:"
msgstr ""
#: ../gtk/tr-prefs.c:355
msgid "Stop seeding if idle for _N minutes:"
msgstr ""
#: ../gtk/tr-prefs.c:378 ../gtk/tr-prefs.c:1291
msgid "Desktop"
msgstr ""
#: ../gtk/tr-prefs.c:380
msgid "_Inhibit hibernation when torrents are active"
msgstr ""
#: ../gtk/tr-prefs.c:384
msgid "Show Transmission icon in the _notification area"
msgstr ""
#: ../gtk/tr-prefs.c:389
msgid "Notification"
msgstr ""
#: ../gtk/tr-prefs.c:391
msgid "Show a notification when torrents are a_dded"
msgstr ""
#: ../gtk/tr-prefs.c:395
msgid "Show a notification when torrents _finish"
msgstr ""
#: ../gtk/tr-prefs.c:399
msgid "Play a _sound when torrents finish"
msgstr ""
#: ../gtk/tr-prefs.c:427
#, c-format
msgid "Blocklist contains %'d rule"
msgid_plural "Blocklist contains %'d rules"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/tr-prefs.c:460
#, c-format
msgid "Blocklist has %'d rule."
msgid_plural "Blocklist has %'d rules."
msgstr[0] ""
msgstr[1] ""
#: ../gtk/tr-prefs.c:464
msgid "<b>Update succeeded!</b>"
msgstr ""
#: ../gtk/tr-prefs.c:464
msgid "<b>Unable to update.</b>"
msgstr ""
#: ../gtk/tr-prefs.c:479
msgid "Update Blocklist"
msgstr ""
#: ../gtk/tr-prefs.c:481
msgid "Getting new blocklist…"
msgstr ""
#: ../gtk/tr-prefs.c:509
msgid "Allow encryption"
msgstr ""
#: ../gtk/tr-prefs.c:510
msgid "Prefer encryption"
msgstr ""
#: ../gtk/tr-prefs.c:511
msgid "Require encryption"
msgstr ""
#: ../gtk/tr-prefs.c:535
msgid "Blocklist"
msgstr ""
#: ../gtk/tr-prefs.c:537
msgid "Enable _blocklist:"
msgstr ""
#: ../gtk/tr-prefs.c:551
msgid "_Update"
msgstr ""
#: ../gtk/tr-prefs.c:561
msgid "Enable _automatic updates"
msgstr ""
#: ../gtk/tr-prefs.c:569
msgid "_Encryption mode:"
msgstr ""
#: ../gtk/tr-prefs.c:573
msgid "Use PE_X to find more peers"
msgstr ""
#: ../gtk/tr-prefs.c:575
msgid ""
"PEX is a tool for exchanging peer lists with the peers you're connected to."
msgstr ""
#: ../gtk/tr-prefs.c:579
msgid "Use _DHT to find more peers"
msgstr ""
#: ../gtk/tr-prefs.c:581
msgid "DHT is a tool for finding peers without a tracker."
msgstr ""
#: ../gtk/tr-prefs.c:585
msgid "Use _Local Peer Discovery to find more peers"
msgstr ""
#: ../gtk/tr-prefs.c:587
msgid "LPD is a tool for finding peers on your local network."
msgstr ""
#: ../gtk/tr-prefs.c:803
msgid "Web Client"
msgstr ""
#. "enabled" checkbutton
#: ../gtk/tr-prefs.c:806
msgid "_Enable web client"
msgstr ""
#: ../gtk/tr-prefs.c:812
msgid "_Open web client"
msgstr ""
#: ../gtk/tr-prefs.c:821
msgid "HTTP _port:"
msgstr ""
#. require authentication
#: ../gtk/tr-prefs.c:825
msgid "Use _authentication"
msgstr ""
#. username
#: ../gtk/tr-prefs.c:833
msgid "_Username:"
msgstr ""
#. password
#: ../gtk/tr-prefs.c:840
msgid "Pass_word:"
msgstr ""
#. require authentication
#: ../gtk/tr-prefs.c:848
msgid "Only allow these IP a_ddresses to connect:"
msgstr ""
#: ../gtk/tr-prefs.c:873
msgid "IP addresses may use wildcards, such as 192.168.*.*"
msgstr ""
#: ../gtk/tr-prefs.c:895
msgid "Addresses:"
msgstr ""
#: ../gtk/tr-prefs.c:1012
msgid "Every Day"
msgstr ""
#: ../gtk/tr-prefs.c:1013
msgid "Weekdays"
msgstr ""
#: ../gtk/tr-prefs.c:1014
msgid "Weekends"
msgstr ""
#: ../gtk/tr-prefs.c:1015
msgid "Sunday"
msgstr ""
#: ../gtk/tr-prefs.c:1016
msgid "Monday"
msgstr ""
#: ../gtk/tr-prefs.c:1017
msgid "Tuesday"
msgstr ""
#: ../gtk/tr-prefs.c:1018
msgid "Wednesday"
msgstr ""
#: ../gtk/tr-prefs.c:1019
msgid "Thursday"
msgstr ""
#: ../gtk/tr-prefs.c:1020
msgid "Friday"
msgstr ""
#: ../gtk/tr-prefs.c:1021
msgid "Saturday"
msgstr ""
#: ../gtk/tr-prefs.c:1052
msgid "Speed Limits"
msgstr ""
#: ../gtk/tr-prefs.c:1054
#, c-format
msgid "_Upload (%s):"
msgstr ""
#: ../gtk/tr-prefs.c:1061
#, c-format
msgid "_Download (%s):"
msgstr ""
#: ../gtk/tr-prefs.c:1072
msgid "Alternative Speed Limits"
msgstr ""
#: ../gtk/tr-prefs.c:1079
msgid "Override normal speed limits manually or at scheduled times"
msgstr ""
#: ../gtk/tr-prefs.c:1086
#, c-format
msgid "U_pload (%s):"
msgstr ""
#: ../gtk/tr-prefs.c:1090
#, c-format
msgid "Do_wnload (%s):"
msgstr ""
#: ../gtk/tr-prefs.c:1094
msgid "_Scheduled times:"
msgstr ""
#: ../gtk/tr-prefs.c:1099
msgid " _to "
msgstr ""
#: ../gtk/tr-prefs.c:1110
msgid "_On days:"
msgstr ""
#: ../gtk/tr-prefs.c:1143 ../gtk/tr-prefs.c:1212
msgid "Status unknown"
msgstr ""
#: ../gtk/tr-prefs.c:1167
msgid "Port is <b>open</b>"
msgstr ""
#: ../gtk/tr-prefs.c:1167
msgid "Port is <b>closed</b>"
msgstr ""
#: ../gtk/tr-prefs.c:1182
msgid "<i>Testing TCP port…</i>"
msgstr ""
#: ../gtk/tr-prefs.c:1205
msgid "Listening Port"
msgstr ""
#: ../gtk/tr-prefs.c:1207
msgid "_Port used for incoming connections:"
msgstr ""
#: ../gtk/tr-prefs.c:1215
msgid "Te_st Port"
msgstr ""
#: ../gtk/tr-prefs.c:1222
msgid "Pick a _random port every time Transmission is started"
msgstr ""
#: ../gtk/tr-prefs.c:1226
msgid "Use UPnP or NAT-PMP port _forwarding from my router"
msgstr ""
#: ../gtk/tr-prefs.c:1231
msgid "Peer Limits"
msgstr ""
#: ../gtk/tr-prefs.c:1234
msgid "Maximum peers per _torrent:"
msgstr ""
#: ../gtk/tr-prefs.c:1236
msgid "Maximum peers _overall:"
msgstr ""
#: ../gtk/tr-prefs.c:1242
msgid "Enable _uTP for peer communication"
msgstr ""
#: ../gtk/tr-prefs.c:1244
msgid "uTP is a tool for reducing network congestion."
msgstr ""
#: ../gtk/tr-prefs.c:1262
msgid "Transmission Preferences"
msgstr ""
#: ../gtk/tr-prefs.c:1276
msgid "Torrents"
msgstr ""
#: ../gtk/tr-prefs.c:1279
msgctxt "Gerund"
msgid "Downloading"
msgstr ""
#: ../gtk/tr-prefs.c:1288
msgid "Network"
msgstr ""
#: ../gtk/tr-prefs.c:1294
msgid "Web"
msgstr ""
#: ../gtk/tr-window.c:148
msgid "Torrent"
msgstr ""
#: ../gtk/tr-window.c:252
msgid "Total Ratio"
msgstr ""
#: ../gtk/tr-window.c:253
msgid "Session Ratio"
msgstr ""
#: ../gtk/tr-window.c:254
msgid "Total Transfer"
msgstr ""
#: ../gtk/tr-window.c:255
msgid "Session Transfer"
msgstr ""
#: ../gtk/tr-window.c:284
#, c-format
msgid ""
"Click to disable Alternative Speed Limits\n"
"(%1$s down, %2$s up)"
msgstr ""
#: ../gtk/tr-window.c:285
#, c-format
msgid ""
"Click to enable Alternative Speed Limits\n"
"(%1$s down, %2$s up)"
msgstr ""
#: ../gtk/tr-window.c:350
#, c-format
msgid "Tracker will allow requests in %s"
msgstr ""
#: ../gtk/tr-window.c:419
msgid "Unlimited"
msgstr ""
#: ../gtk/tr-window.c:486
msgid "Seed Forever"
msgstr ""
#: ../gtk/tr-window.c:524
msgid "Limit Download Speed"
msgstr ""
#: ../gtk/tr-window.c:528
msgid "Limit Upload Speed"
msgstr ""
#: ../gtk/tr-window.c:535
msgid "Stop Seeding at Ratio"
msgstr ""
#: ../gtk/tr-window.c:569
#, c-format
msgid "Stop at Ratio (%s)"
msgstr ""
#: ../gtk/tr-window.c:771
#, c-format
msgid "%1$'d of %2$'d Torrent"
msgid_plural "%1$'d of %2$'d Torrents"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/tr-window.c:777
#, c-format
msgid "%'d Torrent"
msgid_plural "%'d Torrents"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/tr-window.c:797 ../gtk/tr-window.c:825
#, c-format
msgid "Ratio: %s"
msgstr ""
#: ../gtk/tr-window.c:808
#, c-format
msgid "Down: %1$s, Up: %2$s"
msgstr ""
#: ../gtk/tr-window.c:819
#, c-format
msgid "size|Down: %1$s, Up: %2$s"
msgstr ""
#: ../gtk/util.c:38
msgid "KiB"
msgstr ""
#: ../gtk/util.c:39
msgid "MiB"
msgstr ""
#: ../gtk/util.c:40
msgid "GiB"
msgstr ""
#: ../gtk/util.c:41
msgid "TiB"
msgstr ""
#: ../gtk/util.c:44
msgid "kB"
msgstr ""
#: ../gtk/util.c:45
msgid "MB"
msgstr ""
#: ../gtk/util.c:46
msgid "GB"
msgstr ""
#: ../gtk/util.c:47
msgid "TB"
msgstr ""
#: ../gtk/util.c:50
msgid "kB/s"
msgstr ""
#: ../gtk/util.c:51
msgid "MB/s"
msgstr ""
#: ../gtk/util.c:52
msgid "GB/s"
msgstr ""
#: ../gtk/util.c:53
msgid "TB/s"
msgstr ""
#: ../cli/cli.c:115 ../gtk/util.c:87 ../libtransmission/utils.c:1509
msgid "None"
msgstr ""
#: ../gtk/util.c:108
#, c-format
msgid "%'d day"
msgid_plural "%'d days"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/util.c:109
#, c-format
msgid "%'d hour"
msgid_plural "%'d hours"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/util.c:110
#, c-format
msgid "%'d minute"
msgid_plural "%'d minutes"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/util.c:111
#, c-format
msgid "%'d second"
msgid_plural "%'d seconds"
msgstr[0] ""
msgstr[1] ""
#: ../gtk/util.c:223
#, c-format
msgid "The torrent file \"%s\" contains invalid data."
msgstr ""
#: ../gtk/util.c:224
#, c-format
msgid "The torrent file \"%s\" is already in use."
msgstr ""
#: ../gtk/util.c:225
#, c-format
msgid "The torrent file \"%s\" encountered an unknown error."
msgstr ""
#: ../gtk/util.c:233
msgid "Error opening torrent"
msgstr ""
#: ../gtk/util.c:540
#, c-format
msgid "Error opening \"%s\""
msgstr ""
#: ../gtk/util.c:543
#, c-format
msgid "Server returned \"%1$ld %2$s\""
msgstr ""
#: ../gtk/util.c:563
msgid "Unrecognized URL"
msgstr ""
#: ../gtk/util.c:565
#, c-format
msgid "Transmission doesn't know how to use \"%s\""
msgstr ""
#: ../gtk/util.c:570
#, c-format
msgid ""
"This magnet link appears to be intended for something other than BitTorrent. "
"BitTorrent magnet links have a section containing \"%s\"."
msgstr ""
#. did caller give us an uninitialized val?
#: ../libtransmission/bencode.c:1117
msgid "Invalid metadata"
msgstr ""
#: ../libtransmission/bencode.c:1723 ../libtransmission/bencode.c:1751
#, c-format
msgid "Couldn't save temporary file \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/bencode.c:1738
#, c-format
msgid "Saved \"%s\""
msgstr ""
#: ../libtransmission/bencode.c:1743 ../libtransmission/blocklist.c:417
#: ../libtransmission/rpcimpl.c:1260 ../libtransmission/rpcimpl.c:1271
#: ../libtransmission/rpcimpl.c:1287
#, c-format
msgid "Couldn't save file \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/blocklist.c:86 ../libtransmission/blocklist.c:325
#: ../libtransmission/utils.c:438
#, c-format
msgid "Couldn't read \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/blocklist.c:115
#, c-format
msgid "Blocklist \"%s\" contains %zu entries"
msgstr ""
#. don't try to display the actual lines - it causes issues
#: ../libtransmission/blocklist.c:368
#, c-format
msgid "blocklist skipped invalid address at line %d"
msgstr ""
#: ../libtransmission/blocklist.c:420
#, c-format
msgid "Blocklist \"%s\" updated with %zu entries"
msgstr ""
#: ../libtransmission/fdlimit.c:348 ../libtransmission/utils.c:574
#: ../libtransmission/utils.c:585
#, c-format
msgid "Couldn't create \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/fdlimit.c:369
#, c-format
msgid "Couldn't open \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/fdlimit.c:384
#, c-format
msgid "Couldn't truncate \"%1$s\": %2$s"
msgstr ""
#: ../libtransmission/fdlimit.c:670
#, c-format
msgid "Couldn't create socket: %s"
msgstr ""
#: ../libtransmission/makemeta.c:63
#, c-format
msgid "Torrent Creator is skipping file \"%s\": %s"
msgstr ""
#: ../libtransmission/metainfo.c:533
#, c-format
msgid "Invalid metadata entry \"%s\""
msgstr ""
#: ../libtransmission/natpmp.c:32
msgid "Port Forwarding (NAT-PMP)"
msgstr ""
#: ../libtransmission/natpmp.c:72
#, c-format
msgid "%s succeeded (%d)"
msgstr ""
#: ../libtransmission/natpmp.c:141
#, c-format
msgid "Found public address \"%s\""
msgstr ""
#: ../libtransmission/natpmp.c:176
#, c-format
msgid "no longer forwarding port %d"
msgstr ""
#: ../libtransmission/natpmp.c:221
#, c-format
msgid "Port %d forwarded successfully"
msgstr ""
#: ../libtransmission/net.c:268
#, c-format
msgid "Couldn't set source address %s on %d: %s"
msgstr ""
#: ../libtransmission/net.c:284
#, c-format
msgid "Couldn't connect socket %d to %s, port %d (errno %d - %s)"
msgstr ""
#: ../libtransmission/net.c:360
msgid "Is another copy of Transmission already running?"
msgstr ""
#: ../libtransmission/net.c:365
#, c-format
msgid "Couldn't bind port %d on %s: %s"
msgstr ""
#: ../libtransmission/net.c:367
#, c-format
msgid "Couldn't bind port %d on %s: %s (%s)"
msgstr ""
#: ../libtransmission/peer-msgs.c:1898
#, c-format
msgid "Please Verify Local Data! Piece #%zu is corrupt."
msgstr ""
#: ../libtransmission/port-forwarding.c:31
msgid "Port Forwarding"
msgstr ""
#: ../libtransmission/port-forwarding.c:58
msgid "Starting"
msgstr ""
#: ../libtransmission/port-forwarding.c:59
msgid "Forwarded"
msgstr ""
#: ../libtransmission/port-forwarding.c:60
msgid "Stopping"
msgstr ""
#: ../libtransmission/port-forwarding.c:61
msgid "Not forwarded"
msgstr ""
#: ../libtransmission/port-forwarding.c:91 ../libtransmission/torrent.c:2060
#, c-format
msgid "State changed from \"%1$s\" to \"%2$s\""
msgstr ""
#: ../libtransmission/port-forwarding.c:181
msgid "Stopped"
msgstr ""
#. first %s is the application name
#. second %s is the version number
#: ../libtransmission/session.c:722
#, c-format
msgid "%s %s started"
msgstr ""
#: ../libtransmission/session.c:1943
#, c-format
msgid "Loaded %d torrents"
msgstr ""
#: ../libtransmission/torrent.c:525
#, c-format
msgid "Tracker warning: \"%s\""
msgstr ""
#: ../libtransmission/torrent.c:532
#, c-format
msgid "Tracker error: \"%s\""
msgstr ""
#: ../libtransmission/torrent.c:787
msgid ""
"No data found! Ensure your drives are connected or use \"Set Location\". To "
"re-download, remove the torrent and re-add it."
msgstr ""
#: ../libtransmission/torrent.c:1677
msgid "Restarted manually -- disabling its seed ratio"
msgstr ""
#: ../libtransmission/torrent.c:1829
msgid "Removing torrent"
msgstr ""
#: ../libtransmission/torrent.c:1913
msgid "Done"
msgstr ""
#: ../libtransmission/torrent.c:1916
msgid "Complete"
msgstr ""
#: ../libtransmission/upnp.c:35
msgid "Port Forwarding (UPnP)"
msgstr ""
#: ../libtransmission/upnp.c:199
#, c-format
msgid "Found Internet Gateway Device \"%s\""
msgstr ""
#: ../libtransmission/upnp.c:202
#, c-format
msgid "Local Address is \"%s\""
msgstr ""
#: ../libtransmission/upnp.c:231
#, c-format
msgid "Port %d isn't forwarded"
msgstr ""
#: ../libtransmission/upnp.c:242
#, c-format
msgid "Stopping port forwarding through \"%s\", service \"%s\""
msgstr ""
#: ../libtransmission/upnp.c:275
#, c-format
msgid ""
"Port forwarding through \"%s\", service \"%s\". (local address: %s:%d)"
msgstr ""
#: ../libtransmission/upnp.c:280
msgid "Port forwarding successful!"
msgstr ""
#: ../libtransmission/utils.c:452
msgid "Not a regular file"
msgstr ""
#: ../libtransmission/utils.c:470
msgid "Memory allocation failed"
msgstr ""
#. Node exists but isn't a folder
#: ../libtransmission/utils.c:584
#, c-format
msgid "File \"%s\" is in the way"
msgstr ""
#: ../libtransmission/verify.c:229
msgid "Verifying torrent"
msgstr ""
| {
"pile_set_name": "Github"
} |
-- Rename the description of the e-mail footer template string
-- (previously referred to a different product name).
update rhnTemplateString
set description = 'Footer for Spacewalk e-mail'
where label = 'email_footer';
-- Rewrite the account-information template block and its description;
-- <login /> and <email-address /> are placeholders expanded at send time.
update rhnTemplateString
set value = '
Account Information:
Your Spacewalk login: <login />
Your Spacewalk email address: <email-address />',
description = 'Account info lines for Spacewalk e-mail'
where label = 'email_account_info';
| {
"pile_set_name": "Github"
} |
*count* -- Count all elements in an array, or properties in an object
int count(mixed var [, int mode])~
Counts all elements in an array, or properties in an object.
For objects, if you have SPL installed, you can hook into |count| by
implementing interface Countable. The interface has exactly one method,
|count|, which returns the return value for the |count| function.
Please see the Array section of the manual for a detailed explanation of how
arrays are implemented and used in PHP.
{var} The array.
{mode} If the optional {mode} parameter is set to COUNT_RECURSIVE (or 1),
|count| will recursively count the array. This is particularly useful for
counting all the elements of a multidimensional array. |count| does not detect
infinite recursion.
Returns the number of elements in {var}, which is typically an array, since
anything else will have one element.
If {var} is not an array or an object with the Countable interface
implemented, 1 will be returned. There is one exception: if {var} is NULL, 0
will be returned.
|count| may return 0 for a variable that isn't set, but it may also return 0
for a variable that has been initialized with an empty array. Use |isset| to
test if a variable is set.
Version Description 4.2.0 The optional {mode} parameter was added.
|count| example
<?php >
$a[0] = 1;
$a[1] = 3;
$a[2] = 5;
$result = count($a);
// $result == 3
$b[0] = 7;
$b[5] = 9;
$b[10] = 11;
$result = count($b);
// $result == 3
$result = count(null);
// $result == 0
$result = count(false);
// $result == 1
?>
Recursive |count| example
<?php >
$food = array('fruits' => array('orange', 'banana', 'apple'),
'veggie' => array('carrot', 'collard', 'pea'));
// recursive count
echo count($food, COUNT_RECURSIVE); // output 8
// normal count
echo count($food); // output 2
?>
|is_array| |isset| |strlen|
vim:ft=help:
| {
"pile_set_name": "Github"
} |
"""
pynipap - a Python NIPAP client library
=======================================
pynipap is a Python client library for the NIPAP IP address planning
system. It is structured as a simple ORM.
To make it easy to maintain it's quite "thin", passing many arguments
straight through to the backend. Thus, also the pynipap-specific
documentation is quite thin. For in-depth information please look at the
main :py:mod:`NIPAP API documentation <nipap.backend>`.
There are four ORM-classes:
* :class:`VRF`
* :class:`Pool`
* :class:`Prefix`
* :class:`Tag`
Each of these maps to the NIPAP objects with the same name. See the main
:py:mod:`NIPAP API documentation <nipap.backend>` for an overview of the
different object types and what they are used for.
There are also a few supporting classes:
* :class:`AuthOptions` - Authentication options.
And a bunch of exceptions:
* :class:`NipapError`
* :class:`NipapNonExistentError`
* :class:`NipapInputError`
* :class:`NipapMissingInputError`
* :class:`NipapExtraneousInputError`
* :class:`NipapNoSuchOperatorError`
* :class:`NipapValueError`
* :class:`NipapDuplicateError`
* :class:`NipapAuthError`
* :class:`NipapAuthenticationError`
* :class:`NipapAuthorizationError`
General usage
-------------
pynipap has been designed to be simple to use.
Preparations
^^^^^^^^^^^^
Make sure that pynipap is accessible in your `sys.path`, you can test it by
starting a python shell and running::
import pynipap
If that works, you are good to go!
To simplify your code slightly, you can import the individual classes into
your main namespace::
import pynipap
from pynipap import VRF, Pool, Prefix
Before you can access NIPAP you need to specify the URL to the NIPAP
XML-RPC service and the authentication options to use for your connection.
NIPAP has an authentication system which is somewhat involved; see the main
NIPAP documentation.
The URL, including the user credentials, is set in the pynipap module
variable `xmlrpc_uri` as so::
pynipap.xmlrpc_uri = "http://user:[email protected]:1337/XMLRPC"
If you want to access the API externally, from another host, update the
corresponding lines in the nipap.conf file. Here you can also change the port. ::
listen = 0.0.0.0 ; IP address to listen on.
port = 1337 ; XML-RPC listen port (change requires restart)
The minimum authentication options which we need to set is the
`authoritative_source` option, which specifies what system is accessing
NIPAP. This is logged for each query which alters the NIPAP database and
attached to each prefix which is created or edited. Well-behaved clients
are required to honor this and verify that the user really want to alter
the prefix, when trying to edit a prefix which last was edited by another
system. The :class:`AuthOptions` class is a class with a shared state,
similar to a singleton class; that is, when a first instance is created
each consecutive instances will be copies of the first one. In this way the
authentication options can be accessed from all of the pynipap classes. ::
a = AuthOptions({
'authoritative_source': 'my_fancy_nipap_client'
})
After this, we are good to go!
Accessing data
^^^^^^^^^^^^^^
To fetch data from NIPAP, a set of static methods (@classmethod) has been
defined in each of the ORM classes. They are:
* :func:`get` - Get a single object from its ID.
* :func:`list` - List objects matching a simple criteria.
* :func:`search` - Perform a full-blown search.
* :func:`smart_search` - Perform a magic search from a string.
Each of these functions return either an instance of the requested class
(:py:class:`VRF`, :class:`Pool`, :class:`Prefix`) or a list of
instances. The :func:`search` and :func:`smart_search` functions also
embeds the lists in dicts which contain search meta data.
The easiest way to get data out of NIPAP is to use the :func:`get`-method,
given that you know the ID of the object you want to fetch::
# Fetch VRF with ID 1 and print its name
vrf = VRF.get(1)
print(vrf.name)
To list all objects each object has a :func:`list`-function. ::
# list all pools
pools = Pool.list()
# print the name of the pools
for p in pools:
print(p.name)
Each of the list functions can also take a `spec`-dict as a second
argument. With the spec you can perform a simple search operation by
specifying object attribute values. ::
# List pools with a default type of 'assignment'
pools = Pool.list({ 'default_type': 'assignment' })
Performing searches
^^^^^^^^^^^^^^^^^^^
Searches are easiest when using the object's :func:`smart_search`-method::
#Returns a dict which includes search metadata and
#a 'result' : [array, of, prefix, objects]
search_result = Prefix.smart_search('127.0.0.0/8')
prefix_objects = search_result['result']
prefix_objects[0].description
prefix_objects[0].prefix
You can also send query filters. ::
#Find the prefix for Vlan 901
vlan = 901
vlan_query = { 'val1': 'vlan', 'operator': 'equals', 'val2': vlan }
vlan_901 = Prefix.smart_search('', { }, vlan_query)['result'][0]
vlan_901.vlan
The following operators can be used. ::
* 'and'
* 'or'
* 'equals_any'
* '='
* 'equals'
* '<'
* 'less'
* '<='
* 'less_or_equal'
* '>'
* 'greater'
* '>='
* 'greater_or_equal'
* 'is'
* 'is_not'
* '!='
* 'not_equals'
* 'like': '
* 'regex_match'
* 'regex_not_match'
* '>>':
* 'contains'
* '>>='
* 'contains_equals'
* '<<'
* 'contained_within'
* '<<='
* 'contained_within_equals'
Saving changes
^^^^^^^^^^^^^^
Changes made to objects are not automatically saved. To save the changes,
simply run the object's :func:`save`-method::
vrf.name = "Spam spam spam"
vrf.save()
Error handling
--------------
As is customary in Python applications, an error results in an exception
being thrown. All pynipap exceptions extend the main exception
:class:`NipapError`. A goal with the pynipap library has been to make the
XML-RPC-channel to the backend as transparent as possible, so the XML-RPC
Faults which the NIPAP server returns in case of errors are converted and
re-thrown as new exceptions which also they extend :class:`NipapError`,
for example the NipapDuplicateError which is thrown when a duplicate key
error occurs in NIPAP.
Classes
-------
"""
import sys
import logging
if sys.version_info[0] < 3:
import xmlrpclib
int = long
else:
import xmlrpc.client as xmlrpclib
__version__ = "0.29.8"
__author__ = "Kristian Larsson, Lukas Garberg"
__author_email__= "[email protected], [email protected]"
__copyright__ = "Copyright 2011, Kristian Larsson, Lukas Garberg"
__license__ = "MIT"
__status__ = "Development"
__url__ = "http://SpriteLink.github.com/NIPAP"
# This variable holds the URI to the nipap XML-RPC service which will be used.
# It must be set before the Pynipap can be used!
xmlrpc_uri = None
# Caching of objects is enabled per default but can be disabled for certain
# scenarios. Since we don't have any cache expiration time it can be useful to
# disable for long running applications.
CACHE = True
class AuthOptions:
    """ Borg-style container for shared authentication options.

        All instances share one state: the first instance created must be
        given an options dict, and every instance created afterwards sees
        the same options. If you handle queries from multiple users, make
        sure the shared options reflect the current user before use.
    """
    __shared_state = {}
    options = None

    def __init__(self, options=None):
        """ Initialise the shared option container.

            'options', when given, must be a dict of authentication
            options; it replaces the currently shared set.
        """
        # Borg pattern: alias this instance's attribute dict to the
        # class-wide shared state so all instances stay in sync.
        self.__dict__ = self.__shared_state
        if not self.__shared_state and options is None:
            raise NipapMissingInputError("authentication options not set")
        if options is not None:
            self.options = options
class XMLRPCConnection:
    """ Handles a shared XML-RPC connection.
    """
    __shared_state = {}
    # ServerProxy instance for the NIPAP backend, created in __init__.
    connection = None
    # Per-class logger, set up in __init__.
    _logger = None
    def __init__(self):
        """ Create XML-RPC connection.
            The connection will be created to the URL set in the module
            variable `xmlrpc_uri`. The instantiation will fail unless this
            variable is set.
        """
        if xmlrpc_uri is None:
            raise NipapError('XML-RPC URI not specified')
        # creating new instance
        # NOTE(review): __shared_state is declared but never bound to
        # self.__dict__, so every instantiation builds a fresh ServerProxy
        # instead of sharing one -- confirm whether sharing was intended.
        self.connection = xmlrpclib.ServerProxy(xmlrpc_uri, allow_none=True,
                use_datetime=True)
        self._logger = logging.getLogger(self.__class__.__name__)
class Pynipap:
    """ A base class for the pynipap model classes.
        All Pynipap classes which maps to data in NIPAP (:py:class:`VRF`,
        :py:class:`Pool`, :py:class:`Prefix`) extends this class.
    """
    _logger = None
    """ Logging instance for this object.
    """
    id = None
    """ Internal database ID of object.
    """
    def __eq__(self, other):
        """ Perform test for equality.
            Equality is based solely on the backend ID: two objects with
            the same ID compare equal. Unsaved objects (id is None) are
            never equal to anything, not even to themselves.
            NOTE(review): defining __eq__ without __hash__ makes instances
            unhashable on Python 3 -- confirm instances are never used as
            dict keys or set members.
        """
        # Only possible if we have ID numbers set
        if self.id is None or other.id is None:
            return False
        return self.id == other.id
    def __init__(self, id=None):
        """ Creates logger and XML-RPC-connection.
            Stores a per-class logger, binds the shared AuthOptions
            instance (which must already have been initialised with
            options) and records the optional backend ID.
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self._auth_opts = AuthOptions()
        self.id = id
class Tag(Pynipap):
    """ A Tag.
    """
    name = None
    """ The Tag name
    """
    @classmethod
    def from_dict(cls, tag=None):
        """ Build a Tag object from a dict.
            Suitable for converting XML-RPC response data; the 'name' key
            must be present in 'tag'.
        """
        if tag is None:
            tag = {}
        new_tag = Tag()
        new_tag.name = tag['name']
        return new_tag
    @classmethod
    def search(cls, query, search_opts=None):
        """ Search tags.
            For more information, see the backend function
            :py:func:`nipap.backend.Nipap.search_tag`.
        """
        if search_opts is None:
            search_opts = {}
        conn = XMLRPCConnection().connection
        request = {
            'query': query,
            'search_options': search_opts,
            'auth': AuthOptions().options
        }
        try:
            search_result = conn.search_tag(request)
        except xmlrpclib.Fault as xml_fault:
            # translate XML-RPC faults into pynipap exceptions
            raise _fault_to_exception(xml_fault)
        # wrap the matching tags together with the search metadata
        return {
            'search_options': search_result['search_options'],
            'result': [Tag.from_dict(t) for t in search_result['result']]
        }
class VRF(Pynipap):
    """ A VRF.
    """
    rt = None
    """ The VRF RT, as a string (x:y or x.x.x.x:y).
    """
    name = None
    """ The name of the VRF, as a string.
    """
    description = None
    """ VRF description, as a string.
    """
    num_prefixes_v4 = None
    """ Number of IPv4 prefixes in this VRF
    """
    num_prefixes_v6 = None
    """ Number of IPv6 prefixes in this VRF
    """
    total_addresses_v4 = None
    """ Total number of IPv4 addresses in this VRF
    """
    total_addresses_v6 = None
    """ Total number of IPv6 addresses in this VRF
    """
    used_addresses_v4 = None
    """ Number of used IPv4 addresses in this VRF
    """
    used_addresses_v6 = None
    """ Number of used IPv6 addresses in this VRF
    """
    free_addresses_v4 = None
    """ Number of free IPv4 addresses in this VRF
    """
    free_addresses_v6 = None
    """ Number of free IPv6 addresses in this VRF
    """
    def __init__(self):
        # Tags (name -> Tag) and extra attribute-value pairs are instance
        # state, initialised empty until populated by from_dict()/save().
        Pynipap.__init__(self)
        self.tags = {}
        self.avps = {}
    @classmethod
    def list(cls, vrf=None):
        """ List VRFs.
            Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
            backend. Please see the documentation for the backend function for
            information regarding input arguments and return values.
        """
        if vrf is None:
            vrf = {}
        xmlrpc = XMLRPCConnection()
        try:
            vrf_list = xmlrpc.connection.list_vrf(
                {
                    'vrf': vrf,
                    'auth': AuthOptions().options
                })
        except xmlrpclib.Fault as xml_fault:
            # translate XML-RPC faults into pynipap exceptions
            raise _fault_to_exception(xml_fault)
        res = list()
        for v in vrf_list:
            res.append(VRF.from_dict(v))
        return res
    @classmethod
    def from_dict(cls, parm, vrf = None):
        """ Create new VRF-object from dict.
            Suitable for creating objects from XML-RPC data.
            All available keys must exist.
            If 'vrf' is given, that object is updated in place (used by
            save() to refresh an existing instance); otherwise a new VRF
            is created.
        """
        if vrf is None:
            vrf = VRF()
        vrf.id = parm['id']
        vrf.rt = parm['rt']
        vrf.name = parm['name']
        vrf.description = parm['description']
        vrf.tags = {}
        for tag_name in parm['tags']:
            tag = Tag.from_dict({'name': tag_name })
            vrf.tags[tag_name] = tag
        vrf.avps = parm['avps']
        # Statistics arrive as strings/numbers over XML-RPC; coerce to int
        # (rebound to 'long' on Python 2 at module level).
        vrf.num_prefixes_v4 = int(parm['num_prefixes_v4'])
        vrf.num_prefixes_v6 = int(parm['num_prefixes_v6'])
        vrf.total_addresses_v4 = int(parm['total_addresses_v4'])
        vrf.total_addresses_v6 = int(parm['total_addresses_v6'])
        vrf.used_addresses_v4 = int(parm['used_addresses_v4'])
        vrf.used_addresses_v6 = int(parm['used_addresses_v6'])
        vrf.free_addresses_v4 = int(parm['free_addresses_v4'])
        vrf.free_addresses_v6 = int(parm['free_addresses_v6'])
        return vrf
    @classmethod
    def get(cls, id):
        """ Get the VRF with id 'id'.
            Served from the module-level object cache when CACHE is
            enabled; otherwise (or on a miss) fetched via list().
            Raises NipapNonExistentError if no VRF with that ID exists.
        """
        # cached?
        # 'log' and '_cache' are module-level helpers, presumably defined
        # elsewhere in this module -- verify.
        if CACHE:
            if id in _cache['VRF']:
                log.debug('cache hit for VRF %d' % id)
                return _cache['VRF'][id]
            log.debug('cache miss for VRF %d' % id)
        try:
            vrf = VRF.list({ 'id': id })[0]
        except IndexError:
            # list() returned nothing for this ID
            raise NipapNonExistentError('no VRF with ID ' + str(id) + ' found')
        _cache['VRF'][id] = vrf
        return vrf
    @classmethod
    def search(cls, query, search_opts=None):
        """ Search VRFs.
            Maps to the function :py:func:`nipap.backend.Nipap.search_vrf` in
            the backend. Please see the documentation for the backend function
            for information regarding input arguments and return values.
        """
        if search_opts is None:
            search_opts = {}
        xmlrpc = XMLRPCConnection()
        try:
            search_result = xmlrpc.connection.search_vrf(
                {
                    'query': query,
                    'search_options': search_opts,
                    'auth': AuthOptions().options
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        # wrap the matching VRFs together with the search metadata
        result = dict()
        result['result'] = []
        result['search_options'] = search_result['search_options']
        for v in search_result['result']:
            result['result'].append(VRF.from_dict(v))
        return result
    @classmethod
    def smart_search(cls, query_string, search_options=None, extra_query = None):
        """ Perform a smart VRF search.
            Maps to the function
            :py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
            Please see the documentation for the backend function for
            information regarding input arguments and return values.
        """
        if search_options is None:
            search_options = {}
        xmlrpc = XMLRPCConnection()
        try:
            smart_result = xmlrpc.connection.smart_search_vrf(
                {
                    'query_string': query_string,
                    'search_options': search_options,
                    'auth': AuthOptions().options,
                    'extra_query': extra_query
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        # copy search metadata; 'error_message' is only present on errors
        result = dict()
        result['interpretation'] = smart_result['interpretation']
        result['search_options'] = smart_result['search_options']
        result['error'] = smart_result['error']
        if 'error_message' in smart_result:
            result['error_message'] = smart_result['error_message']
        result['result'] = list()
        for v in smart_result['result']:
            result['result'].append(VRF.from_dict(v))
        return result
    def save(self):
        """ Save changes made to object to NIPAP.
            If the object represents a new VRF unknown to NIPAP (attribute `id`
            is `None`) this function maps to the function
            :py:func:`nipap.backend.Nipap.add_vrf` in the backend, used to
            create a new VRF. Otherwise it maps to the function
            :py:func:`nipap.backend.Nipap.edit_vrf` in the backend, used to
            modify the VRF. Please see the documentation for the backend
            functions for information regarding input arguments and return
            values.
        """
        xmlrpc = XMLRPCConnection()
        # attributes sent to the backend; tags are flattened to a name list
        data = {
            'rt': self.rt,
            'name': self.name,
            'description': self.description,
            'tags': [],
            'avps': self.avps
        }
        for tag_name in self.tags:
            data['tags'].append(tag_name)
        if self.id is None:
            # New object, create
            try:
                vrf = xmlrpc.connection.add_vrf(
                    {
                        'attr': data,
                        'auth': self._auth_opts.options
                    })
            except xmlrpclib.Fault as xml_fault:
                raise _fault_to_exception(xml_fault)
        else:
            # Old object, edit
            try:
                vrfs = xmlrpc.connection.edit_vrf(
                    {
                        'vrf': { 'id': self.id },
                        'attr': data,
                        'auth': self._auth_opts.options
                    })
            except xmlrpclib.Fault as xml_fault:
                raise _fault_to_exception(xml_fault)
            # editing by ID must match exactly one VRF
            if len(vrfs) != 1:
                raise NipapError('VRF edit returned %d entries, should be 1.' % len(vrfs))
            vrf = vrfs[0]
        # Refresh object data with attributes from add/edit operation
        VRF.from_dict(vrf, self)
        _cache['VRF'][self.id] = self
    def remove(self):
        """ Remove VRF.
            Maps to the function :py:func:`nipap.backend.Nipap.remove_vrf` in
            the backend. Please see the documentation for the backend function
            for information regarding input arguments and return values.
        """
        xmlrpc = XMLRPCConnection()
        try:
            xmlrpc.connection.remove_vrf(
                {
                    'vrf': { 'id': self.id },
                    'auth': self._auth_opts.options
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        # drop the stale entry from the object cache
        if self.id in _cache['VRF']:
            del(_cache['VRF'][self.id])
class Pool(Pynipap):
""" An address pool.
"""
name = None
description = None
default_type = None
ipv4_default_prefix_length = None
ipv6_default_prefix_length = None
vrf = None
member_prefixes_v4 = None
member_prefixes_v6 = None
used_prefixes_v4 = None
used_prefixes_v6 = None
free_prefixes_v4 = None
free_prefixes_v6 = None
total_prefixes_v4 = None
total_prefixes_v6 = None
total_addresses_v4 = None
total_addresses_v6 = None
used_addresses_v4 = None
used_addresses_v6 = None
free_addresses_v4 = None
free_addresses_v6 = None
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
def save(self):
""" Save changes made to pool to NIPAP.
If the object represents a new pool unknown to NIPAP (attribute
`id` is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_pool` in the backend, used to
create a new pool. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_pool` in the backend, used to
modify the pool. Please see the documentation for the backend
functions for information regarding input arguments and return
values.
"""
xmlrpc = XMLRPCConnection()
data = {
'name': self.name,
'description': self.description,
'default_type': self.default_type,
'ipv4_default_prefix_length': self.ipv4_default_prefix_length,
'ipv6_default_prefix_length': self.ipv6_default_prefix_length,
'tags': [],
'avps': self.avps
}
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.id is None:
# New object, create
try:
pool = xmlrpc.connection.add_pool(
{
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
else:
# Old object, edit
try:
pools = xmlrpc.connection.edit_pool(
{
'pool': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(pools) != 1:
raise NipapError('Pool edit returned %d entries, should be 1.' % len(pools))
pool = pools[0]
# Refresh object data with attributes from add/edit operation
Pool.from_dict(pool, self)
_cache['Pool'][self.id] = self
def remove(self):
""" Remove pool.
Maps to the function :py:func:`nipap.backend.Nipap.remove_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_pool(
{
'pool': { 'id': self.id },
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if self.id in _cache['Pool']:
del(_cache['Pool'][self.id])
@classmethod
def get(cls, id):
""" Get the pool with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Pool']:
log.debug('cache hit for pool %d' % id)
return _cache['Pool'][id]
log.debug('cache miss for pool %d' % id)
try:
pool = Pool.list({'id': id})[0]
except (IndexError, KeyError):
raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
_cache['Pool'][id] = pool
return pool
@classmethod
def search(cls, query, search_opts=None):
""" Search pools.
Maps to the function :py:func:`nipap.backend.Nipap.search_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_pool(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for pool in search_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart pool search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_pool` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_pool(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for pool in smart_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def from_dict(cls, parm, pool=None):
    """ Create new Pool-object from dict.

    Suitable for creating objects from XML-RPC data.
    All available keys must exist.
    """
    if pool is None:
        pool = Pool()

    pool.id = parm['id']
    pool.name = parm['name']
    pool.description = parm['description']
    pool.default_type = parm['default_type']
    pool.ipv4_default_prefix_length = parm['ipv4_default_prefix_length']
    pool.ipv6_default_prefix_length = parm['ipv6_default_prefix_length']

    # Numeric statistics fields; None means "not available" and is left
    # at the class-level default, everything else is coerced to int.
    numeric_fields = (
        'member_prefixes_v4', 'member_prefixes_v6',
        'used_prefixes_v4', 'used_prefixes_v6', 'free_prefixes_v4',
        'free_prefixes_v6', 'total_prefixes_v4', 'total_prefixes_v6',
        'total_addresses_v4', 'total_addresses_v6', 'used_addresses_v4',
        'used_addresses_v6', 'free_addresses_v4', 'free_addresses_v6')
    for field in numeric_fields:
        raw_value = parm[field]
        if raw_value is not None:
            setattr(pool, field, int(raw_value))

    pool.tags = {name: Tag.from_dict({'name': name}) for name in parm['tags']}
    pool.avps = parm['avps']

    # store VRF object in pool.vrf
    if parm['vrf_id'] is not None:
        pool.vrf = VRF.get(parm['vrf_id'])

    return pool
@classmethod
def list(cls, spec=None):
    """ List pools.

    Maps to the function :py:func:`nipap.backend.Nipap.list_pool` in
    the backend. Please see the documentation for the backend function
    for information regarding input arguments and return values.
    """
    # NOTE: first argument renamed from the misleading 'self' to 'cls' —
    # this is a classmethod, so it receives the class, not an instance.
    if spec is None:
        spec = {}
    xmlrpc = XMLRPCConnection()
    try:
        pool_list = xmlrpc.connection.list_pool(
            {
                'pool': spec,
                'auth': AuthOptions().options
            })
    except xmlrpclib.Fault as xml_fault:
        raise _fault_to_exception(xml_fault)
    # Inflate each raw dict from the XML-RPC response into a Pool object.
    return [Pool.from_dict(pool) for pool in pool_list]
class Prefix(Pynipap):
    """ A prefix.

    Class-level attributes below act as defaults and are overwritten on
    instances by :py:meth:`Prefix.from_dict` when data is received from
    the backend.
    """

    family = None
    vrf = None
    prefix = None
    display_prefix = None
    description = None
    comment = None
    node = None
    pool = None
    type = None
    indent = None
    country = None
    external_key = None
    order_id = None
    customer_id = None
    authoritative_source = None
    alarm_priority = None
    monitor = None
    display = True
    match = False
    # -2 appears to act as a "children count not computed" sentinel;
    # from_dict() only overwrites it when the backend sends 'children'
    # — TODO confirm against backend semantics.
    children = -2
    vlan = None
    added = None
    last_modified = None
    total_addresses = None
    used_addresses = None
    free_addresses = None
    # Deprecated, misspelled aliases kept for backwards compatibility.
    # from_dict() has always populated the correctly spelled attributes
    # above; these only exist for external code reading the old names.
    used_addreses = None
    free_addreses = None
    status = None
    expires = None

    def __init__(self):
        Pynipap.__init__(self)
        self.inherited_tags = {}
        self.tags = {}
        self.avps = {}

    @classmethod
    def get(cls, id):
        """ Get the prefix with id 'id'.
        """
        # cached?
        if CACHE:
            if id in _cache['Prefix']:
                log.debug('cache hit for prefix %d' % id)
                return _cache['Prefix'][id]
            log.debug('cache miss for prefix %d' % id)

        try:
            prefix = Prefix.list({'id': id})[0]
        # KeyError added for consistency with Pool.get()
        except (IndexError, KeyError):
            raise NipapNonExistentError('no prefix with ID ' + str(id) + ' found')

        _cache['Prefix'][id] = prefix
        return prefix

    @classmethod
    def find_free(cls, vrf, args):
        """ Finds a free prefix.

        Maps to the function
        :py:func:`nipap.backend.Nipap.find_free_prefix` in the backend.
        Please see the documentation for the backend function for
        information regarding input arguments and return values.
        """
        xmlrpc = XMLRPCConnection()
        q = {
            'args': args,
            'auth': AuthOptions().options
        }

        # sanity checks
        if isinstance(vrf, VRF):
            q['vrf'] = {'id': vrf.id}
        elif vrf is None:
            q['vrf'] = None
        else:
            raise NipapValueError('vrf parameter must be instance of VRF class')

        # run XML-RPC query
        try:
            find_res = xmlrpc.connection.find_free_prefix(q)
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        return find_res

    @classmethod
    def search(cls, query, search_opts=None):
        """ Search for prefixes.

        Maps to the function :py:func:`nipap.backend.Nipap.search_prefix`
        in the backend. Please see the documentation for the backend
        function for information regarding input arguments and return
        values.
        """
        if search_opts is None:
            search_opts = {}
        xmlrpc = XMLRPCConnection()
        try:
            search_result = xmlrpc.connection.search_prefix(
                {
                    'query': query,
                    'search_options': search_opts,
                    'auth': AuthOptions().options
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        result = dict()
        result['result'] = []
        result['search_options'] = search_result['search_options']
        for prefix in search_result['result']:
            p = Prefix.from_dict(prefix)
            result['result'].append(p)
        return result

    @classmethod
    def smart_search(cls, query_string, search_options=None, extra_query=None):
        """ Perform a smart prefix search.

        Maps to the function
        :py:func:`nipap.backend.Nipap.smart_search_prefix` in the backend.
        Please see the documentation for the backend function for
        information regarding input arguments and return values.
        """
        if search_options is None:
            search_options = {}
        xmlrpc = XMLRPCConnection()
        try:
            smart_result = xmlrpc.connection.smart_search_prefix(
                {
                    'query_string': query_string,
                    'search_options': search_options,
                    'auth': AuthOptions().options,
                    'extra_query': extra_query
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        result = dict()
        result['interpretation'] = smart_result['interpretation']
        result['search_options'] = smart_result['search_options']
        result['error'] = smart_result['error']
        if 'error_message' in smart_result:
            result['error_message'] = smart_result['error_message']
        result['result'] = list()
        for prefix in smart_result['result']:
            p = Prefix.from_dict(prefix)
            result['result'].append(p)
        return result

    @classmethod
    def list(cls, spec=None):
        """ List prefixes.

        Maps to the function :py:func:`nipap.backend.Nipap.list_prefix` in
        the backend. Please see the documentation for the backend function
        for information regarding input arguments and return values.
        """
        if spec is None:
            spec = {}
        xmlrpc = XMLRPCConnection()
        try:
            pref_list = xmlrpc.connection.list_prefix(
                {
                    'prefix': spec,
                    'auth': AuthOptions().options
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        res = list()
        for pref in pref_list:
            p = Prefix.from_dict(pref)
            res.append(p)
        return res

    def save(self, args=None):
        """ Save prefix to NIPAP.

        If the object represents a new prefix unknown to NIPAP (attribute
        `id` is `None`) this function maps to the function
        :py:func:`nipap.backend.Nipap.add_prefix` in the backend, used to
        create a new prefix. Otherwise it maps to the function
        :py:func:`nipap.backend.Nipap.edit_prefix` in the backend, used to
        modify the VRF. Please see the documentation for the backend
        functions for information regarding input arguments and return
        values.
        """
        if args is None:
            args = {}

        xmlrpc = XMLRPCConnection()
        data = {
            'description': self.description,
            'comment': self.comment,
            'tags': [],
            'node': self.node,
            'type': self.type,
            'country': self.country,
            'order_id': self.order_id,
            'customer_id': self.customer_id,
            'external_key': self.external_key,
            'alarm_priority': self.alarm_priority,
            'monitor': self.monitor,
            'vlan': self.vlan,
            'avps': self.avps,
            'expires': self.expires
        }

        # Only send 'status' when explicitly set.
        if self.status is not None:
            data['status'] = self.status

        for tag_name in self.tags:
            data['tags'].append(tag_name)

        if self.vrf is not None:
            if not isinstance(self.vrf, VRF):
                raise NipapValueError("'vrf' attribute not instance of VRF class.")
            data['vrf_id'] = self.vrf.id

        # Prefix can be none if we are creating a new prefix
        # from a pool or other prefix!
        if self.prefix is not None:
            data['prefix'] = self.prefix

        if self.pool is None:
            data['pool_id'] = None
        else:
            if not isinstance(self.pool, Pool):
                raise NipapValueError("'pool' attribute not instance of Pool class.")
            data['pool_id'] = self.pool.id

        # New object, create from scratch
        if self.id is None:
            # format args
            x_args = {}
            if 'from-pool' in args:
                x_args['from-pool'] = {'id': args['from-pool'].id}
            if 'family' in args:
                x_args['family'] = args['family']
            if 'from-prefix' in args:
                x_args['from-prefix'] = args['from-prefix']
            if 'prefix_length' in args:
                x_args['prefix_length'] = args['prefix_length']

            try:
                prefix = xmlrpc.connection.add_prefix(
                    {
                        'attr': data,
                        'args': x_args,
                        'auth': self._auth_opts.options
                    })
            except xmlrpclib.Fault as xml_fault:
                raise _fault_to_exception(xml_fault)

        # Old object, edit
        else:
            # Add authoritative source to data
            data['authoritative_source'] = self.authoritative_source
            try:
                # save
                prefixes = xmlrpc.connection.edit_prefix(
                    {
                        'prefix': {'id': self.id},
                        'attr': data,
                        'auth': self._auth_opts.options
                    })
            except xmlrpclib.Fault as xml_fault:
                raise _fault_to_exception(xml_fault)

            if len(prefixes) != 1:
                raise NipapError('Prefix edit returned %d entries, should be 1.' % len(prefixes))
            prefix = prefixes[0]

        # Refresh object data with attributes from add/edit operation
        Prefix.from_dict(prefix, self)

        # update cache
        _cache['Prefix'][self.id] = self
        # A changed prefix invalidates the cached entry of its pool.
        if self.pool is not None:
            if self.pool.id in _cache['Pool']:
                del _cache['Pool'][self.pool.id]

    def remove(self, recursive=False):
        """ Remove the prefix.

        Maps to the function :py:func:`nipap.backend.Nipap.remove_prefix`
        in the backend. Please see the documentation for the backend
        function for information regarding input arguments and return
        values.
        """
        xmlrpc = XMLRPCConnection()
        try:
            xmlrpc.connection.remove_prefix(
                {
                    'prefix': {'id': self.id},
                    'recursive': recursive,
                    'auth': self._auth_opts.options
                })
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)

        # update cache
        if self.id in _cache['Prefix']:
            del (_cache['Prefix'][self.id])
        if self.pool is not None:
            if self.pool.id in _cache['Pool']:
                del _cache['Pool'][self.pool.id]

    @classmethod
    def from_dict(cls, pref, prefix=None):
        """ Create a Prefix object from a dict.

        Suitable for creating Prefix objects from XML-RPC input.
        """
        if prefix is None:
            prefix = Prefix()

        prefix.id = pref['id']
        if pref['vrf_id'] is not None:  # VRF is not mandatory
            prefix.vrf = VRF.get(pref['vrf_id'])
        prefix.family = pref['family']
        prefix.prefix = pref['prefix']
        prefix.display_prefix = pref['display_prefix']
        prefix.description = pref['description']
        prefix.comment = pref['comment']
        prefix.node = pref['node']
        if pref['pool_id'] is not None:  # Pool is not mandatory
            prefix.pool = Pool.get(pref['pool_id'])
        prefix.type = pref['type']
        prefix.indent = pref['indent']
        prefix.country = pref['country']
        prefix.order_id = pref['order_id']
        prefix.customer_id = pref['customer_id']
        prefix.external_key = pref['external_key']
        prefix.authoritative_source = pref['authoritative_source']
        prefix.alarm_priority = pref['alarm_priority']
        prefix.monitor = pref['monitor']
        prefix.vlan = pref['vlan']
        prefix.added = pref['added']
        prefix.last_modified = pref['last_modified']
        prefix.total_addresses = int(pref['total_addresses'])
        prefix.used_addresses = int(pref['used_addresses'])
        prefix.free_addresses = int(pref['free_addresses'])
        prefix.status = pref['status']
        prefix.avps = pref['avps']
        prefix.expires = pref['expires']

        prefix.inherited_tags = {}
        for tag_name in pref['inherited_tags']:
            tag = Tag.from_dict({'name': tag_name})
            prefix.inherited_tags[tag_name] = tag

        prefix.tags = {}
        for tag_name in pref['tags']:
            tag = Tag.from_dict({'name': tag_name})
            prefix.tags[tag_name] = tag

        # Optional, search-related keys.
        if 'match' in pref:
            prefix.match = pref['match']
        if 'display' in pref:
            prefix.display = pref['display']
        if 'children' in pref:
            prefix.children = pref['children']

        return prefix
def nipapd_version():
    """ Get version of nipapd we're connected to.

    Maps to the function :py:func:`nipap.xmlrpc.NipapXMLRPC.version` in the
    XML-RPC API; see its documentation for the return value.
    """
    request = {'auth': AuthOptions().options}
    try:
        return XMLRPCConnection().connection.version(request)
    except xmlrpclib.Fault as xml_fault:
        raise _fault_to_exception(xml_fault)
def nipap_db_version():
    """ Get schema version of database we're connected to.

    Maps to the function :py:func:`nipap.backend.Nipap._get_db_version` in
    the backend; see its documentation for the return value.
    """
    request = {'auth': AuthOptions().options}
    try:
        return XMLRPCConnection().connection.db_version(request)
    except xmlrpclib.Fault as xml_fault:
        raise _fault_to_exception(xml_fault)
#
# Define exceptions
#
class NipapError(Exception):
    """ Base class for all NIPAP model errors.

    Every exception raised by the NIPAP model derives from this class.
    """
class NipapNonExistentError(NipapError):
    """ Thrown when something can not be found.

    For example when a given ID can not be found in the NIPAP database.
    Mapped from XML-RPC fault code 1300 (see _fault_to_exception_map).
    """
class NipapInputError(NipapError):
    """ Something wrong with the input we received

    A general case. Mapped from XML-RPC fault code 1100
    (see _fault_to_exception_map).
    """
    pass
class NipapMissingInputError(NipapInputError):
    """ Missing input

    Most input is passed in dicts, this could mean a missing key in a dict.
    Mapped from XML-RPC fault code 1110 (see _fault_to_exception_map).
    """
    pass
class NipapExtraneousInputError(NipapInputError):
    """ Extraneous input

    Most input is passed in dicts, this could mean an unknown key in a dict.
    Mapped from XML-RPC fault code 1120 (see _fault_to_exception_map).
    """
    pass
class NipapNoSuchOperatorError(NipapInputError):
    """ A non existent operator was specified.

    NOTE(review): this class has no entry in _fault_to_exception_map, so it
    is never produced by _fault_to_exception().
    """
    pass
class NipapValueError(NipapError):
    """ Something wrong with a value we have

    For example, trying to send an integer when an IP address is expected.
    Mapped from XML-RPC fault code 1200 (see _fault_to_exception_map).
    """
    pass
class NipapDuplicateError(NipapError):
    """ A duplicate entry was encountered

    Mapped from XML-RPC fault code 1400 (see _fault_to_exception_map).
    """
    pass
class NipapAuthError(NipapError):
    """ General NIPAP AAA error

    Mapped from XML-RPC fault code 1500 (see _fault_to_exception_map).
    """
    pass
class NipapAuthenticationError(NipapAuthError):
    """ Authentication failed.

    Mapped from XML-RPC fault code 1510 (see _fault_to_exception_map).
    """
    pass
class NipapAuthorizationError(NipapAuthError):
    """ Authorization failed.

    Mapped from XML-RPC fault code 1520 (see _fault_to_exception_map).
    """
    pass
#
# GLOBAL STUFF
#

# Simple object cache, keyed per class name -> {id: instance}.
# Read by Pool.get()/Prefix.get() and invalidated by Prefix.save()/remove().
# TODO: fix some kind of timeout
_cache = {
    'Pool': {},
    'Prefix': {},
    'VRF': {}
}

# Map from XML-RPC Fault codes to Exception classes.
# NOTE(review): NipapNoSuchOperatorError has no fault code entry here;
# unmapped codes fall back to NipapError in _fault_to_exception().
_fault_to_exception_map = {
    1000: NipapError,
    1100: NipapInputError,
    1110: NipapMissingInputError,
    1120: NipapExtraneousInputError,
    1200: NipapValueError,
    1300: NipapNonExistentError,
    1400: NipapDuplicateError,
    1500: NipapAuthError,
    1510: NipapAuthenticationError,
    1520: NipapAuthorizationError
}

# Module-level logger used throughout this module.
log = logging.getLogger("Pynipap")
def _fault_to_exception(f):
    """ Converts XML-RPC Fault objects to Pynipap-exceptions.

    Unknown fault codes map to the generic NipapError.

    TODO: Is this one neccesary? Can be done inline...
    """
    exc_class = _fault_to_exception_map.get(f.faultCode, NipapError)
    return exc_class(f.faultString)
| {
"pile_set_name": "Github"
} |
/*
* Copyright © 2014 - 2020 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradoop.flink.model.impl.operators.split;
import org.gradoop.common.model.impl.pojo.EPGMVertex;
import org.gradoop.common.model.impl.properties.PropertyValue;
import org.gradoop.flink.model.GradoopFlinkTestBase;
import org.gradoop.flink.model.impl.epgm.GraphCollection;
import org.gradoop.flink.model.impl.epgm.LogicalGraph;
import org.gradoop.flink.util.FlinkAsciiGraphLoader;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests for the {@link Split} operator, which partitions a logical graph
 * into a graph collection based on per-vertex property values.
 */
public class SplitTest extends GradoopFlinkTestBase {

  /**
   * Extracts the split keys of a vertex: the values of its "key1" and
   * "key2" properties, when present.
   *
   * @param v vertex to inspect
   * @return list of split values (may be empty when neither key is set)
   */
  private static List<PropertyValue> getSplitValues(EPGMVertex v) {
    String key1 = "key1";
    String key2 = "key2";
    List<PropertyValue> valueList = new ArrayList<>();
    if (v.hasProperty(key1)) {
      valueList.add(v.getPropertyValue(key1));
    }
    if (v.hasProperty(key2)) {
      valueList.add(v.getPropertyValue(key2));
    }
    return valueList;
  }

  /**
   * Splitting on a single key yields one graph per distinct "key1" value;
   * edges are retained only when both endpoints end up in the same graph.
   */
  @Test
  public void testSplit() throws Exception {
    FlinkAsciiGraphLoader loader = getLoaderFromString(
      "input[" +
      "(v0 {key1 : 0})" +
      "(v1 {key1 : 1})" +
      "(v2 {key1 : 1})" +
      "(v3 {key1 : 0})" +
      "(v1)-[e1]->(v2)" +
      "(v3)-[e2]->(v0)" +
      "(v2)-[e3]->(v0)" +
      "]" +
      "graph1[" +
      "(v1)-[e1]->(v2)" +
      "]" +
      "graph2[" +
      "(v3)-[e2]->(v0)" +
      "]"
    );

    LogicalGraph input = loader.getLogicalGraphByVariable("input");

    GraphCollection result =
      input.callForCollection(new Split(SplitTest::getSplitValues));

    collectAndAssertTrue(result.equalsByGraphElementIds(
      loader.getGraphCollectionByVariables("graph1", "graph2")));
  }

  /**
   * Same as {@link #testSplit()} but with a third value (2) that produces
   * a graph containing only vertices (v4, v5) and no edges.
   */
  @Test
  public void testSplit2() throws Exception {
    FlinkAsciiGraphLoader loader =
      getLoaderFromString("" +
        "input[" +
        "(v0 {key1 : 0})" +
        "(v1 {key1 : 1})" +
        "(v2 {key1 : 1})" +
        "(v3 {key1 : 0})" +
        "(v4 {key1 : 2})" +
        "(v5 {key1 : 2})" +
        "(v1)-[e1]->(v2)" +
        "(v3)-[e2]->(v0)" +
        "(v2)-[e3]->(v0)" +
        "]" +
        "graph1[" +
        "(v1)-[e1]->(v2)" +
        "]" +
        "graph2[" +
        "(v3)-[e2]->(v0)" +
        "]" +
        "graph3[" +
        "(v4)" +
        "(v5)" +
        "]"
      );

    LogicalGraph input = loader.getLogicalGraphByVariable("input");

    GraphCollection result = input
      .callForCollection(new Split(SplitTest::getSplitValues));

    GraphCollection expectation = loader.getGraphCollectionByVariables(
      "graph1", "graph2", "graph3");

    collectAndAssertTrue(result.equalsByGraphElementIds(expectation));
  }

  /**
   * A vertex carrying both "key1" and "key2" (v2) belongs to multiple
   * result graphs, so edges incident to it can appear in several graphs.
   */
  @Test
  public void testSplitWithMultipleKeys() throws Exception {
    FlinkAsciiGraphLoader loader =
      getLoaderFromString("" +
        "input[" +
        "(v0 {key1 : 0})" +
        "(v1 {key1 : 1})" +
        "(v2 {key1 : 1, key2 : 0})" +
        "(v3 {key1 : 0})" +
        "(v1)-[e1]->(v2)" +
        "(v3)-[e2]->(v0)" +
        "(v2)-[e3]->(v0)" +
        "]" +
        "graph1[" +
        "(v1)-[e1]->(v2)" +
        "]" +
        "graph2[" +
        "(v2)-[e3]->(v0)" +
        "(v3)-[e2]->(v0)" +
        "]"
      );

    LogicalGraph input = loader.getLogicalGraphByVariable("input");

    GraphCollection result = input
      .callForCollection(new Split(SplitTest::getSplitValues));

    collectAndAssertTrue(result.equalsByGraphElementIds(
      loader.getGraphCollectionByVariables("graph1", "graph2")));
  }

  /**
   * Splitting via {@code splitBy("id")} where every vertex shares the same
   * id value produces a single result graph equal to the input; both
   * element ids and element data are compared.
   */
  @Test
  public void testSplitWithSingleResultGraph() throws Exception {
    FlinkAsciiGraphLoader loader =
      getLoaderFromString("" +
        "g1:Persons [" +
        "(v0:Person {id : 0, author : \"value0\"})" +
        "(v1:Person {id : 0, author : \"value1\"})" +
        "(v2:Person {id : 0, author : \"value2\"})" +
        "(v3:Person {id : 0, author : \"value3\"})" +
        "(v0)-[e0:sameAs {id : 0, sim : \"0.91\"}]->(v1)" +
        "(v0)-[e1:sameAs {id : 1, sim : \"0.3\"}]->(v2)" +
        "(v2)-[e2:sameAs {id : 2, sim : \"0.1\"}]->(v1)" +
        "(v2)-[e3:sameAs {id : 3, sim : \"0.99\"}]->(v3)" +
        "]" +
        "g2 [" +
        "(v0)-[e0:sameAs {id : 0, sim : \"0.91\"}]->(v1)" +
        "(v0)-[e1:sameAs {id : 1, sim : \"0.3\"}]->(v2)" +
        "(v2)-[e2:sameAs {id : 2, sim : \"0.1\"}]->(v1)" +
        "(v2)-[e3:sameAs {id : 3, sim : \"0.99\"}]->(v3)" +
        "]"
      );

    LogicalGraph input = loader.getLogicalGraphByVariable("g1");

    GraphCollection result = input.splitBy("id");

    collectAndAssertTrue(result.equalsByGraphElementIds(
      loader.getGraphCollectionByVariables("g2")));
    collectAndAssertTrue(result.equalsByGraphElementData(
      loader.getGraphCollectionByVariables("g2")));
  }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2010-2014 Evolveum and contributors
*
* This work is dual-licensed under the Apache License 2.0
* and European Union Public License. See LICENSE file for details.
*/
package com.evolveum.midpoint.schema.processor;
import com.evolveum.midpoint.prism.ComplexTypeDefinition;
import com.evolveum.midpoint.prism.PrismContainerValue;
import com.evolveum.midpoint.prism.PrismContext;
import com.evolveum.midpoint.prism.PrismProperty;
import com.evolveum.midpoint.prism.impl.schema.PrismSchemaImpl;
import com.evolveum.midpoint.schema.constants.SchemaConstants;
import com.evolveum.midpoint.util.DOMUtil;
import com.evolveum.midpoint.util.exception.SchemaException;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ConnectorType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ExtensionType;
import org.w3c.dom.Element;
import javax.xml.namespace.QName;
import java.util.Collection;
/**
* @author semancik
* @author mederly
*
*/
/**
 * Prism-schema implementation describing a connector's configuration,
 * parsed either from an XSD element or built programmatically.
 */
public class ConnectorSchemaImpl extends PrismSchemaImpl implements ConnectorSchema {

    // Namespace prefix usually used for this connector's elements; set via
    // setUsualNamespacePrefix() and returned by getUsualNamespacePrefix().
    private String usualNamespacePrefix;

    /**
     * Creates an empty schema with the given target namespace.
     */
    public ConnectorSchemaImpl(String namespace, PrismContext prismContext) {
        super(namespace, prismContext);
    }

    /**
     * Parses the schema from a DOM element; the target namespace is taken
     * from the element itself.
     */
    private ConnectorSchemaImpl(Element element, String shortDesc, PrismContext prismContext) throws SchemaException {
        super(DOMUtil.getSchemaTargetNamespace(element), prismContext);
        parseThis(element, true, shortDesc, prismContext);
    }

    /**
     * Factory method wrapping the parsing constructor.
     *
     * @throws SchemaException when the element is not a valid schema
     */
    public static ConnectorSchemaImpl parse(Element element, String shortDesc, PrismContext prismContext) throws SchemaException {
        return new ConnectorSchemaImpl(element, shortDesc, prismContext);
    }

    /**
     * Reads the usual namespace prefix from the connector object's
     * extension container, or returns null when not present.
     */
    public static String retrieveUsualNamespacePrefix(ConnectorType connectorType) {
        if (connectorType.getExtension() != null) {
            PrismContainerValue<ExtensionType> ext = connectorType.getExtension().asPrismContainerValue();
            PrismProperty<String> prefixProp = ext.findProperty(SchemaConstants.ICF_CONNECTOR_USUAL_NAMESPACE_PREFIX);
            if (prefixProp != null) {
                return prefixProp.getRealValue();
            }
        }
        return null;
    }

    /**
     * Returns all object class definitions contained in this schema.
     */
    @Override
    public Collection<ObjectClassComplexTypeDefinition> getObjectClassDefinitions() {
        return getDefinitions(ObjectClassComplexTypeDefinition.class);
    }

//    /**
//     * Creates a new resource object definition and adds it to the schema.
//     *
//     * This is a preferred way how to create definition in the schema.
//     *
//     * @param localTypeName
//     *            type name "relative" to schema namespace
//     * @return new resource object definition
//     */
//    public ObjectClassComplexTypeDefinition createObjectClassDefinition(String localTypeName) {
//        QName typeName = new QName(getNamespace(), localTypeName);
//        return createObjectClassDefinition(typeName);
//    }

//    /**
//     * Creates a new resource object definition and adds it to the schema.
//     *
//     * This is a preferred way how to create definition in the schema.
//     *
//     * @param typeName
//     *            type QName
//     * @return new resource object definition
//     */
//    public ObjectClassComplexTypeDefinition createObjectClassDefinition(QName typeName) {
//        ObjectClassComplexTypeDefinition cTypeDef = new ObjectClassComplexTypeDefinitionImpl(typeName, getPrismContext());
//        add(cTypeDef);
//        return cTypeDef;
//    }

    /**
     * Looks up an object class definition by its type QName.
     *
     * @return the definition, or null when the type is unknown
     * @throws IllegalStateException when the type exists but is not an
     *         object class definition
     */
    @Override
    public ObjectClassComplexTypeDefinition findObjectClassDefinition(QName qName) {
        ComplexTypeDefinition complexTypeDefinition = findComplexTypeDefinitionByType(qName);
        if (complexTypeDefinition == null) {
            return null;
        }
        if (complexTypeDefinition instanceof ObjectClassComplexTypeDefinition) {
            return (ObjectClassComplexTypeDefinition)complexTypeDefinition;
        } else {
            throw new IllegalStateException("Expected the definition "+qName+" to be of type "+
                    ObjectClassComplexTypeDefinition.class+" but it was "+complexTypeDefinition.getClass());
        }
    }

    public void setUsualNamespacePrefix(String usualNamespacePrefix) {
        this.usualNamespacePrefix = usualNamespacePrefix;
    }

    @Override
    public String getUsualNamespacePrefix() {
        return usualNamespacePrefix;
    }
}
| {
"pile_set_name": "Github"
} |
# RT-Thread building script for component

from building import *

# Directory of this SConscript; added to the include path below.
cwd = GetCurrentDir()
# All C and C++ sources in this directory belong to the component.
src = Glob('*.c') + Glob('*.cpp')
CPPPATH = [cwd]

# Register the 'libc' group; it is only built when all listed
# configuration options are enabled in the RT-Thread build.
group = DefineGroup('libc', src,
    depend = ['RT_USING_LIBC', 'RT_USING_POSIX', 'RT_USING_POSIX_TERMIOS'],
    CPPPATH = CPPPATH)

Return('group')
| {
"pile_set_name": "Github"
} |
import hudson.model.AbstractProject
import hudson.plugins.git.GitSCM
import hudson.plugins.git.extensions.impl.*

// For every buildable job that uses Git SCM: ensure a CloneOption extension
// exists. When one already exists it is replaced by a copy that preserves
// its noTags/reference/timeout settings but forces the first constructor
// argument to true.
// NOTE(review): the first CloneOption constructor argument appears to be
// the 'shallow' flag and the last the timeout — confirm against the
// installed git-plugin version's CloneOption signature.
Jenkins.instance.getAllItems(AbstractProject.class)
    .findAll { job -> job.isBuildable()}
    .findAll { job -> job.scm != null && job.scm instanceof GitSCM}
    .each { project ->
        scm = project.scm
        cloneOption = scm.extensions.find {it instanceof CloneOption}
        if (!cloneOption) {
            scm.extensions.add(new CloneOption(true, false, "", 10))
        } else {
            scm.extensions.remove(cloneOption)
            scm.extensions.add(new CloneOption(true, cloneOption.noTags, cloneOption.reference, cloneOption.timeout))
        }
        project.save()
    }
// Script console prints the last expression; return null to avoid noise.
null
"pile_set_name": "Github"
} |
package jiguang.chat.utils;
import java.util.Comparator;
import cn.jpush.im.android.api.model.Conversation;
/**
 * Orders conversations by their last message timestamp, newest first.
 */
public class SortConvList implements Comparator<Conversation> {

    @Override
    public int compare(Conversation first, Conversation second) {
        // Descending order: a later last-message date sorts earlier.
        return Long.compare(second.getLastMsgDate(), first.getLastMsgDate());
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef BOOST_THREAD_PTHREAD_THREAD_DATA_HPP
#define BOOST_THREAD_PTHREAD_THREAD_DATA_HPP
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// (C) Copyright 2007 Anthony Williams
// (C) Copyright 2011-2012 Vicente J. Botet Escriba
#include <boost/thread/detail/config.hpp>
#include <boost/thread/exceptions.hpp>
#include <boost/thread/lock_guard.hpp>
#include <boost/thread/lock_types.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/pthread/condition_variable_fwd.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/assert.hpp>
#ifdef BOOST_THREAD_USES_CHRONO
#include <boost/chrono/system_clocks.hpp>
#endif
#include <map>
#include <vector>
#include <utility>
#if defined(__ANDROID__)
# ifndef PAGE_SIZE
# define PAGE_SIZE 4096
# endif
#endif
#include <pthread.h>
#include <unistd.h>
#include <boost/config/abi_prefix.hpp>
namespace boost
{
    // RAII owner of a pthread_attr_t: initialized in the constructor and
    // destroyed in the destructor, with stack-size accessors.
    class thread_attributes {
    public:
        thread_attributes() BOOST_NOEXCEPT {
            int res = pthread_attr_init(&val_);
            BOOST_VERIFY(!res && "pthread_attr_init failed");
        }
        ~thread_attributes() {
          int res = pthread_attr_destroy(&val_);
          BOOST_VERIFY(!res && "pthread_attr_destroy failed");
        }
        // stack
        // A size of 0 is a no-op; otherwise the value is clamped up to
        // PTHREAD_STACK_MIN (when defined) and rounded up to a whole
        // number of pages before being stored in the attribute.
        void set_stack_size(std::size_t size) BOOST_NOEXCEPT {
          if (size==0) return;
          std::size_t page_size = getpagesize();
#ifdef PTHREAD_STACK_MIN
          if (size<PTHREAD_STACK_MIN) size=PTHREAD_STACK_MIN;
#endif
          size = ((size+page_size-1)/page_size)*page_size;
          int res = pthread_attr_setstacksize(&val_, size);
          BOOST_VERIFY(!res && "pthread_attr_setstacksize failed");
        }

        std::size_t get_stack_size() const BOOST_NOEXCEPT {
            std::size_t size;
            int res = pthread_attr_getstacksize(&val_, &size);
            BOOST_VERIFY(!res && "pthread_attr_getstacksize failed");
            return size;
        }
#define BOOST_THREAD_DEFINES_THREAD_ATTRIBUTES_NATIVE_HANDLE

        typedef pthread_attr_t native_handle_type;
        // Direct access to the underlying pthread_attr_t.
        native_handle_type* native_handle() BOOST_NOEXCEPT {
          return &val_;
        }
        const native_handle_type* native_handle() const BOOST_NOEXCEPT {
          return &val_;
        }

    private:
        pthread_attr_t val_;
    };
class thread;
    namespace detail
    {
        struct shared_state_base;
        struct tss_cleanup_function;
        struct thread_exit_callback_node;

        // One thread-specific-storage slot: the stored pointer plus the
        // cleanup function to run for it at thread exit.
        struct tss_data_node
        {
            boost::shared_ptr<boost::detail::tss_cleanup_function> func;
            void* value;

            tss_data_node(boost::shared_ptr<boost::detail::tss_cleanup_function> func_,
                          void* value_):
                func(func_),value(value_)
            {}
        };

        struct thread_data_base;
        typedef boost::shared_ptr<thread_data_base> thread_data_ptr;

        // Per-thread bookkeeping shared between a boost::thread object and
        // the running thread itself.
        struct BOOST_THREAD_DECL thread_data_base:
            enable_shared_from_this<thread_data_base>
        {
            // Keeps *this alive while the thread runs.
            thread_data_ptr self;
            pthread_t thread_handle;
            // Guards the mutable state below.
            boost::mutex data_mutex;
            // Signalled when the thread finishes; used by join().
            boost::condition_variable done_condition;
            boost::mutex sleep_mutex;
            boost::condition_variable sleep_condition;
            bool done;
            bool join_started;
            bool joined;
            boost::detail::thread_exit_callback_node* thread_exit_callbacks;
            // TSS slots, keyed by the address of the tss object.
            std::map<void const*,boost::detail::tss_data_node> tss_data;

//#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
            // These data must be at the end so that the access to the other fields doesn't change
            // when BOOST_THREAD_PROVIDES_INTERRUPTIONS is defined.
            // Another option is to have them always
            // Condition/mutex the thread is currently blocked on, so an
            // interrupt can wake it.
            pthread_mutex_t* cond_mutex;
            pthread_cond_t* current_cond;
//#endif
            typedef std::vector<std::pair<condition_variable*, mutex*>
//, hidden_allocator<std::pair<condition_variable*, mutex*> >
            > notify_list_t;
            // Condition variables to notify at thread exit
            // (see notify_all_at_thread_exit()).
            notify_list_t notify;

            typedef std::vector<shared_ptr<shared_state_base> > async_states_t;
            // Futures to make ready at thread exit
            // (see make_ready_at_thread_exit()).
            async_states_t async_states_;

//#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
            // These data must be at the end so that the access to the other fields doesn't change
            // when BOOST_THREAD_PROVIDES_INTERRUPTIONS is defined.
            // Another option is to have them always
            bool interrupt_enabled;
            bool interrupt_requested;
//#endif
            thread_data_base():
                thread_handle(0),
                done(false),join_started(false),joined(false),
                thread_exit_callbacks(0),
//#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
                cond_mutex(0),
                current_cond(0),
//#endif
                notify(),
                async_states_()
//#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
                , interrupt_enabled(true)
                , interrupt_requested(false)
//#endif
            {}
            virtual ~thread_data_base();

            typedef pthread_t native_handle_type;

            // Implemented by the templated subclass that holds the
            // user-supplied callable.
            virtual void run()=0;
            virtual void notify_all_at_thread_exit(condition_variable* cv, mutex* m)
            {
              notify.push_back(std::pair<condition_variable*, mutex*>(cv, m));
            }

            void make_ready_at_thread_exit(shared_ptr<shared_state_base> as)
            {
              async_states_.push_back(as);
            }
        };

        BOOST_THREAD_DECL thread_data_base* get_current_thread_data();

#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
        // Scope guard used around condition waits: registers the condition
        // and mutex in the current thread's data (so interrupt() can find
        // them), throws thread_interrupted when an interrupt is pending,
        // and holds the mutex locked for the lifetime of the guard.
        class interruption_checker
        {
            thread_data_base* const thread_info;
            pthread_mutex_t* m;
            // True when the current thread has interruption enabled.
            bool set;

            void check_for_interruption()
            {
#ifndef BOOST_NO_EXCEPTIONS
                if(thread_info->interrupt_requested)
                {
                    thread_info->interrupt_requested=false;
                    throw thread_interrupted(); // BOOST_NO_EXCEPTIONS protected
                }
#endif
            }

            void operator=(interruption_checker&);
        public:
            explicit interruption_checker(pthread_mutex_t* cond_mutex,pthread_cond_t* cond):
                thread_info(detail::get_current_thread_data()),m(cond_mutex),
                set(thread_info && thread_info->interrupt_enabled)
            {
                if(set)
                {
                    lock_guard<mutex> guard(thread_info->data_mutex);
                    check_for_interruption();
                    thread_info->cond_mutex=cond_mutex;
                    thread_info->current_cond=cond;
                    BOOST_VERIFY(!pthread_mutex_lock(m));
                }
                else
                {
                    BOOST_VERIFY(!pthread_mutex_lock(m));
                }
            }
            ~interruption_checker()
            {
                if(set)
                {
                    BOOST_VERIFY(!pthread_mutex_unlock(m));
                    lock_guard<mutex> guard(thread_info->data_mutex);
                    thread_info->cond_mutex=NULL;
                    thread_info->current_cond=NULL;
                }
                else
                {
                    BOOST_VERIFY(!pthread_mutex_unlock(m));
                }
            }
        };
#endif
    }
    namespace this_thread
    {
        namespace hidden
        {
          // Raw timespec-based sleeps; implemented in the library.
          void BOOST_THREAD_DECL sleep_for(const timespec& ts);
          void BOOST_THREAD_DECL sleep_until(const timespec& ts);
        }

#ifdef BOOST_THREAD_USES_CHRONO
#ifdef BOOST_THREAD_SLEEP_FOR_IS_STEADY
        // chrono front-end: converts the duration to timespec and forwards.
        inline
        void BOOST_SYMBOL_VISIBLE sleep_for(const chrono::nanoseconds& ns)
        {
            return boost::this_thread::hidden::sleep_for(boost::detail::to_timespec(ns));
        }
#endif
#endif // BOOST_THREAD_USES_CHRONO

        // Variants that do not act as interruption points.
        namespace no_interruption_point
        {
          namespace hidden
          {
            void BOOST_THREAD_DECL sleep_for(const timespec& ts);
            void BOOST_THREAD_DECL sleep_until(const timespec& ts);
          }

#ifdef BOOST_THREAD_USES_CHRONO
#ifdef BOOST_THREAD_SLEEP_FOR_IS_STEADY
        inline
        void BOOST_SYMBOL_VISIBLE sleep_for(const chrono::nanoseconds& ns)
        {
            return boost::this_thread::no_interruption_point::hidden::sleep_for(boost::detail::to_timespec(ns));
        }
#endif
#endif // BOOST_THREAD_USES_CHRONO
        } // no_interruption_point

        void BOOST_THREAD_DECL yield() BOOST_NOEXCEPT;

#if defined BOOST_THREAD_USES_DATETIME
#ifdef __DECXXX
        /// Workaround of DECCXX issue of incorrect template substitution
        template<>
#endif
        // Absolute-time sleep using the Boost.DateTime system_time type.
        inline void sleep(system_time const& abs_time)
        {
          return boost::this_thread::hidden::sleep_until(boost::detail::to_timespec(abs_time));
        }

        // Relative sleep expressed as an absolute deadline.
        template<typename TimeDuration>
        inline BOOST_SYMBOL_VISIBLE void sleep(TimeDuration const& rel_time)
        {
            this_thread::sleep(get_system_time()+rel_time);
        }
#endif // BOOST_THREAD_USES_DATETIME
    } // this_thread
}
#include <boost/config/abi_suffix.hpp>
#endif
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2019 dialog LLC <[email protected]>
*/
@import '../../styles/variables.css';
/* Theme variables for the sidebar search result panel. */
:root {
  /* NOTE(review): despite its name this variable holds a font *family*
     (var(--default-font-family)) and is consumed as font-family in
     .container below; renaming it could break external overrides, so it
     is only flagged here. */
  --sidebar-search-result-font-size: var(--default-font-family);
  --sidebar-search-result-header-background: #dcdcdc;
  --sidebar-search-result-header-font-size: 13px;
  --sidebar-search-result-header-color: #000;
  --sidebar-search-result-text-color: #000;
}

/* Styles */

/* Scrollable container filling the remaining sidebar space. */
.container {
  all: initial;
  font-family: var(--sidebar-search-result-font-size);
  position: relative;
  flex: 1 1 auto;
  overflow: auto;

  @mixin flex-fix;
}

/* Centered loading state. */
.spinnerWrapper {
  padding: calc(var(--default-padding-large) * 2.5) 0;
  text-align: center;
}

.spinner {
  display: inline-block;
}

/* Empty/result message text. */
.text {
  color: var(--sidebar-search-result-text-color);
  padding: calc(var(--default-padding-large) * 2) var(--default-padding);
  text-align: center;

  & b {
    font-weight: 500;
  }
}

.textEmoji {
  display: inline-block;
  margin-bottom: 1rem;
}

/* Error message block. */
.error {
  display: block;
  text-align: center;

  & b {
    font-weight: 500;
  }
}

/* Section header row above a group of results. */
.header {
  background-color: var(--sidebar-search-result-header-background);
  color: var(--sidebar-search-result-header-color);
  padding: 6px 14px;
  line-height: 20px;
  font-size: var(--sidebar-search-result-header-font-size);
}
| {
"pile_set_name": "Github"
} |
// Learning Processing
// Daniel Shiffman
// http://www.learningprocessing.com
// Exercise 14-2: Run any Processing sketch in JAVA2D,
// then switch to P2D and P3D. Notice any difference?
| {
"pile_set_name": "Github"
} |
HTTP/1.1 200 OK
Connection: keep-alive
Server: Rapidoid
Date: XXXXX GMT
Content-Type: application/json
Content-Length: 63
{"address":"127.0.0.1","contextPath":"","home":"/","port":8080} | {
"pile_set_name": "Github"
} |
.TH "NPM\-INDEX" "7" "August 2018" "" ""
.SH "NAME"
\fBnpm-index\fR \- Index of all npm documentation
.SS npm help README
.P
a JavaScript package manager
.SH Command Line Documentation
.P
Using npm on the command line
.SS npm help npm
.P
javascript package manager
.SS npm help access
.P
Set access level on published packages
.SS npm help adduser
.P
Add a registry user account
.SS npm help audit
.P
Run a security audit
.SS npm help bin
.P
Display npm bin folder
.SS npm help bugs
.P
Bugs for a package in a web browser maybe
.SS npm help build
.P
Build a package
.SS npm help bundle
.P
REMOVED
.SS npm help cache
.P
Manipulates packages cache
.SS npm help ci
.P
Install a project with a clean slate
.SS npm help completion
.P
Tab Completion for npm
.SS npm help config
.P
Manage the npm configuration files
.SS npm help dedupe
.P
Reduce duplication
.SS npm help deprecate
.P
Deprecate a version of a package
.SS npm help dist\-tag
.P
Modify package distribution tags
.SS npm help docs
.P
Docs for a package in a web browser maybe
.SS npm help doctor
.P
Check your environments
.SS npm help edit
.P
Edit an installed package
.SS npm help explore
.P
Browse an installed package
.SS npm help help\-search
.P
Search npm help documentation
.SS npm help help
.P
Get help on npm
.SS npm help hook
.P
Manage registry hooks
.SS npm help init
.P
create a package\.json file
.SS npm help install\-ci\-test
.P
Install a project with a clean slate and run tests
.SS npm help install\-test
.P
Install package(s) and run tests
.SS npm help install
.P
Install a package
.SS npm help link
.P
Symlink a package folder
.SS npm help logout
.P
Log out of the registry
.SS npm help ls
.P
List installed packages
.SS npm help outdated
.P
Check for outdated packages
.SS npm help owner
.P
Manage package owners
.SS npm help pack
.P
Create a tarball from a package
.SS npm help ping
.P
Ping npm registry
.SS npm help prefix
.P
Display prefix
.SS npm help profile
.P
Change settings on your registry profile
.SS npm help prune
.P
Remove extraneous packages
.SS npm help publish
.P
Publish a package
.SS npm help rebuild
.P
Rebuild a package
.SS npm help repo
.P
Open package repository page in the browser
.SS npm help restart
.P
Restart a package
.SS npm help root
.P
Display npm root
.SS npm help run\-script
.P
Run arbitrary package scripts
.SS npm help search
.P
Search for packages
.SS npm help shrinkwrap
.P
Lock down dependency versions for publication
.SS npm help star
.P
Mark your favorite packages
.SS npm help stars
.P
View packages marked as favorites
.SS npm help start
.P
Start a package
.SS npm help stop
.P
Stop a package
.SS npm help team
.P
Manage organization teams and team memberships
.SS npm help test
.P
Test a package
.SS npm help token
.P
Manage your authentication tokens
.SS npm help uninstall
.P
Remove a package
.SS npm help unpublish
.P
Remove a package from the registry
.SS npm help update
.P
Update a package
.SS npm help version
.P
Bump a package version
.SS npm help view
.P
View registry info
.SS npm help whoami
.P
Display npm username
.SH API Documentation
.P
Using npm in your Node programs
.SH Files
.P
File system structures npm uses
.SS npm help 5 folders
.P
Folder Structures Used by npm
.SS npm help 5 package\-locks
.P
An explanation of npm lockfiles
.SS npm help 5 shrinkwrap\.json
.P
A publishable lockfile
.SS npm help 5 npmrc
.P
The npm config files
.SS npm help 5 package\-lock\.json
.P
A manifestation of the manifest
.SS npm help 5 package\.json
.P
Specifics of npm's package\.json handling
.SH Misc
.P
Various other bits and bobs
.SS npm help 7 coding\-style
.P
npm's "funny" coding style
.SS npm help 7 config
.P
More than you probably want to know about npm configuration
.SS npm help 7 developers
.P
Developer Guide
.SS npm help 7 disputes
.P
Handling Module Name Disputes
.SS npm help 7 index
.P
Index of all npm documentation
.SS npm help 7 orgs
.P
Working with Teams & Orgs
.SS npm help 7 registry
.P
The JavaScript Package Registry
.SS npm help 7 scope
.P
Scoped packages
.SS npm help 7 scripts
.P
How npm handles the "scripts" field
.SS npm help 7 removing\-npm
.P
Cleaning the Slate
.SS npm help 7 semver
.P
The semantic versioner for npm
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.