text
stringlengths 2
99.9k
| meta
dict |
---|---|
// RUN: %clang_cc1 -std=c++1y %s -verify
// Tests for C++14 binary integer literals (0b/0B prefix): value, type
// deduction, interaction with suffixes and user-defined literals, and
// the diagnostics for malformed literals.
static_assert(0b1001 == 9, "");
// An unsuffixed binary literal that fits in int has type int.
using I = int;
using I = decltype(0b101001);
// Standard integer suffixes combine with binary literals as usual.
using ULL = unsigned long long;
using ULL = decltype(0b10101001ULL);
constexpr unsigned long long operator""_foo(unsigned long long n) {
  return n * 2;
}
// 0b10001111 == 143, doubled by _foo.
static_assert(0b10001111_foo == 286, "");
int k1 = 0b1234; // expected-error {{invalid digit '2' in binary constant}}
// FIXME: If we ever need to support a standard suffix starting with [a-f],
// we'll need to rework our binary literal parsing rules.
int k2 = 0b10010f; // expected-error {{invalid digit 'f' in binary constant}}
int k3 = 0b10010g; // expected-error {{invalid suffix 'g' on integer constant}}
int k4 = 0b; // expected-error {{invalid digit 'b' in octal constant}}
| {
"pile_set_name": "Github"
} |
import { defineMessages } from 'react-intl';
// react-intl message descriptors for the remote-work frequency select
// options. `id` is the translation key; `defaultMessage` is the fallback
// string used when no translation exists for the active locale.
export const remoteSelectTranslations = defineMessages({
  never: {
    id: 'Developer.RemoteFrequency.Select.Never',
    defaultMessage: 'No'
  },
  occasionally: {
    id: 'Developer.RemoteFrequency.Select.Occasionally',
    defaultMessage: 'Occasionally'
  },
  regularly: {
    id: 'Developer.RemoteFrequency.Select.Regularly',
    defaultMessage: 'Regularly'
  },
  fullTime: {
    id: 'Developer.RemoteFrequency.Select.FullTime',
    defaultMessage: 'Full-time'
  },
  // NOTE(review): key is `others` but the id ends in `.Other` — looks
  // intentional (same pattern in the display messages below); confirm
  // against call sites before renaming either.
  others: {
    id: 'Developer.RemoteFrequency.Select.Other',
    defaultMessage: 'Other'
  }
});
// react-intl message descriptors for displaying a developer's chosen
// remote-work frequency. `id` is the translation key; `defaultMessage`
// is the English fallback shown when no translation exists for the
// active locale.
export const remoteDisplayTranslations = defineMessages({
  never: {
    id: 'Developer.RemoteFrequency.Display.never',
    defaultMessage: "I'm not interested by remote work"
  },
  occasionally: {
    id: 'Developer.RemoteFrequency.Display.occasionally',
    defaultMessage: "I'm interested by occasional remote work (a few times a month)"
  },
  regularly: {
    id: 'Developer.RemoteFrequency.Display.regularly',
    defaultMessage: "I'm interested by regular remote work (a few times a week)"
  },
  fullTime: {
    id: 'Developer.RemoteFrequency.Display.fullTime',
    defaultMessage: "I'm interested by full-time remote work"
  },
  others: {
    id: 'Developer.RemoteFrequency.Display.other',
    // FIX: was the French 'Autres'; every other default message here is
    // English, so the untranslated fallback should be English as well.
    defaultMessage: 'Other'
  }
});
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 2fb5a2cdb138579498eb20d8b7818ad8
timeCreated: 1455373898
licenseType: Store
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
package github.com/sturluson/testpkg
require {
github.com/athas/fut-baz 0.2.0 #44da85224224d37803976c1d30cedb1d2cd20b74
}
| {
"pile_set_name": "Github"
} |
import Foundation
// Fixture type exercising both spellings of a `lazy` stored property:
// a plain literal initializer and an immediately-invoked closure
// initializer. NOTE(review): the two distinct forms appear deliberate
// (likely a debugger/compiler test subject) — keep both as written.
class ObjectWithLazyProperty {
    init() {}
    // Initialized with a literal expression on first access.
    lazy var value: String = "hello"
    // Initialized by invoking the closure once on first access.
    lazy var anotherValue: String = { return "world" }()
}
| {
"pile_set_name": "Github"
} |
;; Start a ClojureScript REPL backed by a Node.js evaluation environment.
(require
  '[cljs.repl]
  '[cljs.repl.node])
(cljs.repl/repl
  (cljs.repl.node/repl-env)
  ;; Compiler output goes to "out"; analysis caching speeds up restarts.
  :output-dir "out"
  :cache-analysis true)
| {
"pile_set_name": "Github"
} |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net
import (
"strings"
"k8s.io/apimachinery/pkg/util/sets"
)
var validSchemes = sets.NewString("http", "https", "")
// SplitSchemeNamePort takes a string of the following forms:
// * "<name>", returns "", "<name>","", true
// * "<name>:<port>", returns "", "<name>","<port>",true
// * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
//
// Name must be non-empty or valid will be returned false.
// Scheme must be "http" or "https" if specified
// Port is returned as a string, and it is not required to be numeric (could be
// used for a named port, for example).
func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
	segments := strings.Split(id, ":")
	switch len(segments) {
	case 1:
		name = segments[0]
	case 2:
		name, port = segments[0], segments[1]
	case 3:
		scheme, name, port = segments[0], segments[1], segments[2]
	default:
		// More than two ':' separators — not one of the supported forms.
		return "", "", "", false
	}
	// A name is mandatory, and any scheme must be one of the allowed ones
	// ("http", "https", or empty).
	if name == "" || !validSchemes.Has(scheme) {
		return "", "", "", false
	}
	return scheme, name, port, true
}
// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
// * "<name>"
// * "<name>:<port>"
// * "<scheme>:<name>:<port>"
// None of the parameters may contain a ':' character
// Name is required
// Scheme must be "", "http", or "https"
func JoinSchemeNamePort(scheme, name, port string) string {
	switch {
	case scheme != "":
		// A scheme forces the full three-segment form, even if port is empty.
		return strings.Join([]string{scheme, name, port}, ":")
	case port != "":
		// No scheme: two segments are enough to carry the port.
		return name + ":" + port
	default:
		// Neither scheme nor port: the name stands alone.
		return name
	}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.serialization
@Suppress("ArrayInDataClass")
internal data class InterfacePropertyBinding(private val allowedTypes: Array<Class<*>>) : Binding {
// Root-level (de)serialization is unsupported: this binding only makes sense
// as a property binding on a host object, so both root entry points fail fast.
override fun serialize(obj: Any, context: WriteContext) = throw IllegalStateException("InterfacePropertyBinding cannot be used as root binding")
override fun deserialize(context: ReadContext, hostObject: Any?) = throw IllegalStateException("InterfacePropertyBinding cannot be used as root binding")
// Serializes the value of [property] on [hostObject]. The value's concrete
// class must be one of [allowedTypes]; its simple name is recorded as a type
// annotation so deserialization can resolve the matching class later.
override fun serialize(hostObject: Any, property: MutableAccessor, context: WriteContext) {
    write(hostObject, property, context) { value ->
        val valueClass = value.javaClass
        if (!allowedTypes.contains(valueClass)) {
            throw SerializationException("Type $valueClass is not allowed for field ${property.name}")
        }
        // Record the concrete type so deserialize() can pick it by simple name.
        addTypeAnnotation(valueClass.simpleName)
        context.bindingProducer.getRootBinding(valueClass).serialize(value, context)
    }
}
// Deserializes [property] of [hostObject]: reads the stored type annotation,
// resolves it against [allowedTypes] by simple class name, then delegates to
// that class's root binding. Throws if the annotation is missing or unknown.
// NOTE(review): matching is by simple name only — assumes simple names are
// unique within allowedTypes; confirm with callers.
override fun deserialize(hostObject: Any, property: MutableAccessor, context: ReadContext) {
    read(hostObject, property, context) {
        val beanClass: Class<*>
        val typeAnnotationIterator = iterateTypeAnnotations()
        if (typeAnnotationIterator.hasNext()) {
            val simpleName = typeAnnotationIterator.next()
            beanClass = allowedTypes.firstOrNull { it.simpleName == simpleName } ?: throw SerializationException(
                "Unknown class simple name: $simpleName (allowedClasses=$allowedTypes)")
        }
        else {
            throw SerializationException("Class simple name is not specified (allowedClasses=$allowedTypes)")
        }
        context.bindingProducer.getRootBinding(beanClass).deserialize(context, hostObject)
    }
}
} | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
<listEntry value="/OM/test/com/rr/om/session/fixsocket/TstFixExchangeSimulator.java"/>
</listAttribute>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="1"/>
</listAttribute>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="com.rr.om.session.fixsocket.TstFixExchangeSimulator"/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="OM"/>
</launchConfiguration>
| {
"pile_set_name": "Github"
} |
/***
* Copyright 2002-2010 jamod development team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***/
package net.wimpi.modbus.io;
import net.wimpi.modbus.Modbus;
import net.wimpi.modbus.ModbusException;
import net.wimpi.modbus.ModbusIOException;
import net.wimpi.modbus.ModbusSlaveException;
import net.wimpi.modbus.msg.ExceptionResponse;
import net.wimpi.modbus.msg.ModbusRequest;
import net.wimpi.modbus.msg.ModbusResponse;
import net.wimpi.modbus.net.UDPMasterConnection;
import net.wimpi.modbus.net.UDPTerminal;
import net.wimpi.modbus.util.AtomicCounter;
import net.wimpi.modbus.util.Mutex;
/**
* Class implementing the <tt>ModbusTransaction</tt>
* interface for the UDP transport mechanism.
*
* @author Dieter Wimberger
* @version @version@ (@date@)
*/
public class ModbusUDPTransaction implements ModbusTransaction {

    // class attributes
    // Shared counter producing transaction identifiers across all instances.
    private static AtomicCounter c_TransactionID = new AtomicCounter(Modbus.DEFAULT_TRANSACTION_ID);

    // instance attributes and associations
    private UDPTerminal m_Terminal;
    private ModbusTransport m_IO;
    private ModbusRequest m_Request;
    private ModbusResponse m_Response;
    private boolean m_ValidityCheck = Modbus.DEFAULT_VALIDITYCHECK;
    private int m_Retries = Modbus.DEFAULT_RETRIES;
    private int m_RetryCounter = 0;
    // Serializes execute() calls made on this transaction instance.
    private Mutex m_TransactionLock = new Mutex();
    // Delay between successive retry attempts; 0 by default.
    private long m_RetryDelayMillis;

    /**
     * Constructs a new <tt>ModbusUDPTransaction</tt>
     * instance.
     */
    public ModbusUDPTransaction() {
    }// constructor

    /**
     * Constructs a new <tt>ModbusUDPTransaction</tt>
     * instance with a given <tt>ModbusRequest</tt> to
     * be send when the transaction is executed.
     * <p/>
     *
     * @param request a <tt>ModbusRequest</tt> instance.
     */
    public ModbusUDPTransaction(ModbusRequest request) {
        setRequest(request);
    }// constructor

    /**
     * Constructs a new <tt>ModbusUDPTransaction</tt>
     * instance with a given <tt>UDPTerminal</tt> to
     * be used for transactions.
     * <p/>
     *
     * @param terminal a <tt>UDPTerminal</tt> instance.
     */
    public ModbusUDPTransaction(UDPTerminal terminal) {
        setTerminal(terminal);
    }// constructor

    /**
     * Constructs a new <tt>ModbusUDPTransaction</tt>
     * instance with a given <tt>ModbusUDPConnection</tt>
     * to be used for transactions.
     * <p/>
     *
     * @param con a <tt>ModbusUDPConnection</tt> instance.
     */
    public ModbusUDPTransaction(UDPMasterConnection con) {
        setTerminal(con.getTerminal());
    }// constructor

    /**
     * Sets the terminal on which this <tt>ModbusTransaction</tt>
     * should be executed. If the terminal is already active, its
     * transport is cached immediately; otherwise the transport is
     * obtained lazily in {@link #execute()} after activation.
     * <p>
     *
     * @param terminal a <tt>UDPSlaveTerminal</tt>.
     */
    public void setTerminal(UDPTerminal terminal) {
        m_Terminal = terminal;
        if (terminal.isActive()) {
            m_IO = terminal.getModbusTransport();
        }
    }// setConnection

    @Override
    public void setRequest(ModbusRequest req) {
        m_Request = req;
        // m_Response = req.getResponse();
    }// setRequest

    @Override
    public ModbusRequest getRequest() {
        return m_Request;
    }// getRequest

    @Override
    public ModbusResponse getResponse() {
        return m_Response;
    }// getResponse

    @Override
    public int getTransactionID() {
        return c_TransactionID.get();
    }// getTransactionID

    @Override
    public void setCheckingValidity(boolean b) {
        m_ValidityCheck = b;
    }// setCheckingValidity

    @Override
    public boolean isCheckingValidity() {
        return m_ValidityCheck;
    }// isCheckingValidity

    @Override
    public int getRetries() {
        return m_Retries;
    }// getRetries

    @Override
    public void setRetries(int num) {
        m_Retries = num;
    }// setRetries

    /**
     * Executes this transaction: activates the terminal if necessary,
     * writes the request and reads the response, retrying on I/O errors
     * up to the configured number of retries.
     *
     * @throws ModbusIOException    if all attempts fail with I/O errors,
     *                              activation fails, or the lock-acquiring
     *                              thread is interrupted.
     * @throws ModbusSlaveException if the slave answered with an exception
     *                              response.
     * @throws ModbusException      if the transaction is not executable or
     *                              validity checking fails.
     */
    @Override
    public void execute() throws ModbusIOException, ModbusSlaveException, ModbusException {

        // 1. assert executability
        assertExecutable();
        try {
            // 2. Lock transaction
            /**
             * Note: The way this explicit synchronization is implemented at the moment,
             * there is no ordering of pending threads. The Mutex will simply call notify()
             * and the JVM will handle the rest.
             */
            m_TransactionLock.acquire();

            // 3. open the connection if not connected
            if (!m_Terminal.isActive()) {
                try {
                    m_Terminal.activate();
                    m_IO = m_Terminal.getModbusTransport();
                } catch (Exception ex) {
                    throw new ModbusIOException("Activation failed.");
                }
            }

            // 4. Retry the transaction up to (m_Retries + 1) times in case
            //    of I/O exception problems.
            m_RetryCounter = 0;
            boolean responseReceived = false;
            while (m_RetryCounter <= m_Retries) {
                if (m_RetryCounter != 0) {
                    Thread.sleep(m_RetryDelayMillis);
                }
                try {
                    // toggle the id
                    m_Request.setTransactionID(c_TransactionID.increment());
                    // write request, and read response,
                    // while holding the lock on the IO object
                    synchronized (m_IO) {
                        // write request message
                        m_IO.writeMessage(m_Request);
                        // read response message
                        m_Response = m_IO.readResponse();
                        responseReceived = true;
                        break;
                    }
                } catch (ModbusIOException ex) {
                    m_RetryCounter++;
                    continue;
                }
            }
            // BUGFIX: previously an exhausted retry loop fell through
            // silently, leaving m_Response null (or stale from an earlier
            // execute()) and reporting no error; now the failure is raised.
            if (!responseReceived) {
                throw new ModbusIOException("Executing transaction failed (tried " + (m_Retries + 1) + " times)");
            }

            // 5. deal with "application level" exceptions
            if (m_Response instanceof ExceptionResponse) {
                throw new ModbusSlaveException(((ExceptionResponse) m_Response).getExceptionCode());
            }
            if (isCheckingValidity()) {
                checkValidity();
            }
        } catch (InterruptedException ex) {
            throw new ModbusIOException("Thread acquiring lock was interrupted.");
        } finally {
            m_TransactionLock.release();
        }
    }// execute

    /**
     * Asserts if this <tt>ModbusTCPTransaction</tt> is
     * executable.
     *
     * @throws ModbusException if this transaction cannot be
     *                         asserted as executable.
     */
    private void assertExecutable() throws ModbusException {
        if (m_Request == null || m_Terminal == null) {
            throw new ModbusException("Assertion failed, transaction not executable");
        }
    }// assertExecuteable

    /**
     * Checks the validity of the transaction, by
     * checking if the values of the response correspond
     * to the values of the request.
     * Use an override to provide some checks, this method will only return.
     *
     * @throws ModbusException if this transaction has not been valid.
     */
    protected void checkValidity() throws ModbusException {
    }// checkValidity

    @Override
    public long getRetryDelayMillis() {
        return m_RetryDelayMillis;
    }

    @Override
    public void setRetryDelayMillis(long retryDelayMillis) {
        this.m_RetryDelayMillis = retryDelayMillis;
    }
}// class ModbusUDPTransaction
| {
"pile_set_name": "Github"
} |
// mem_alert_batch
// metric: used_percent
// available_fields: "active","available","available_percent","buffered","cached","free","inactive","total","used"
// TELEGRAF CONFIGURATION
// [[inputs.mem]]
// DEFINE: kapacitor define mem_alert_batch -type batch -tick mem/mem_alert_batch.tick -dbrp telegraf.autogen
// ENABLE: kapacitor enable mem_alert_batch
// Parameters
var info = 70
var warn = 85
var crit = 92
var infoSig = 2.5
var warnSig = 3
var critSig = 3.5
var period = 10s
var every = 10s
// Dataframe
var data = batch
|query('''SELECT mean(used_percent) AS stat FROM "telegraf"."autogen"."mem" ''')
.period(period)
.every(every)
.groupBy('host')
// Thresholds
var alert = data
|eval(lambda: sigma("stat"))
.as('sigma')
.keep()
|alert()
.id('{{ index .Tags "host"}}/mem_used')
.message('{{ .ID }}:{{ index .Fields "stat" }}')
.info(lambda: "stat" > info OR "sigma" > infoSig)
.warn(lambda: "stat" > warn OR "sigma" > warnSig)
.crit(lambda: "stat" > crit OR "sigma" > critSig)
// Alert
alert
.log('/tmp/mem_alert_log.txt') | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<android.support.v7.widget.RecyclerView
xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/history_comic_list"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:paddingTop="8dp"
android:paddingLeft="4dp"
android:paddingRight="4dp"/> | {
"pile_set_name": "Github"
} |
<?php
/**
 * Test case carrying a method-level @covers annotation: testSomething()
 * declares that it covers only CoveredClass::publicMethod, which it then
 * invokes. NOTE(review): the body appears deliberately minimal (likely a
 * fixture for coverage-annotation processing) — keep as written.
 */
class CoverageMethodTest extends PHPUnit_Framework_TestCase
{
    /**
     * @covers CoveredClass::publicMethod
     */
    public function testSomething()
    {
        $o = new CoveredClass;
        $o->publicMethod();
    }
}
| {
"pile_set_name": "Github"
} |
# Norwegian translations for dnsmasq package.
# This file is put in the public domain.
# Simon Kelley <[email protected]>, 2006.
#
# Current translator: Jan Erik Askildt <[email protected]>, 2006
#
msgid ""
msgstr ""
"Project-Id-Version: dnsmasq 2.25\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2009-06-18 12:24+0100\n"
"PO-Revision-Date: 2006-01-11 17:39+0000\n"
"Last-Translator: Jan Erik Askildt <[email protected]>\n"
"Language-Team: Norwegian <[email protected]>\n"
"Language: no\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: cache.c:513
msgid "Internal error in cache."
msgstr ""
#: cache.c:922
#, fuzzy, c-format
msgid "failed to load names from %s: %s"
msgstr "feilet å laste navn fra %s: %s"
#: cache.c:948 dhcp.c:835
#, c-format
msgid "bad address at %s line %d"
msgstr "dårlig adresse ved %s linje %d"
#: cache.c:1001 dhcp.c:851
#, c-format
msgid "bad name at %s line %d"
msgstr "dårlig navn ved %s linje %d"
#: cache.c:1010 dhcp.c:926
#, c-format
msgid "read %s - %d addresses"
msgstr "les %s - %d adresser"
#: cache.c:1122
msgid "cleared cache"
msgstr "mellomlager tømt"
#: cache.c:1151
#, c-format
msgid "No IPv4 address found for %s"
msgstr ""
#: cache.c:1229
#, c-format
msgid "%s is a CNAME, not giving it to the DHCP lease of %s"
msgstr ""
#: cache.c:1253
#, c-format
msgid "not giving name %s to the DHCP lease of %s because the name exists in %s with address %s"
msgstr "gir ikke navnet %s til DHCP leien for %s fordi navnet eksisterer i %s med adressen %s"
#: cache.c:1408
#, c-format
msgid "time %lu"
msgstr ""
#: cache.c:1409
#, fuzzy, c-format
msgid "cache size %d, %d/%d cache insertions re-used unexpired cache entries."
msgstr "mellomlager størrelse %d, %d/%d mellomlager innsettinger re-bruker mellomlager plasser som ikke er utløpt"
#: cache.c:1411
#, c-format
msgid "queries forwarded %u, queries answered locally %u"
msgstr ""
#: cache.c:1414
#, c-format
msgid "queries for authoritative zones %u"
msgstr ""
#: cache.c:1440
#, c-format
msgid "server %s#%d: queries sent %u, retried or failed %u"
msgstr ""
#: util.c:45
#, fuzzy, c-format
msgid "failed to seed the random number generator: %s"
msgstr "feilet å lytte på socket: %s"
#: util.c:205
#, fuzzy
msgid "failed to allocate memory"
msgstr "feilet å laste %d bytes"
#: util.c:250 option.c:616
msgid "could not get memory"
msgstr "kunne ikke få minne"
#: util.c:260
#, fuzzy, c-format
msgid "cannot create pipe: %s"
msgstr "kan ikke lese %s: %s"
#: util.c:268
#, fuzzy, c-format
msgid "failed to allocate %d bytes"
msgstr "feilet å laste %d bytes"
#: util.c:437
#, c-format
msgid "infinite"
msgstr "uendelig"
#: option.c:342
msgid "Specify local address(es) to listen on."
msgstr "Spesifiser lokal(e) adresse(r) å lytte på."
#: option.c:343
msgid "Return ipaddr for all hosts in specified domains."
msgstr "Returner ipaddr for alle verter i det spesifiserte domenet."
#: option.c:344
msgid "Fake reverse lookups for RFC1918 private address ranges."
msgstr "Forfalsk revers oppslag for RFC1918 private adresse områder."
#: option.c:345
msgid "Treat ipaddr as NXDOMAIN (defeats Verisign wildcard)."
msgstr "Behandle ipaddr som NXDOMAIN (omgår Verisign wildcard)."
#: option.c:346
#, c-format
msgid "Specify the size of the cache in entries (defaults to %s)."
msgstr "Spesifiser størrelsen på mellomlager plassene (standard er %s)."
#: option.c:347
#, c-format
msgid "Specify configuration file (defaults to %s)."
msgstr "Spesifiser konfigurasjonsfil (standard er %s)."
#: option.c:348
msgid "Do NOT fork into the background: run in debug mode."
msgstr "IKKE legg (fork) som bakgrunnsprosess: kjør i debug modus."
#: option.c:349
msgid "Do NOT forward queries with no domain part."
msgstr "IKKE videresend oppslag som mangler domene del."
#: option.c:350
msgid "Return self-pointing MX records for local hosts."
msgstr "Returner selv-pekende MX post for lokale verter."
#: option.c:351
msgid "Expand simple names in /etc/hosts with domain-suffix."
msgstr "Utvid enkle navn i /etc/hosts med domene-suffiks."
#: option.c:352
msgid "Don't forward spurious DNS requests from Windows hosts."
msgstr "Ikke videresend falske/uekte DNS forespørsler fra Windows verter."
#: option.c:353
msgid "Enable DHCP in the range given with lease duration."
msgstr "Aktiver DHCP i det gitte området med leie varighet"
#: option.c:354
#, c-format
msgid "Change to this group after startup (defaults to %s)."
msgstr "Skift til denne gruppen etter oppstart (standard er %s)."
#: option.c:355
msgid "Set address or hostname for a specified machine."
msgstr "Sett adresse eller vertsnavn for en spesifikk maskin."
#: option.c:356
#, fuzzy
msgid "Read DHCP host specs from file."
msgstr "dårlig MX navn"
#: option.c:357
msgid "Read DHCP option specs from file."
msgstr ""
#: option.c:358
#, fuzzy
msgid "Read DHCP host specs from a directory."
msgstr "dårlig MX navn"
#: option.c:359
#, fuzzy
msgid "Read DHCP options from a directory."
msgstr "dårlig MX navn"
#: option.c:360
msgid "Evaluate conditional tag expression."
msgstr ""
#: option.c:361
#, c-format
msgid "Do NOT load %s file."
msgstr "IKKE last %s filen."
#: option.c:362
#, c-format
msgid "Specify a hosts file to be read in addition to %s."
msgstr "Spesifiser en verts (hosts) fil som skal leses i tilleg til %s."
#: option.c:363
#, fuzzy
msgid "Read hosts files from a directory."
msgstr "dårlig MX navn"
#: option.c:364
msgid "Specify interface(s) to listen on."
msgstr "Spesifiser nettverkskort det skal lyttes på."
#: option.c:365
msgid "Specify interface(s) NOT to listen on."
msgstr "Spesifiser nettverkskort det IKKE skal lyttes på."
#: option.c:366
#, fuzzy
msgid "Map DHCP user class to tag."
msgstr "Map DHCP bruker klasse til opsjon sett."
#: option.c:367
msgid "Map RFC3046 circuit-id to tag."
msgstr ""
#: option.c:368
msgid "Map RFC3046 remote-id to tag."
msgstr ""
#: option.c:369
msgid "Map RFC3993 subscriber-id to tag."
msgstr ""
#: option.c:370
#, fuzzy
msgid "Don't do DHCP for hosts with tag set."
msgstr "Ikke utfør DHCP for klienter i opsjon sett."
#: option.c:371
#, fuzzy
msgid "Force broadcast replies for hosts with tag set."
msgstr "Ikke utfør DHCP for klienter i opsjon sett."
#: option.c:372
msgid "Do NOT fork into the background, do NOT run in debug mode."
msgstr "IKKE last (fork) som bakgrunnsprosess, IKKE kjør i debug modus."
#: option.c:373
msgid "Assume we are the only DHCP server on the local network."
msgstr "Anta at vi er den eneste DHCP tjeneren på det lokale nettverket."
#: option.c:374
#, c-format
msgid "Specify where to store DHCP leases (defaults to %s)."
msgstr "Spesifiser hvor DHCP leiene skal lagres (standard er %s)."
#: option.c:375
msgid "Return MX records for local hosts."
msgstr "Returner MX records for lokale verter."
#: option.c:376
msgid "Specify an MX record."
msgstr "Spesifiser en MX post."
#: option.c:377
msgid "Specify BOOTP options to DHCP server."
msgstr "Spesifiser BOOTP opsjoner til DHCP tjener."
#: option.c:378
#, c-format
msgid "Do NOT poll %s file, reload only on SIGHUP."
msgstr "IKKE spør (poll) %s fil, les på nytt kun ved SIGHUP"
#: option.c:379
msgid "Do NOT cache failed search results."
msgstr "IKKE mellomlagre søkeresultater som feiler."
#: option.c:380
#, c-format
msgid "Use nameservers strictly in the order given in %s."
msgstr "Bruk navnetjenere kun som bestemt i rekkefølgen gitt i %s."
#: option.c:381
#, fuzzy
msgid "Specify options to be sent to DHCP clients."
msgstr "Sett ekstra opsjoner som skal fordeles til DHCP klientene."
#: option.c:382
msgid "DHCP option sent even if the client does not request it."
msgstr ""
#: option.c:383
msgid "Specify port to listen for DNS requests on (defaults to 53)."
msgstr "Spesifiser lytteport for DNS oppslag (standard er 53)."
#: option.c:384
#, c-format
msgid "Maximum supported UDP packet size for EDNS.0 (defaults to %s)."
msgstr "Maksimal støttet UDP pakkestørrelse for EDNS.0 (standard er %s)."
#: option.c:385
#, fuzzy
msgid "Log DNS queries."
msgstr "Logg oppslag."
#: option.c:386
#, fuzzy
msgid "Force the originating port for upstream DNS queries."
msgstr "Tving bruk av opprinnelig port for oppstrøms oppslag."
#: option.c:387
msgid "Do NOT read resolv.conf."
msgstr "IKKE les resolv.conf."
#: option.c:388
#, c-format
msgid "Specify path to resolv.conf (defaults to %s)."
msgstr "Spesifiser stien til resolv.conf (standard er %s)."
#: option.c:389
#, fuzzy
msgid "Specify path to file with server= options"
msgstr "Spesifiser stien til PID fil. (standard er %s)."
#: option.c:390
msgid "Specify address(es) of upstream servers with optional domains."
msgstr "Spesifiser adressen(e) til oppstrøms tjenere med valgfrie domener."
#: option.c:391
#, fuzzy
msgid "Specify address of upstream servers for reverse address queries"
msgstr "Spesifiser adressen(e) til oppstrøms tjenere med valgfrie domener."
#: option.c:392
msgid "Never forward queries to specified domains."
msgstr "Aldri videresend oppslag til spesifiserte domener."
#: option.c:393
msgid "Specify the domain to be assigned in DHCP leases."
msgstr "Spesifiser domenet som skal tildeles i DHCP leien."
#: option.c:394
msgid "Specify default target in an MX record."
msgstr "Spesifiser default mål i en MX post."
#: option.c:395
msgid "Specify time-to-live in seconds for replies from /etc/hosts."
msgstr "Spesifiser time-to-live i sekunder for svar fra /etc/hosts."
#: option.c:396
#, fuzzy
msgid "Specify time-to-live in seconds for negative caching."
msgstr "Spesifiser time-to-live i sekunder for svar fra /etc/hosts."
#: option.c:397
#, fuzzy
msgid "Specify time-to-live in seconds for maximum TTL to send to clients."
msgstr "Spesifiser time-to-live i sekunder for svar fra /etc/hosts."
#: option.c:398
#, fuzzy
msgid "Specify time-to-live ceiling for cache."
msgstr "Spesifiser time-to-live i sekunder for svar fra /etc/hosts."
#: option.c:399
#, fuzzy
msgid "Specify time-to-live floor for cache."
msgstr "Spesifiser time-to-live i sekunder for svar fra /etc/hosts."
#: option.c:400
#, c-format
msgid "Change to this user after startup. (defaults to %s)."
msgstr "Skift til denne bruker etter oppstart (standard er %s)."
#: option.c:401
#, fuzzy
msgid "Map DHCP vendor class to tag."
msgstr "Map DHCP produsent klasse til opsjon sett."
#: option.c:402
msgid "Display dnsmasq version and copyright information."
msgstr "Vis dnsmasq versjon og copyright informasjon."
#: option.c:403
msgid "Translate IPv4 addresses from upstream servers."
msgstr "Oversett IPv4 adresser fra oppstrøms tjenere."
#: option.c:404
msgid "Specify a SRV record."
msgstr "Spesifiser en SRV post."
#: option.c:405
msgid "Display this message. Use --help dhcp or --help dhcp6 for known DHCP options."
msgstr ""
#: option.c:406
#, fuzzy, c-format
msgid "Specify path of PID file (defaults to %s)."
msgstr "Spesifiser stien til PID fil. (standard er %s)."
#: option.c:407
#, c-format
msgid "Specify maximum number of DHCP leases (defaults to %s)."
msgstr "Spesifiser maksimum antall DHCP leier (standard er %s)"
#: option.c:408
msgid "Answer DNS queries based on the interface a query was sent to."
msgstr "Svar DNS oppslag basert på nettverkskortet oppslaget ble sendt til."
#: option.c:409
msgid "Specify TXT DNS record."
msgstr "Spesifiser TXT DNS post."
#: option.c:410
#, fuzzy
msgid "Specify PTR DNS record."
msgstr "Spesifiser TXT DNS post."
#: option.c:411
msgid "Give DNS name to IPv4 address of interface."
msgstr ""
#: option.c:412
msgid "Bind only to interfaces in use."
msgstr "Bind kun til nettverkskort som er i bruk."
#: option.c:413
#, c-format
msgid "Read DHCP static host information from %s."
msgstr "Les DHCP statisk vert informasjon fra %s."
#: option.c:414
msgid "Enable the DBus interface for setting upstream servers, etc."
msgstr "Aktiver DBus interface for å sette oppstrøms tjenere, osv."
#: option.c:415
msgid "Do not provide DHCP on this interface, only provide DNS."
msgstr "Ikke lever DHCP på dette nettverkskortet, kun lever DNS."
#: option.c:416
msgid "Enable dynamic address allocation for bootp."
msgstr "Aktiver dynamisk adresse allokering for bootp."
#: option.c:417
#, fuzzy
msgid "Map MAC address (with wildcards) to option set."
msgstr "Map DHCP produsent klasse til opsjon sett."
#: option.c:418
msgid "Treat DHCP requests on aliases as arriving from interface."
msgstr ""
#: option.c:419
msgid "Disable ICMP echo address checking in the DHCP server."
msgstr ""
#: option.c:420
msgid "Shell script to run on DHCP lease creation and destruction."
msgstr ""
#: option.c:421
msgid "Lua script to run on DHCP lease creation and destruction."
msgstr ""
#: option.c:422
msgid "Run lease-change scripts as this user."
msgstr ""
#: option.c:423
msgid "Call dhcp-script with changes to local ARP table."
msgstr ""
#: option.c:424
msgid "Read configuration from all the files in this directory."
msgstr ""
#: option.c:425
#, fuzzy
msgid "Log to this syslog facility or file. (defaults to DAEMON)"
msgstr "Skift til denne bruker etter oppstart (standard er %s)."
#: option.c:426
msgid "Do not use leasefile."
msgstr ""
#: option.c:427
#, fuzzy, c-format
msgid "Maximum number of concurrent DNS queries. (defaults to %s)"
msgstr "Spesifiser maksimum antall DHCP leier (standard er %s)"
#: option.c:428
#, c-format
msgid "Clear DNS cache when reloading %s."
msgstr ""
#: option.c:429
msgid "Ignore hostnames provided by DHCP clients."
msgstr ""
#: option.c:430
msgid "Do NOT reuse filename and server fields for extra DHCP options."
msgstr ""
#: option.c:431
msgid "Enable integrated read-only TFTP server."
msgstr ""
#: option.c:432
msgid "Export files by TFTP only from the specified subtree."
msgstr ""
#: option.c:433
msgid "Add client IP address to tftp-root."
msgstr ""
#: option.c:434
msgid "Allow access only to files owned by the user running dnsmasq."
msgstr ""
#: option.c:435
msgid "Do not terminate the service if TFTP directories are inaccessible."
msgstr ""
#: option.c:436
#, fuzzy, c-format
msgid "Maximum number of conncurrent TFTP transfers (defaults to %s)."
msgstr "Spesifiser maksimum antall DHCP leier (standard er %s)"
#: option.c:437
#, fuzzy
msgid "Maximum MTU to use for TFTP transfers."
msgstr "Spesifiser maksimum antall DHCP leier (standard er %s)"
#: option.c:438
msgid "Disable the TFTP blocksize extension."
msgstr ""
#: option.c:439
msgid "Convert TFTP filenames to lowercase"
msgstr ""
#: option.c:440
msgid "Ephemeral port range for use by TFTP transfers."
msgstr ""
#: option.c:441
msgid "Extra logging for DHCP."
msgstr ""
#: option.c:442
msgid "Enable async. logging; optionally set queue length."
msgstr ""
#: option.c:443
msgid "Stop DNS rebinding. Filter private IP ranges when resolving."
msgstr ""
#: option.c:444
msgid "Allow rebinding of 127.0.0.0/8, for RBL servers."
msgstr ""
#: option.c:445
msgid "Inhibit DNS-rebind protection on this domain."
msgstr ""
#: option.c:446
msgid "Always perform DNS queries to all servers."
msgstr ""
#: option.c:447
msgid "Set tag if client includes matching option in request."
msgstr ""
#: option.c:448
msgid "Use alternative ports for DHCP."
msgstr ""
#: option.c:449
#, fuzzy
msgid "Specify NAPTR DNS record."
msgstr "Spesifiser TXT DNS post."
#: option.c:450
msgid "Specify lowest port available for DNS query transmission."
msgstr ""
#: option.c:451
msgid "Specify highest port available for DNS query transmission."
msgstr ""
#: option.c:452
msgid "Use only fully qualified domain names for DHCP clients."
msgstr ""
#: option.c:453
msgid "Generate hostnames based on MAC address for nameless clients."
msgstr ""
#: option.c:454
msgid "Use these DHCP relays as full proxies."
msgstr ""
#: option.c:455
msgid "Relay DHCP requests to a remote server"
msgstr ""
#: option.c:456
msgid "Specify alias name for LOCAL DNS name."
msgstr ""
#: option.c:457
#, fuzzy
msgid "Prompt to send to PXE clients."
msgstr "Sett ekstra opsjoner som skal fordeles til DHCP klientene."
#: option.c:458
msgid "Boot service for PXE menu."
msgstr ""
#: option.c:459
msgid "Check configuration syntax."
msgstr ""
#: option.c:460
msgid "Add requestor's MAC address to forwarded DNS queries."
msgstr ""
#: option.c:461
msgid "Add specified IP subnet to forwarded DNS queries."
msgstr ""
#: option.c:462
#, fuzzy
msgid "Add client identification to forwarded DNS queries."
msgstr "Tving bruk av opprinnelig port for oppstrøms oppslag."
#: option.c:463
#, fuzzy
msgid "Proxy DNSSEC validation results from upstream nameservers."
msgstr "Oversett IPv4 adresser fra oppstrøms tjenere."
#: option.c:464
msgid "Attempt to allocate sequential IP addresses to DHCP clients."
msgstr ""
#: option.c:465
msgid "Copy connection-track mark from queries to upstream connections."
msgstr ""
#: option.c:466
msgid "Allow DHCP clients to do their own DDNS updates."
msgstr ""
#: option.c:467
msgid "Send router-advertisements for interfaces doing DHCPv6"
msgstr ""
#: option.c:468
msgid "Specify DUID_EN-type DHCPv6 server DUID"
msgstr ""
#: option.c:469
#, fuzzy
msgid "Specify host (A/AAAA and PTR) records"
msgstr "Spesifiser en MX post."
#: option.c:470
#, fuzzy
msgid "Specify arbitrary DNS resource record"
msgstr "Spesifiser TXT DNS post."
#: option.c:471
#, fuzzy
msgid "Bind to interfaces in use - check for new interfaces"
msgstr "ukjent tilknytning (interface) %s"
#: option.c:472
msgid "Export local names to global DNS"
msgstr ""
#: option.c:473
msgid "Domain to export to global DNS"
msgstr ""
#: option.c:474
msgid "Set TTL for authoritative replies"
msgstr ""
#: option.c:475
msgid "Set authoritive zone information"
msgstr ""
#: option.c:476
msgid "Secondary authoritative nameservers for forward domains"
msgstr ""
#: option.c:477
msgid "Peers which are allowed to do zone transfer"
msgstr ""
#: option.c:478
msgid "Specify ipsets to which matching domains should be added"
msgstr ""
#: option.c:479
msgid "Specify a domain and address range for synthesised names"
msgstr ""
#: option.c:480
msgid "Activate DNSSEC validation"
msgstr ""
#: option.c:481
msgid "Specify trust anchor key digest."
msgstr ""
#: option.c:482
msgid "Disable upstream checking for DNSSEC debugging."
msgstr ""
#: option.c:483
msgid "Ensure answers without DNSSEC are in unsigned zones."
msgstr ""
#: option.c:484
msgid "Don't check DNSSEC signature timestamps until first cache-reload"
msgstr ""
#: option.c:485
msgid "Timestamp file to verify system clock for DNSSEC"
msgstr ""
#: option.c:487
msgid "Specify DHCPv6 prefix class"
msgstr ""
#: option.c:489
msgid "Set priority, resend-interval and router-lifetime"
msgstr ""
#: option.c:490
msgid "Do not log routine DHCP."
msgstr ""
#: option.c:491
msgid "Do not log routine DHCPv6."
msgstr ""
#: option.c:492
msgid "Do not log RA."
msgstr ""
#: option.c:493
msgid "Accept queries only from directly-connected networks."
msgstr ""
#: option.c:494
msgid "Detect and remove DNS forwarding loops."
msgstr ""
#: option.c:495
msgid "Ignore DNS responses containing ipaddr."
msgstr ""
#: option.c:496
msgid "Set TTL in DNS responses with DHCP-derived addresses."
msgstr ""
#: option.c:698
#, c-format
msgid ""
"Usage: dnsmasq [options]\n"
"\n"
msgstr ""
"Bruk: dnsmasq [opsjoner]\n"
"\n"
#: option.c:700
#, c-format
msgid "Use short options only on the command line.\n"
msgstr "Bruk korte opsjoner kun på kommandolinjen.\n"
#: option.c:702
#, fuzzy, c-format
msgid "Valid options are:\n"
msgstr "Gyldige opsjoner er :\n"
#: option.c:749 option.c:843
#, fuzzy
msgid "bad address"
msgstr "les %s - %d adresser"
#: option.c:773 option.c:777
msgid "bad port"
msgstr "dårlig port"
#: option.c:804 option.c:836
msgid "interface binding not supported"
msgstr ""
#: option.c:813 option.c:3683
#, fuzzy
msgid "bad interface name"
msgstr "dårlig MX navn"
#: option.c:1025
msgid "unsupported encapsulation for IPv6 option"
msgstr ""
#: option.c:1039
msgid "bad dhcp-option"
msgstr "dårlig dhcp-opsjon"
#: option.c:1107
#, fuzzy
msgid "bad IP address"
msgstr "les %s - %d adresser"
#: option.c:1110 option.c:1249 option.c:3000
#, fuzzy
msgid "bad IPv6 address"
msgstr "les %s - %d adresser"
#: option.c:1203
#, fuzzy
msgid "bad IPv4 address"
msgstr "les %s - %d adresser"
#: option.c:1276 option.c:1370
msgid "bad domain in dhcp-option"
msgstr "dårlig domene i dhcp-opsjon"
#: option.c:1408
msgid "dhcp-option too long"
msgstr "dhcp-opsjon for lang"
#: option.c:1415
msgid "illegal dhcp-match"
msgstr ""
#: option.c:1477
msgid "illegal repeated flag"
msgstr ""
#: option.c:1485
msgid "illegal repeated keyword"
msgstr ""
#: option.c:1556 option.c:4306
#, fuzzy, c-format
msgid "cannot access directory %s: %s"
msgstr "kan ikke lese %s: %s"
#: option.c:1602 tftp.c:504
#, fuzzy, c-format
msgid "cannot access %s: %s"
msgstr "kan ikke lese %s: %s"
#: option.c:1690
msgid "setting log facility is not possible under Android"
msgstr ""
#: option.c:1699
msgid "bad log facility"
msgstr ""
#: option.c:1752
msgid "bad MX preference"
msgstr "dårlig MX preferanse"
#: option.c:1757
msgid "bad MX name"
msgstr "dårlig MX navn"
#: option.c:1771
msgid "bad MX target"
msgstr "dårlig MX mål"
#: option.c:1783
msgid "cannot run scripts under uClinux"
msgstr ""
#: option.c:1785
msgid "recompile with HAVE_SCRIPT defined to enable lease-change scripts"
msgstr ""
#: option.c:1789
msgid "recompile with HAVE_LUASCRIPT defined to enable Lua scripts"
msgstr ""
#: option.c:2041 option.c:2086 option.c:2142
#, fuzzy
msgid "bad prefix"
msgstr "dårlig port"
#: option.c:2443
msgid "recompile with HAVE_IPSET defined to enable ipset directives"
msgstr ""
#: option.c:2652
#, fuzzy
msgid "bad port range"
msgstr "dårlig port"
#: option.c:2668
msgid "bad bridge-interface"
msgstr ""
#: option.c:2728
msgid "only one tag allowed"
msgstr ""
#: option.c:2748 option.c:2760 option.c:2869 option.c:2874 option.c:2913
msgid "bad dhcp-range"
msgstr "dårlig dhcp-område"
#: option.c:2775
msgid "inconsistent DHCP range"
msgstr "ikke konsistent DHCP område"
#: option.c:2837
msgid "prefix length must be exactly 64 for RA subnets"
msgstr ""
#: option.c:2839
msgid "prefix length must be exactly 64 for subnet constructors"
msgstr ""
#: option.c:2843
msgid "prefix length must be at least 64"
msgstr ""
#: option.c:2846
#, fuzzy
msgid "inconsistent DHCPv6 range"
msgstr "ikke konsistent DHCP område"
#: option.c:2857
msgid "prefix must be zero with \"constructor:\" argument"
msgstr ""
#: option.c:2970 option.c:3018
#, fuzzy
msgid "bad hex constant"
msgstr "dårlig dhcp-vert"
#: option.c:2992
msgid "cannot match tags in --dhcp-host"
msgstr ""
#: option.c:3040
#, fuzzy, c-format
msgid "duplicate dhcp-host IP address %s"
msgstr "dubliserte IP adresser i %s dhcp-config direktiv."
#: option.c:3098
#, fuzzy
msgid "bad DHCP host name"
msgstr "dårlig MX navn"
#: option.c:3180
#, fuzzy
msgid "bad tag-if"
msgstr "dårlig MX mål"
#: option.c:3505 option.c:3903
msgid "invalid port number"
msgstr "ugyldig portnummer"
#: option.c:3567
#, fuzzy
msgid "bad dhcp-proxy address"
msgstr "les %s - %d adresser"
#: option.c:3593
#, fuzzy
msgid "Bad dhcp-relay"
msgstr "dårlig dhcp-område"
#: option.c:3619
msgid "bad RA-params"
msgstr ""
#: option.c:3628
msgid "bad DUID"
msgstr ""
#: option.c:3670
#, fuzzy
msgid "invalid alias range"
msgstr "ugyldig vekt"
#: option.c:3721
msgid "bad TTL"
msgstr ""
#: option.c:3727
msgid "bad CNAME"
msgstr ""
#: option.c:3732
msgid "duplicate CNAME"
msgstr ""
#: option.c:3753
#, fuzzy
msgid "bad PTR record"
msgstr "dårlig SRV post"
#: option.c:3784
#, fuzzy
msgid "bad NAPTR record"
msgstr "dårlig SRV post"
#: option.c:3818
#, fuzzy
msgid "bad RR record"
msgstr "dårlig SRV post"
#: option.c:3848
msgid "bad TXT record"
msgstr "dårlig TXT post"
#: option.c:3889
msgid "bad SRV record"
msgstr "dårlig SRV post"
#: option.c:3896
msgid "bad SRV target"
msgstr "dårlig SRV mål"
#: option.c:3910
msgid "invalid priority"
msgstr "ugyldig prioritet"
#: option.c:3913
msgid "invalid weight"
msgstr "ugyldig vekt"
#: option.c:3937
#, fuzzy
msgid "Bad host-record"
msgstr "dårlig SRV post"
#: option.c:3961
#, fuzzy
msgid "Bad name in host-record"
msgstr "dårlig navn i %s"
#: option.c:4026
#, fuzzy
msgid "bad trust anchor"
msgstr "dårlig port"
#: option.c:4040
msgid "bad HEX in trust anchor"
msgstr ""
#: option.c:4050
msgid "unsupported option (check that dnsmasq was compiled with DHCP/TFTP/DNSSEC/DBus support)"
msgstr ""
#: option.c:4109
msgid "missing \""
msgstr "mangler \""
#: option.c:4166
msgid "bad option"
msgstr "dårlig opsjon"
#: option.c:4168
msgid "extraneous parameter"
msgstr "overflødig parameter"
#: option.c:4170
msgid "missing parameter"
msgstr "mangler parameter"
#: option.c:4172
#, fuzzy
msgid "illegal option"
msgstr "dårlig opsjon"
#: option.c:4179
msgid "error"
msgstr "feil"
#: option.c:4181
#, fuzzy, c-format
msgid " at line %d of %s"
msgstr "%s på linje %d av %%s"
#: option.c:4196 option.c:4443 option.c:4479
#, fuzzy, c-format
msgid "read %s"
msgstr "leser %s"
#: option.c:4259 option.c:4382 tftp.c:678
#, c-format
msgid "cannot read %s: %s"
msgstr "kan ikke lese %s: %s"
#: option.c:4546
msgid "junk found in command line"
msgstr ""
#: option.c:4581
#, c-format
msgid "Dnsmasq version %s %s\n"
msgstr "Dnsmasq versjon %s %s\n"
#: option.c:4582
#, fuzzy, c-format
msgid ""
"Compile time options: %s\n"
"\n"
msgstr ""
"Kompileringsopsjoner %s\n"
"\n"
#: option.c:4583
#, c-format
msgid "This software comes with ABSOLUTELY NO WARRANTY.\n"
msgstr "Denne programvaren kommer med ABSOLUTT INGEN GARANTI.\n"
#: option.c:4584
#, c-format
msgid "Dnsmasq is free software, and you are welcome to redistribute it\n"
msgstr "Dnsmasq er fri programvare, du er velkommen til å redistribuere den\n"
#: option.c:4585
#, fuzzy, c-format
msgid "under the terms of the GNU General Public License, version 2 or 3.\n"
msgstr "under vilkårene gitt i GNU General Public License, versjon 2.\n"
#: option.c:4596
msgid "try --help"
msgstr ""
#: option.c:4598
msgid "try -w"
msgstr ""
#: option.c:4600
#, fuzzy, c-format
msgid "bad command line options: %s"
msgstr "dårlige kommandolinje opsjoner: %s"
#: option.c:4671
#, c-format
msgid "cannot get host-name: %s"
msgstr "klarer ikke å få vertsnavn: %s"
#: option.c:4699
msgid "only one resolv.conf file allowed in no-poll mode."
msgstr "kun en resolv.conf fil tillatt i no-poll modus."
#: option.c:4709
msgid "must have exactly one resolv.conf to read domain from."
msgstr "må ha nøyaktig en resolv.conf å lese domene fra."
#: option.c:4712 network.c:1564 dhcp.c:784
#, fuzzy, c-format
msgid "failed to read %s: %s"
msgstr "feilet å lese %s: %s"
#: option.c:4729
#, c-format
msgid "no search directive found in %s"
msgstr "intet søke direktiv funnet i %s"
#: option.c:4750
msgid "there must be a default domain when --dhcp-fqdn is set"
msgstr ""
#: option.c:4759
msgid "syntax check OK"
msgstr ""
#: forward.c:102
#, fuzzy, c-format
msgid "failed to send packet: %s"
msgstr "feilet å lytte på socket: %s"
#: forward.c:595
msgid "discarding DNS reply: subnet option mismatch"
msgstr ""
#: forward.c:649
#, c-format
msgid "nameserver %s refused to do a recursive query"
msgstr "navnetjener %s nektet å gjøre et rekursivt oppslag"
#: forward.c:681
#, c-format
msgid "possible DNS-rebind attack detected: %s"
msgstr ""
#: forward.c:1240 forward.c:1670
msgid "Ignoring query from non-local network"
msgstr ""
#: forward.c:2138
#, fuzzy, c-format
msgid "Maximum number of concurrent DNS queries reached (max: %d)"
msgstr "Spesifiser maksimum antall DHCP leier (standard er %s)"
#: network.c:716
#, fuzzy, c-format
msgid "failed to create listening socket for %s: %s"
msgstr "feilet å lage lytte socket: %s"
#: network.c:1027
#, c-format
msgid "LOUD WARNING: listening on %s may accept requests via interfaces other than %s"
msgstr ""
#: network.c:1034
msgid "LOUD WARNING: use --bind-dynamic rather than --bind-interfaces to avoid DNS amplification attacks via these interface(s)"
msgstr ""
#: network.c:1043
#, fuzzy, c-format
msgid "warning: no addresses found for interface %s"
msgstr "benytter lokale adresser kun for %s %s"
#: network.c:1101
#, fuzzy, c-format
msgid "interface %s failed to join DHCPv6 multicast group: %s"
msgstr "feilet å binde DHCP tjener socket: %s"
#: network.c:1106
msgid "try increasing /proc/sys/net/core/optmem_max"
msgstr ""
#: network.c:1302
#, fuzzy, c-format
msgid "failed to bind server socket for %s: %s"
msgstr "feilet å binde lytte socket for %s: %s"
#: network.c:1492
#, c-format
msgid "ignoring nameserver %s - local interface"
msgstr "ignorerer navnetjener %s - lokal tilknytning"
#: network.c:1503
#, fuzzy, c-format
msgid "ignoring nameserver %s - cannot make/bind socket: %s"
msgstr "ignorerer navnetjener %s - kan ikke lage/binde socket: %s"
#: network.c:1520
msgid "(no DNSSEC)"
msgstr ""
#: network.c:1523
msgid "unqualified"
msgstr "ikke kvalifisert"
#: network.c:1523
msgid "names"
msgstr ""
#: network.c:1525
msgid "default"
msgstr ""
#: network.c:1527
msgid "domain"
msgstr "domene"
#: network.c:1530
#, c-format
msgid "using local addresses only for %s %s"
msgstr "benytter lokale adresser kun for %s %s"
#: network.c:1532
#, fuzzy, c-format
msgid "using standard nameservers for %s %s"
msgstr "benytter navnetjener %s#%d for %s %s"
#: network.c:1534
#, fuzzy, c-format
msgid "using nameserver %s#%d for %s %s %s"
msgstr "benytter navnetjener %s#%d for %s %s"
#: network.c:1538
#, fuzzy, c-format
msgid "NOT using nameserver %s#%d - query loop detected"
msgstr "benytter navnetjener %s#%d for %s %s"
#: network.c:1541
#, fuzzy, c-format
msgid "using nameserver %s#%d(via %s)"
msgstr "benytter navnetjener %s#%d"
#: network.c:1543
#, c-format
msgid "using nameserver %s#%d"
msgstr "benytter navnetjener %s#%d"
#: network.c:1548
#, fuzzy, c-format
msgid "using %d more nameservers"
msgstr "benytter navnetjener %s#%d"
#: dnsmasq.c:166
msgid "dhcp-hostsdir, dhcp-optsdir and hostsdir are not supported on this platform"
msgstr ""
#: dnsmasq.c:181
msgid "no root trust anchor provided for DNSSEC"
msgstr ""
#: dnsmasq.c:184
msgid "cannot reduce cache size from default when DNSSEC enabled"
msgstr ""
#: dnsmasq.c:186
#, fuzzy
msgid "DNSSEC not available: set HAVE_DNSSEC in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:192
#, fuzzy
msgid "TFTP server not available: set HAVE_TFTP in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:197
msgid "cannot use --conntrack AND --query-port"
msgstr ""
#: dnsmasq.c:200
#, fuzzy
msgid "conntrack support not available: set HAVE_CONNTRACK in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:205
msgid "asychronous logging is not available under Solaris"
msgstr ""
#: dnsmasq.c:210
msgid "asychronous logging is not available under Android"
msgstr ""
#: dnsmasq.c:215
#, fuzzy
msgid "authoritative DNS not available: set HAVE_AUTH in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:220
#, fuzzy
msgid "loop detection not available: set HAVE_LOOP in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:227
msgid "max_port cannot be smaller than min_port"
msgstr ""
#: dnsmasq.c:234
msgid "zone serial must be configured in --auth-soa"
msgstr ""
#: dnsmasq.c:252
msgid "dhcp-range constructor not available on this platform"
msgstr ""
#: dnsmasq.c:298
msgid "cannot set --bind-interfaces and --bind-dynamic"
msgstr ""
#: dnsmasq.c:301
#, c-format
msgid "failed to find list of interfaces: %s"
msgstr "feilet å finne liste av tilknytninger (interfaces): %s"
#: dnsmasq.c:310
#, c-format
msgid "unknown interface %s"
msgstr "ukjent tilknytning (interface) %s"
#: dnsmasq.c:374 dnsmasq.c:1031
#, c-format
msgid "DBus error: %s"
msgstr "DBus feil: %s"
#: dnsmasq.c:377
msgid "DBus not available: set HAVE_DBUS in src/config.h"
msgstr "DBus ikke tilgjengelig: sett HAVE_DBUS i src/config.h"
#: dnsmasq.c:405
#, c-format
msgid "unknown user or group: %s"
msgstr ""
#: dnsmasq.c:460
#, c-format
msgid "cannot chdir to filesystem root: %s"
msgstr ""
#: dnsmasq.c:716
#, fuzzy, c-format
msgid "started, version %s DNS disabled"
msgstr "startet, versjon %s mellomlager deaktivert"
#: dnsmasq.c:718
#, c-format
msgid "started, version %s cachesize %d"
msgstr "startet, versjon %s mellomlager størrelse %d"
#: dnsmasq.c:720
#, c-format
msgid "started, version %s cache disabled"
msgstr "startet, versjon %s mellomlager deaktivert"
#: dnsmasq.c:722
#, c-format
msgid "compile time options: %s"
msgstr "kompilerings opsjoner: %s"
#: dnsmasq.c:728
msgid "DBus support enabled: connected to system bus"
msgstr "DBus støtte aktivert: koblet til system buss"
#: dnsmasq.c:730
msgid "DBus support enabled: bus connection pending"
msgstr "DBus støtte aktivert: avventer buss tilkobling"
#: dnsmasq.c:735
msgid "DNS service limited to local subnets"
msgstr ""
#: dnsmasq.c:751
msgid "DNSSEC validation enabled"
msgstr ""
#: dnsmasq.c:754
msgid "DNSSEC signature timestamps not checked until first cache reload"
msgstr ""
#: dnsmasq.c:757
msgid "DNSSEC signature timestamps not checked until system time valid"
msgstr ""
#: dnsmasq.c:762
#, fuzzy, c-format
msgid "warning: failed to change owner of %s: %s"
msgstr "feilet å laste navn fra %s: %s"
#: dnsmasq.c:766
msgid "setting --bind-interfaces option because of OS limitations"
msgstr "setter --bind-interfaces opsjon på grunn av OS begrensninger"
#: dnsmasq.c:776
#, c-format
msgid "warning: interface %s does not currently exist"
msgstr "advarsel: nettverkskort %s eksisterer ikke for tiden"
#: dnsmasq.c:781
msgid "warning: ignoring resolv-file flag because no-resolv is set"
msgstr ""
#: dnsmasq.c:784
#, fuzzy
msgid "warning: no upstream servers configured"
msgstr "setter oppstrøms tjener fra DBus"
#: dnsmasq.c:788
#, c-format
msgid "asynchronous logging enabled, queue limit is %d messages"
msgstr ""
#: dnsmasq.c:809
msgid "IPv6 router advertisement enabled"
msgstr ""
#: dnsmasq.c:814
#, c-format
msgid "DHCP, sockets bound exclusively to interface %s"
msgstr ""
#: dnsmasq.c:828
msgid "root is "
msgstr ""
#: dnsmasq.c:828
#, fuzzy
msgid "enabled"
msgstr "deaktivert"
#: dnsmasq.c:830
msgid "secure mode"
msgstr ""
#: dnsmasq.c:833
#, c-format
msgid "warning: %s inaccessible"
msgstr ""
#: dnsmasq.c:837
#, c-format
msgid "warning: TFTP directory %s inaccessible"
msgstr ""
#: dnsmasq.c:863
#, c-format
msgid "restricting maximum simultaneous TFTP transfers to %d"
msgstr ""
#: dnsmasq.c:1033
msgid "connected to system DBus"
msgstr "tilkoblet til system DBus"
#: dnsmasq.c:1183
#, c-format
msgid "cannot fork into background: %s"
msgstr ""
#: dnsmasq.c:1186
#, fuzzy, c-format
msgid "failed to create helper: %s"
msgstr "feilet å lese %s: %s"
#: dnsmasq.c:1189
#, c-format
msgid "setting capabilities failed: %s"
msgstr ""
#: dnsmasq.c:1192
#, fuzzy, c-format
msgid "failed to change user-id to %s: %s"
msgstr "feilet å laste navn fra %s: %s"
#: dnsmasq.c:1195
#, fuzzy, c-format
msgid "failed to change group-id to %s: %s"
msgstr "feilet å laste navn fra %s: %s"
#: dnsmasq.c:1198
#, fuzzy, c-format
msgid "failed to open pidfile %s: %s"
msgstr "feilet å lese %s: %s"
#: dnsmasq.c:1201
#, fuzzy, c-format
msgid "cannot open log %s: %s"
msgstr "kan ikke åpne %s: %s"
#: dnsmasq.c:1204
#, fuzzy, c-format
msgid "failed to load Lua script: %s"
msgstr "feilet å laste %s: %s"
#: dnsmasq.c:1207
#, c-format
msgid "TFTP directory %s inaccessible: %s"
msgstr ""
#: dnsmasq.c:1210
#, fuzzy, c-format
msgid "cannot create timestamp file %s: %s"
msgstr "kan ikke åpne eller lage leie fil: %s"
#: dnsmasq.c:1231
msgid "now checking DNSSEC signature timestamps"
msgstr ""
#: dnsmasq.c:1298
#, c-format
msgid "script process killed by signal %d"
msgstr ""
#: dnsmasq.c:1302
#, c-format
msgid "script process exited with status %d"
msgstr ""
#: dnsmasq.c:1306
#, fuzzy, c-format
msgid "failed to execute %s: %s"
msgstr "feilet å få tilgang til %s: %s"
#: dnsmasq.c:1363 dnssec.c:479 dnssec.c:523
#, fuzzy, c-format
msgid "failed to update mtime on %s: %s"
msgstr "feilet å lese %s: %s"
#: dnsmasq.c:1370
msgid "exiting on receipt of SIGTERM"
msgstr "avslutter etter mottak av SIGTERM"
#: dnsmasq.c:1398
#, fuzzy, c-format
msgid "failed to access %s: %s"
msgstr "feilet å få tilgang til %s: %s"
#: dnsmasq.c:1428
#, c-format
msgid "reading %s"
msgstr "leser %s"
#: dnsmasq.c:1439
#, fuzzy, c-format
msgid "no servers found in %s, will retry"
msgstr "intet søke direktiv funnet i %s"
#: dhcp.c:53
#, c-format
msgid "cannot create DHCP socket: %s"
msgstr "kan ikke lage DHCP socket: %s"
#: dhcp.c:68
#, c-format
msgid "failed to set options on DHCP socket: %s"
msgstr "feilet å sette opsjoner på DHCP socket: %s"
#: dhcp.c:89
#, fuzzy, c-format
msgid "failed to set SO_REUSE{ADDR|PORT} on DHCP socket: %s"
msgstr "feilet å sette SO_REUSEADDR på DHCP socket: %s"
#: dhcp.c:101
#, c-format
msgid "failed to bind DHCP server socket: %s"
msgstr "feilet å binde DHCP tjener socket: %s"
#: dhcp.c:127
#, c-format
msgid "cannot create ICMP raw socket: %s."
msgstr "kan ikke lage ICMP raw socket: %s."
#: dhcp.c:243 dhcp6.c:173
#, fuzzy, c-format
msgid "unknown interface %s in bridge-interface"
msgstr "ukjent tilknytning (interface) %s"
#: dhcp.c:283
#, c-format
msgid "DHCP packet received on %s which has no address"
msgstr ""
#: dhcp.c:417
#, c-format
msgid "ARP-cache injection failed: %s"
msgstr ""
#: dhcp.c:460
#, c-format
msgid "Error sending DHCP packet to %s: %s"
msgstr ""
#: dhcp.c:521
#, c-format
msgid "DHCP range %s -- %s is not consistent with netmask %s"
msgstr "DHCP område %s -- %s er ikke konsistent med nettmaske %s"
#: dhcp.c:822
#, c-format
msgid "bad line at %s line %d"
msgstr "dårlig linje ved %s linje %d"
#: dhcp.c:865
#, c-format
msgid "ignoring %s line %d, duplicate name or IP address"
msgstr ""
#: dhcp.c:1009 rfc3315.c:2136
#, c-format
msgid "DHCP relay %s -> %s"
msgstr ""
#: lease.c:61
#, fuzzy, c-format
msgid "cannot open or create lease file %s: %s"
msgstr "kan ikke åpne eller lage leie fil: %s"
#: lease.c:134
msgid "too many stored leases"
msgstr "for mange lagrede leier"
#: lease.c:165
#, fuzzy, c-format
msgid "cannot run lease-init script %s: %s"
msgstr "kan ikke lese %s: %s"
#: lease.c:171
#, c-format
msgid "lease-init script returned exit code %s"
msgstr ""
#: lease.c:342
#, fuzzy, c-format
msgid "failed to write %s: %s (retry in %us)"
msgstr "feilet å lese %s: %s"
#: lease.c:906
#, c-format
msgid "Ignoring domain %s for DHCP host name %s"
msgstr ""
#: rfc2131.c:344
#, c-format
msgid "no address range available for DHCP request %s %s"
msgstr "ingen adresse område tilgjengelig for DHCP krav %s %s"
#: rfc2131.c:345
msgid "with subnet selector"
msgstr "med subnet velger"
#: rfc2131.c:345
msgid "via"
msgstr "via"
#: rfc2131.c:357
#, fuzzy, c-format
msgid "%u available DHCP subnet: %s/%s"
msgstr "ingen adresse område tilgjengelig for DHCP krav %s %s"
#: rfc2131.c:360 rfc3315.c:300
#, c-format
msgid "%u available DHCP range: %s -- %s"
msgstr ""
#: rfc2131.c:471
#, fuzzy, c-format
msgid "%u vendor class: %s"
msgstr "DBus feil: %s"
#: rfc2131.c:473
#, fuzzy, c-format
msgid "%u user class: %s"
msgstr "DBus feil: %s"
#: rfc2131.c:500
msgid "disabled"
msgstr "deaktivert"
#: rfc2131.c:541 rfc2131.c:985 rfc2131.c:1391 rfc3315.c:603 rfc3315.c:856
#: rfc3315.c:1135
msgid "ignored"
msgstr "oversett"
#: rfc2131.c:556 rfc2131.c:1218 rfc3315.c:906
msgid "address in use"
msgstr "adresse i bruk"
#: rfc2131.c:570 rfc2131.c:1039
msgid "no address available"
msgstr "ingen adresse tilgjengelig"
#: rfc2131.c:577 rfc2131.c:1181
msgid "wrong network"
msgstr "galt nettverk"
#: rfc2131.c:592
msgid "no address configured"
msgstr "ingen adresse konfigurert"
#: rfc2131.c:598 rfc2131.c:1231
msgid "no leases left"
msgstr "ingen leier igjen"
#: rfc2131.c:693 rfc3315.c:476
#, c-format
msgid "%u client provides name: %s"
msgstr ""
#: rfc2131.c:798
msgid "PXE BIS not supported"
msgstr ""
#: rfc2131.c:953 rfc3315.c:1229
#, fuzzy, c-format
msgid "disabling DHCP static address %s for %s"
msgstr "deaktiverer DHCP statisk adresse %s"
#: rfc2131.c:974
msgid "unknown lease"
msgstr "ukjent leie"
#: rfc2131.c:1008
#, c-format
msgid "not using configured address %s because it is leased to %s"
msgstr ""
#: rfc2131.c:1018
#, c-format
msgid "not using configured address %s because it is in use by the server or relay"
msgstr ""
#: rfc2131.c:1021
#, c-format
msgid "not using configured address %s because it was previously declined"
msgstr ""
#: rfc2131.c:1037 rfc2131.c:1224
msgid "no unique-id"
msgstr ""
#: rfc2131.c:1119
msgid "wrong server-ID"
msgstr ""
#: rfc2131.c:1138
msgid "wrong address"
msgstr "gal adresse"
#: rfc2131.c:1156 rfc3315.c:1002
msgid "lease not found"
msgstr "leie ikke funnet"
#: rfc2131.c:1189
msgid "address not available"
msgstr "adresse ikke tilgjengelig"
#: rfc2131.c:1200
msgid "static lease available"
msgstr "statisk leie tilgjengelig"
#: rfc2131.c:1204
msgid "address reserved"
msgstr "adresse reservert"
#: rfc2131.c:1212
#, c-format
msgid "abandoning lease to %s of %s"
msgstr ""
#: rfc2131.c:1718
#, c-format
msgid "%u bootfile name: %s"
msgstr ""
#: rfc2131.c:1727
#, fuzzy, c-format
msgid "%u server name: %s"
msgstr "DBus feil: %s"
#: rfc2131.c:1735
#, fuzzy, c-format
msgid "%u next server: %s"
msgstr "DBus feil: %s"
#: rfc2131.c:1738
#, c-format
msgid "%u broadcast response"
msgstr ""
#: rfc2131.c:1801
#, fuzzy, c-format
msgid "cannot send DHCP/BOOTP option %d: no space left in packet"
msgstr "kan ikke sende DHCP opsjon %d: ikke mer plass i pakken"
#: rfc2131.c:2092
msgid "PXE menu too large"
msgstr ""
#: rfc2131.c:2231 rfc3315.c:1502
#, fuzzy, c-format
msgid "%u requested options: %s"
msgstr "kompilerings opsjoner: %s"
#: rfc2131.c:2548
#, c-format
msgid "cannot send RFC3925 option: too many options for enterprise number %d"
msgstr ""
#: netlink.c:77
#, fuzzy, c-format
msgid "cannot create netlink socket: %s"
msgstr "kan ikke binde netlink socket: %s"
#: netlink.c:349
#, fuzzy, c-format
msgid "netlink returns error: %s"
msgstr "DBus feil: %s"
#: dbus.c:186
msgid "attempt to set an IPv6 server address via DBus - no IPv6 support"
msgstr "forsøk på å sette en IPv6 tjener adresse via DBus - ingen IPv6 støtte"
#: dbus.c:439
#, c-format
msgid "Enabling --%s option from D-Bus"
msgstr ""
#: dbus.c:444
#, c-format
msgid "Disabling --%s option from D-Bus"
msgstr ""
#: dbus.c:691
msgid "setting upstream servers from DBus"
msgstr "setter oppstrøms tjener fra DBus"
#: dbus.c:738
msgid "could not register a DBus message handler"
msgstr "kunne ikke registrere en DBus meldingshåndterer"
#: bpf.c:265
#, c-format
msgid "cannot create DHCP BPF socket: %s"
msgstr "kan ikke lage DHCP BPF socket: %s"
#: bpf.c:293
#, fuzzy, c-format
msgid "DHCP request for unsupported hardware type (%d) received on %s"
msgstr "DHCP krav for ikke støttet maskinvare type (%d) mottatt på %s"
#: bpf.c:378
#, fuzzy, c-format
msgid "cannot create PF_ROUTE socket: %s"
msgstr "kan ikke lage DHCP socket: %s"
#: bpf.c:399
msgid "Unknown protocol version from route socket"
msgstr ""
#: helper.c:153
msgid "lease() function missing in Lua script"
msgstr ""
#: tftp.c:319
msgid "unable to get free port for TFTP"
msgstr ""
#: tftp.c:335
#, c-format
msgid "unsupported request from %s"
msgstr ""
#: tftp.c:450
#, fuzzy, c-format
msgid "file %s not found"
msgstr "leie ikke funnet"
#: tftp.c:559
#, c-format
msgid "error %d %s received from %s"
msgstr ""
#: tftp.c:601
#, fuzzy, c-format
msgid "failed sending %s to %s"
msgstr "feilet å lese %s: %s"
#: tftp.c:601
#, c-format
msgid "sent %s to %s"
msgstr ""
#: log.c:190
#, c-format
msgid "overflow: %d log entries lost"
msgstr ""
#: log.c:268
#, c-format
msgid "log failed: %s"
msgstr ""
#: log.c:469
msgid "FAILED to start up"
msgstr "FEILET å starte opp"
#: conntrack.c:65
#, c-format
msgid "Conntrack connection mark retrieval failed: %s"
msgstr ""
#: dhcp6.c:52
#, fuzzy, c-format
msgid "cannot create DHCPv6 socket: %s"
msgstr "kan ikke lage DHCP socket: %s"
#: dhcp6.c:73
#, fuzzy, c-format
msgid "failed to set SO_REUSE{ADDR|PORT} on DHCPv6 socket: %s"
msgstr "feilet å sette SO_REUSEADDR på DHCP socket: %s"
#: dhcp6.c:85
#, fuzzy, c-format
msgid "failed to bind DHCPv6 server socket: %s"
msgstr "feilet å binde DHCP tjener socket: %s"
#: rfc3315.c:157
#, fuzzy, c-format
msgid "no address range available for DHCPv6 request from relay at %s"
msgstr "ingen adresse område tilgjengelig for DHCP krav %s %s"
#: rfc3315.c:166
#, fuzzy, c-format
msgid "no address range available for DHCPv6 request via %s"
msgstr "ingen adresse område tilgjengelig for DHCP krav %s %s"
#: rfc3315.c:297
#, fuzzy, c-format
msgid "%u available DHCPv6 subnet: %s/%d"
msgstr "ingen adresse område tilgjengelig for DHCP krav %s %s"
#: rfc3315.c:380
#, fuzzy, c-format
msgid "%u vendor class: %u"
msgstr "DBus feil: %s"
#: rfc3315.c:428
#, fuzzy, c-format
msgid "%u client MAC address: %s"
msgstr "ingen tilknytning (interface) med adresse %s"
#: rfc3315.c:660
#, fuzzy, c-format
msgid "unknown prefix-class %d"
msgstr "ukjent leie"
#: rfc3315.c:803 rfc3315.c:898
#, fuzzy
msgid "address unavailable"
msgstr "adresse ikke tilgjengelig"
#: rfc3315.c:815 rfc3315.c:946 rfc3315.c:1279
msgid "success"
msgstr ""
#: rfc3315.c:830 rfc3315.c:839 rfc3315.c:954 rfc3315.c:956
#, fuzzy
msgid "no addresses available"
msgstr "ingen adresse tilgjengelig"
#: rfc3315.c:933
msgid "not on link"
msgstr ""
#: rfc3315.c:1006 rfc3315.c:1191 rfc3315.c:1268
msgid "no binding found"
msgstr ""
#: rfc3315.c:1044
msgid "deprecated"
msgstr ""
#: rfc3315.c:1049
#, fuzzy
msgid "address invalid"
msgstr "adresse i bruk"
#: rfc3315.c:1096
msgid "confirm failed"
msgstr ""
#: rfc3315.c:1112
#, fuzzy
msgid "all addresses still on link"
msgstr "dårlig adresse ved %s linje %d"
#: rfc3315.c:1200
msgid "release received"
msgstr ""
#: rfc3315.c:2127
msgid "Cannot multicast to DHCPv6 server without correct interface"
msgstr ""
#: dhcp-common.c:145
#, c-format
msgid "Ignoring duplicate dhcp-option %d"
msgstr ""
#: dhcp-common.c:222
#, c-format
msgid "%u tags: %s"
msgstr ""
#: dhcp-common.c:407
#, c-format
msgid "%s has more than one address in hostsfile, using %s for DHCP"
msgstr ""
#: dhcp-common.c:430
#, c-format
msgid "duplicate IP address %s (%s) in dhcp-config directive"
msgstr "duplisert IP adresse %s (%s) i dhcp-config direktiv"
#: dhcp-common.c:494
#, fuzzy, c-format
msgid "failed to set SO_BINDTODEVICE on DHCP socket: %s"
msgstr "feilet å sette SO_REUSEADDR på DHCP socket: %s"
#: dhcp-common.c:615
#, c-format
msgid "Known DHCP options:\n"
msgstr ""
#: dhcp-common.c:626
#, c-format
msgid "Known DHCPv6 options:\n"
msgstr ""
#: dhcp-common.c:823
msgid ", prefix deprecated"
msgstr ""
#: dhcp-common.c:826
#, c-format
msgid ", lease time "
msgstr ""
#: dhcp-common.c:868
#, c-format
msgid "%s stateless on %s%.0s%.0s%s"
msgstr ""
#: dhcp-common.c:870
#, fuzzy, c-format
msgid "%s, static leases only on %.0s%s%s%.0s"
msgstr "DHCP, statisk leie kun på %.0s%s, leie tid %s"
#: dhcp-common.c:872
#, c-format
msgid "%s, proxy on subnet %.0s%s%.0s%.0s"
msgstr ""
#: dhcp-common.c:873
#, fuzzy, c-format
msgid "%s, IP range %s -- %s%s%.0s"
msgstr "DHCP, IP område %s -- %s, leie tid %s"
#: dhcp-common.c:886
#, c-format
msgid "DHCPv4-derived IPv6 names on %s%s"
msgstr ""
#: dhcp-common.c:889
#, fuzzy, c-format
msgid "router advertisement on %s%s"
msgstr "DHCP, statisk leie kun på %.0s%s, leie tid %s"
#: dhcp-common.c:900
#, c-format
msgid "DHCP relay from %s to %s via %s"
msgstr ""
#: dhcp-common.c:902
#, c-format
msgid "DHCP relay from %s to %s"
msgstr ""
#: radv.c:110
#, fuzzy, c-format
msgid "cannot create ICMPv6 socket: %s"
msgstr "kan ikke lage DHCP socket: %s"
#: auth.c:449
#, c-format
msgid "ignoring zone transfer request from %s"
msgstr ""
#: ipset.c:95
#, fuzzy, c-format
msgid "failed to find kernel version: %s"
msgstr "feilet å binde DHCP tjener socket: %s"
#: ipset.c:114
#, fuzzy, c-format
msgid "failed to create IPset control socket: %s"
msgstr "feilet å lage lytte socket: %s"
#: blockdata.c:58
#, c-format
msgid "DNSSEC memory in use %u, max %u, allocated %u"
msgstr ""
#: tables.c:80
msgid "error: fill_addr missused"
msgstr ""
#: tables.c:109
#, fuzzy, c-format
msgid "failed to access pf devices: %s"
msgstr "feilet å få tilgang til %s: %s"
#: tables.c:123
#, fuzzy, c-format
msgid "warning: no opened pf devices %s"
msgstr "benytter lokale adresser kun for %s %s"
#: tables.c:131
#, fuzzy, c-format
msgid "error: cannot use table name %s"
msgstr "klarer ikke å få vertsnavn: %s"
#: tables.c:139
#, c-format
msgid "error: cannot strlcpy table name %s"
msgstr ""
#: tables.c:145
#, c-format
msgid "warning: pfr_add_tables: %s(%d)"
msgstr ""
#: tables.c:151
msgid "info: table created"
msgstr ""
#: tables.c:162
#, c-format
msgid "warning: DIOCR%sADDRS: %s"
msgstr ""
#: tables.c:166
#, fuzzy, c-format
msgid "%d addresses %s"
msgstr "les %s - %d adresser"
#: inotify.c:62
#, fuzzy, c-format
msgid "cannot access path %s: %s"
msgstr "kan ikke lese %s: %s"
#: inotify.c:95
#, fuzzy, c-format
msgid "failed to create inotify: %s"
msgstr "feilet å lese %s: %s"
#: inotify.c:111
#, c-format
msgid "too many symlinks following %s"
msgstr ""
#: inotify.c:127
#, c-format
msgid "directory %s for resolv-file is missing, cannot poll"
msgstr ""
#: inotify.c:131 inotify.c:168
#, fuzzy, c-format
msgid "failed to create inotify for %s: %s"
msgstr "feilet å lage lytte socket: %s"
#: inotify.c:153
#, fuzzy, c-format
msgid "bad dynamic directory %s: %s"
msgstr "kan ikke lese %s: %s"
#: inotify.c:255
#, c-format
msgid "inotify, new or changed file %s"
msgstr ""
#, fuzzy
#~ msgid "cannot cannonicalise resolv-file %s: %s"
#~ msgstr "kan ikke åpne eller lage leie fil: %s"
#~ msgid "duplicate IP address %s in dhcp-config directive."
#~ msgstr "duplisert IP adresse %s i dhcp-config direktiv."
#, fuzzy
#~ msgid "Specify path to Lua script (no default)."
#~ msgstr "Spesifiser stien til PID fil. (standard er %s)."
#~ msgid "TXT record string too long"
#~ msgstr "TXT post streng for lang"
#~ msgid "failed to set IPV6 options on listening socket: %s"
#~ msgstr "feilet å sette IPv6 opsjoner på lytte socket: %s"
#~ msgid "failed to bind listening socket for %s: %s"
#~ msgstr "feilet å binde lytte socket for %s: %s"
#~ msgid "must set exactly one interface on broken systems without IP_RECVIF"
#~ msgstr "må sette nøyaktig et interface på ødelagte systemer uten IP_RECVIF"
#~ msgid "Ignoring DHCP lease for %s because it has an illegal domain part"
#~ msgstr "Ignorerer DHCP leie for %s siden den har en ulovlig domene del"
#~ msgid "ISC dhcpd integration not available: set HAVE_ISC_READER in src/config.h"
#~ msgstr "ISC dhcpd integrasjon ikke tilgjengelig: sett HAVE_ISC_READER i src/config.h"
#, fuzzy
#~ msgid "illegal domain %s in dhcp-config directive."
#~ msgstr "dubliserte IP adresser i %s dhcp-config direktiv."
#~ msgid "running as root"
#~ msgstr "kjører som rot (root)"
#, fuzzy
#~ msgid "read %s - %d hosts"
#~ msgstr "les %s - %d adresser"
#~ msgid "domains"
#~ msgstr "domener"
#~ msgid "Ignoring DHCP host name %s because it has an illegal domain part"
#~ msgstr "Ignorerer DHCP verts navn %s på grunn av ulovlig domene del"
#~ msgid "Display this message."
#~ msgstr "Vis denne meldingen."
#~ msgid "failed to read %s: %m"
#~ msgstr "feilet å lese %s: %m"
#~ msgid "failed to read %s:%m"
#~ msgstr "feilet å lese %s:%m"
#, fuzzy
#~ msgid "cannot send encapsulated option %d: no space left in wrapper"
#~ msgstr "kan ikke sende DHCP opsjon %d: ikke mer plass i pakken"
#~ msgid "More than one vendor class matches, using %s"
#~ msgstr "Mer enn en produsent klasse som passer, bruker %s"
#~ msgid "forwarding table overflow: check for server loops."
#~ msgstr "fremsendelse (forwarding) tabell overflyt: sjekk etter tjener løkker."
#~ msgid "nested includes not allowed"
#~ msgstr "nøstede inkluderinger er ikke tillatt"
#~ msgid "DHCP, %s will be written every %s"
#~ msgstr "DHCP, %s vil bli skrevet hver %s"
#~ msgid "cannot create DHCP packet socket: %s. Is CONFIG_PACKET enabled in your kernel?"
#~ msgstr "kan ikke lage DHCP pakke socket: %s. Er CONFIG_PACKET aktivert i din kjerne?"
#~ msgid "Cannot use RTnetlink socket, falling back to ioctl API"
#~ msgstr "Kan ikke benytte RTnetlink socket, faller tilbake til ioctl API"
| {
"pile_set_name": "Github"
} |
# Editing a package.json for apps
All apps need to have a package.json file to describe themselves with a few required fields. This is based on the [CommonJS Packages](http://wiki.commonjs.org/wiki/Packages) format and [NPM](http://npmjs.org/doc/json.html), with the additional data needed in the repository field.
## Minimum template to start
{
"name": "myapp",
"version": "0.0.0",
"description": "This is my awesome app!",
"repository": {
"title": "My App"
}
}
The `name` field is a short name or 'handle', required but not visible anywhere.
The `version` is good to start with `0.0.0`; only increment the first two segments (x.x.0) if you want to track your own versions here — the last one (0.0.y) is auto-incremented every time an app is published.
Set `description` to a sentence describing what the app does in the gallery or details page before someone installs it.
For `repository.title` choose a short recognizable title (shown in the list of apps after it's added).
## Screenshot
The default screenshot used to describe the app is simply the `screenshot.png` file included with it. The preferred size is 360x360, and it should represent what the app looks like in use (not an icon or cover).
## Optional repository fields
To better describe what data an app needs in order to be useful, the uses field can express those requirements ahead of time:
{
"name": "myapp",
"version": "0.0.0",
"description": "This is my awesome app!",
"repository": {
"title": "My App",
"uses": {
"services": [ "twitter", "foursquare", "instagram" ],
"types": [ "places" ]
}
},
"author": "My Name <my@email>"
}
Add any helpful or required services to the `repository.uses.services` array.
If any collections are used, add them to the `repository.uses.types` array.
By default the `author` is set to your github information when you publish, but you can override it here too.
## Problems/Questions?
Just ask! Check out the IRC for a quick answer or post to our forum or mailing list for anything bigger, thanks!
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package fs
import (
"bytes"
"errors"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
"github.com/m3db/bloom/v4"
"github.com/pborman/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testSnapshotID is a fixed UUID so snapshot metadata assertions are deterministic.
var (
	testSnapshotID = uuid.Parse("bbc85a98-bd0c-47fe-8b9a-89cde1b4540f")
)
// testEntry is a single series fixture: an ID, optional tags, and raw data bytes.
type testEntry struct {
	id   string
	tags map[string]string
	data []byte
}

// ID returns the entry's identifier wrapped as an ident.ID.
func (e testEntry) ID() ident.ID {
	return ident.StringID(e.id)
}

// Tags converts the entry's tag map into ident.Tags, iterating keys in
// sorted order so the resulting tag order is deterministic across runs.
func (e testEntry) Tags() ident.Tags {
	if e.tags == nil {
		return ident.Tags{}
	}

	// Return in sorted order for deterministic order
	var keys []string
	for key := range e.tags {
		keys = append(keys, key)
	}

	sort.Strings(keys)

	var tags ident.Tags
	for _, key := range keys {
		tags.Append(ident.StringTag(key, e.tags[key]))
	}

	return tags
}
// testEntries implements sort.Interface so fixtures can be ordered by ID,
// matching the order a streaming read returns entries in.
type testEntries []testEntry

func (e testEntries) Less(i, j int) bool {
	return e[i].id < e[j].id
}

func (e testEntries) Len() int {
	return len(e)
}

func (e testEntries) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}
// newTestWriter constructs a DataFileSetWriter rooted at filePathPrefix,
// failing the test immediately if construction errors.
func newTestWriter(t *testing.T, filePathPrefix string) DataFileSetWriter {
	writer, err := NewWriter(testDefaultOpts.
		SetFilePathPrefix(filePathPrefix).
		SetWriterBufferSize(testWriterBufferSize))
	require.NoError(t, err)
	return writer
}
// writeTestData writes entries for the given shard/block start at volume 0.
// See writeTestDataWithVolume for the full write-and-verify behavior.
func writeTestData(
	t *testing.T,
	w DataFileSetWriter,
	shard uint32,
	timestamp time.Time,
	entries []testEntry,
	fileSetType persist.FileSetType,
) {
	writeTestDataWithVolume(
		t, w, shard, timestamp, 0, entries, fileSetType)
}
// writeTestDataWithVolume opens the writer for the given fileset identifier,
// writes all entries, closes the writer, and then asserts the writer released
// the metadata references held by its reusable index entries slice.
func writeTestDataWithVolume(
	t *testing.T,
	w DataFileSetWriter,
	shard uint32,
	timestamp time.Time,
	volume int,
	entries []testEntry,
	fileSetType persist.FileSetType,
) {
	writerOpts := DataWriterOpenOptions{
		Identifier: FileSetFileIdentifier{
			Namespace:   testNs1ID,
			Shard:       shard,
			BlockStart:  timestamp,
			VolumeIndex: volume,
		},
		BlockSize:   testBlockSize,
		FileSetType: fileSetType,
	}

	// Snapshot filesets additionally carry a snapshot time and ID.
	if fileSetType == persist.FileSetSnapshotType {
		writerOpts.Snapshot.SnapshotTime = timestamp
		writerOpts.Snapshot.SnapshotID = testSnapshotID
	}

	err := w.Open(writerOpts)
	assert.NoError(t, err)

	for i := range entries {
		metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(),
			entries[i].Tags(),
			persist.MetadataOptions{})
		assert.NoError(t, w.Write(metadata,
			bytesRefd(entries[i].data),
			digest.Checksum(entries[i].data)))
	}
	assert.NoError(t, w.Close())

	// Assert that any index entries released references they held
	writer, ok := w.(*writer)
	require.True(t, ok)

	// Take ref to wholly allocated index entries slice
	slice := writer.indexEntries[:cap(writer.indexEntries)]

	// Check every entry has ID and Tags nil
	for _, elem := range slice {
		assert.Equal(t, persist.Metadata{}, elem.metadata)
	}
}
// readTestType selects which reader API a verification pass exercises.
type readTestType uint

const (
	// readTestTypeData reads full entries (ID, tags, data, checksum).
	readTestTypeData readTestType = iota
	// readTestTypeMetadata reads only entry metadata via ReadMetadata.
	readTestTypeMetadata
)

// readTestTypes lists the passes run against each opened fileset.
var readTestTypes = []readTestType{
	readTestTypeData,
	readTestTypeMetadata,
}
// readTestData verifies entries read back correctly both with streaming
// disabled (written order) and enabled (entries sorted by ID, matching the
// order a streaming read yields).
func readTestData(t *testing.T, r DataFileSetReader, shard uint32, timestamp time.Time, entries []testEntry) {
	readTestDataWithStreamingOpt(t, r, shard, timestamp, entries, false)

	sortedEntries := append(make(testEntries, 0, len(entries)), entries...)
	sort.Sort(sortedEntries)

	readTestDataWithStreamingOpt(t, r, shard, timestamp, sortedEntries, true)
}
// readTestDataWithStreamingOpt will test reading back the data matches what was written,
// note that this test also tests reuse of the reader since it first reads
// all the data then closes it, reopens and reads through again but just
// reading the metadata the second time.
// If it starts to fail during the pass that reads just the metadata it could
// be a newly introduced reader reuse bug.
func readTestDataWithStreamingOpt(
	t *testing.T,
	r DataFileSetReader,
	shard uint32,
	timestamp time.Time,
	entries []testEntry,
	streamingEnabled bool,
) {
	for _, underTest := range readTestTypes {
		if underTest == readTestTypeMetadata && streamingEnabled {
			// ATM there is no streaming support for metadata.
			continue
		}

		rOpenOpts := DataReaderOpenOptions{
			Identifier: FileSetFileIdentifier{
				Namespace:  testNs1ID,
				Shard:      shard,
				BlockStart: timestamp,
			},
			StreamingEnabled: streamingEnabled,
		}
		err := r.Open(rOpenOpts)
		require.NoError(t, err)

		require.Equal(t, len(entries), r.Entries())
		require.Equal(t, 0, r.EntriesRead())

		bloomFilter, err := r.ReadBloomFilter()
		assert.NoError(t, err)
		// Make sure the bloom filter doesn't always return true
		assert.False(t, bloomFilter.Test([]byte("some_random_data")))

		expectedEntries := uint(len(entries))
		if expectedEntries == 0 {
			expectedEntries = 1
		}
		expectedM, expectedK := bloom.EstimateFalsePositiveRate(
			expectedEntries, defaultIndexBloomFilterFalsePositivePercent)
		assert.Equal(t, expectedK, bloomFilter.K())
		// EstimateFalsePositiveRate always returns at least 1, so skip this check
		// if len entries is 0
		if len(entries) > 0 {
			assert.Equal(t, expectedM, bloomFilter.M())
		}

		for i := 0; i < r.Entries(); i++ {
			switch underTest {
			case readTestTypeData:
				id, tags, data, checksum, err := readData(t, r)
				require.NoError(t, err)

				data.IncRef()

				// Assert id
				assert.Equal(t, entries[i].id, id.String())

				// Assert tags
				tagMatcher := ident.NewTagIterMatcher(ident.NewTagsIterator(entries[i].Tags()))
				assert.True(t, tagMatcher.Matches(tags))

				assert.True(t, bytes.Equal(entries[i].data, data.Bytes()))
				assert.Equal(t, digest.Checksum(entries[i].data), checksum)

				assert.Equal(t, i+1, r.EntriesRead())

				// Verify that the bloomFilter was bootstrapped properly by making sure it
				// at least contains every ID
				assert.True(t, bloomFilter.Test(id.Bytes()))

				id.Finalize()
				tags.Close()
				data.DecRef()
				data.Finalize()

			case readTestTypeMetadata:
				id, tags, length, checksum, err := r.ReadMetadata()
				require.NoError(t, err)

				// Assert id against the expected entry. The previous assertion
				// compared the returned id with itself (id.Equal(id)), which is a
				// tautology and could never fail.
				assert.Equal(t, entries[i].id, id.String())

				// Assert tags
				tagMatcher := ident.NewTagIterMatcher(ident.NewTagsIterator(entries[i].Tags()))
				assert.True(t, tagMatcher.Matches(tags))

				assert.Equal(t, digest.Checksum(entries[i].data), checksum)
				assert.Equal(t, len(entries[i].data), length)

				assert.Equal(t, i+1, r.MetadataRead())

				// Verify that the bloomFilter was bootstrapped properly by making sure it
				// at least contains every ID
				assert.True(t, bloomFilter.Test(id.Bytes()))

				id.Finalize()
				tags.Close()
			}
		}

		require.NoError(t, r.Close())
	}
}
// TestSimpleReadWrite writes a mix of small, large and tagged series to a
// flush fileset and verifies they read back intact.
func TestSimpleReadWrite(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	entries := []testEntry{
		{"foo", nil, []byte{1, 2, 3}},
		{"bar", nil, []byte{4, 5, 6}},
		{"baz", nil, make([]byte, 65536)},
		{"cat", nil, make([]byte, 100000)},
		{"foo+bar=baz,qux=qaz", map[string]string{
			"bar": "baz",
			"qux": "qaz",
		}, []byte{7, 8, 9}},
	}

	w := newTestWriter(t, filePathPrefix)
	writeTestData(t, w, 0, testWriterStart, entries, persist.FileSetFlushType)

	r := newTestReader(t, filePathPrefix)
	readTestData(t, r, 0, testWriterStart, entries)
}
// TestCheckpointFileSizeBytesSize pins CheckpointFileSizeBytes to the digest
// length; completeness detection for checkpoint files depends on this.
func TestCheckpointFileSizeBytesSize(t *testing.T) {
	// These values need to match so that the logic for determining whether
	// a checkpoint file is complete or not remains correct.
	require.Equal(t, digest.DigestLenBytes, CheckpointFileSizeBytes)
}
// TestDuplicateWrite ensures that writing the same series ID twice surfaces a
// duplicate-ID error when the writer is closed.
func TestDuplicateWrite(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	entries := []testEntry{
		{"foo", nil, []byte{1, 2, 3}},
		{"foo", nil, []byte{4, 5, 6}},
	}

	w := newTestWriter(t, filePathPrefix)
	writerOpts := DataWriterOpenOptions{
		Identifier: FileSetFileIdentifier{
			Namespace:  testNs1ID,
			Shard:      0,
			BlockStart: testWriterStart,
		},
		BlockSize: testBlockSize,
	}
	err := w.Open(writerOpts)
	require.NoError(t, err)

	// Individual writes succeed; duplicates are detected at Close time.
	for i := range entries {
		metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(),
			entries[i].Tags(),
			persist.MetadataOptions{})
		require.NoError(t, w.Write(metadata,
			bytesRefd(entries[i].data),
			digest.Checksum(entries[i].data)))
	}

	require.Equal(t, errors.New("encountered duplicate ID: foo"), w.Close())
}
// TestReadWithReusedReader verifies a single reader instance can be reopened
// and used to read the same fileset twice.
func TestReadWithReusedReader(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	entries := []testEntry{
		{"foo", nil, []byte{1, 2, 3}},
		{"bar", nil, []byte{4, 5, 6}},
		{"baz", nil, make([]byte, 65536)},
		{"cat", nil, make([]byte, 100000)},
		{"foo+bar=baz,qux=qaz", map[string]string{
			"bar": "baz",
			"qux": "qaz",
		}, []byte{7, 8, 9}},
	}

	w := newTestWriter(t, filePathPrefix)
	writeTestData(t, w, 0, testWriterStart, entries, persist.FileSetFlushType)

	r := newTestReader(t, filePathPrefix)
	readTestData(t, r, 0, testWriterStart, entries)
	// Reuse the reader to read again
	readTestData(t, r, 0, testWriterStart, entries)
}
// TestInfoReadWrite checks the info file written alongside the data records
// the block start, block size and entry count.
func TestInfoReadWrite(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	entries := []testEntry{
		{"foo", nil, []byte{1, 2, 3}},
		{"bar", nil, []byte{4, 5, 6}},
		{"baz", nil, make([]byte, 65536)},
		{"cat", nil, make([]byte, 100000)},
		{"foo+bar=baz,qux=qaz", map[string]string{
			"bar": "baz",
			"qux": "qaz",
		}, []byte{7, 8, 9}},
	}

	w := newTestWriter(t, filePathPrefix)
	writeTestData(t, w, 0, testWriterStart, entries, persist.FileSetFlushType)

	readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil, persist.FileSetFlushType)
	require.Equal(t, 1, len(readInfoFileResults))
	for _, result := range readInfoFileResults {
		require.NoError(t, result.Err.Error())
	}

	infoFile := readInfoFileResults[0].Info
	require.True(t, testWriterStart.Equal(xtime.FromNanoseconds(infoFile.BlockStart)))
	require.Equal(t, testBlockSize, time.Duration(infoFile.BlockSize))
	require.Equal(t, int64(len(entries)), infoFile.Entries)
}
// TestInfoReadWriteVolumeIndex checks a non-zero volume index round-trips
// through the info file, even for an empty fileset.
func TestInfoReadWriteVolumeIndex(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	var (
		entries = []testEntry{}
		w       = newTestWriter(t, filePathPrefix)
		volume  = 1
	)

	writeTestDataWithVolume(t, w, 0, testWriterStart, volume, entries, persist.FileSetFlushType)

	readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil, persist.FileSetFlushType)
	require.Equal(t, 1, len(readInfoFileResults))
	for _, result := range readInfoFileResults {
		require.NoError(t, result.Err.Error())
	}

	infoFile := readInfoFileResults[0].Info
	require.True(t, testWriterStart.Equal(xtime.FromNanoseconds(infoFile.BlockStart)))
	require.Equal(t, volume, infoFile.VolumeIndex)
	require.Equal(t, testBlockSize, time.Duration(infoFile.BlockSize))
	require.Equal(t, int64(len(entries)), infoFile.Entries)
}
// TestInfoReadWriteSnapshot checks snapshot filesets persist their snapshot
// time and snapshot ID.
func TestInfoReadWriteSnapshot(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	w := newTestWriter(t, filePathPrefix)
	writeTestData(t, w, 0, testWriterStart, nil, persist.FileSetSnapshotType)

	snapshotFiles, err := SnapshotFiles(filePathPrefix, testNs1ID, 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(snapshotFiles))

	snapshot := snapshotFiles[0]
	snapshotTime, snapshotID, err := snapshot.SnapshotTimeAndID()
	require.NoError(t, err)
	require.True(t, testWriterStart.Equal(snapshotTime))
	require.Equal(t, testSnapshotID, snapshotID)
}
// TestReusingReaderWriter writes several filesets (including an empty one)
// with one writer instance, then reads them all back with one reader instance.
func TestReusingReaderWriter(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	allEntries := [][]testEntry{
		{
			{"foo", nil, []byte{1, 2, 3}},
			{"bar", nil, []byte{4, 5, 6}},
		},
		{
			{"baz", nil, []byte{7, 8, 9}},
		},
		{},
	}

	w := newTestWriter(t, filePathPrefix)
	for i := range allEntries {
		// Each batch gets its own block start, one hour apart.
		writeTestData(
			t, w, 0, testWriterStart.Add(time.Duration(i)*time.Hour), allEntries[i], persist.FileSetFlushType)
	}

	r := newTestReader(t, filePathPrefix)
	for i := range allEntries {
		readTestData(t, r, 0, testWriterStart.Add(time.Duration(i)*time.Hour), allEntries[i])
	}
}
// TestReusingWriterAfterWriteError forces a writer error mid-write, checks no
// checkpoint file is produced, then verifies the same writer instance still
// works when reused for a fresh write.
func TestReusingWriterAfterWriteError(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	entries := []testEntry{
		{"foo", nil, []byte{1, 2, 3}},
		{"bar", nil, []byte{4, 5, 6}},
	}

	w := newTestWriter(t, filePathPrefix)
	shard := uint32(0)
	writerOpts := DataWriterOpenOptions{
		Identifier: FileSetFileIdentifier{
			Namespace:  testNs1ID,
			Shard:      shard,
			BlockStart: testWriterStart,
		},
	}

	metadata := persist.NewMetadataFromIDAndTags(entries[0].ID(),
		entries[0].Tags(),
		persist.MetadataOptions{})

	require.NoError(t, w.Open(writerOpts))

	require.NoError(t, w.Write(metadata,
		bytesRefd(entries[0].data),
		digest.Checksum(entries[0].data)))

	// Intentionally force a writer error.
	w.(*writer).err = errors.New("foo")

	metadata = persist.NewMetadataFromIDAndTags(entries[1].ID(),
		entries[1].Tags(),
		persist.MetadataOptions{})

	// The forced error should be returned by the next write.
	require.Equal(t, "foo", w.Write(metadata,
		bytesRefd(entries[1].data),
		digest.Checksum(entries[1].data)).Error())
	w.Close()

	r := newTestReader(t, filePathPrefix)
	rOpenOpts := DataReaderOpenOptions{
		Identifier: FileSetFileIdentifier{
			Namespace:  testNs1ID,
			Shard:      shard,
			BlockStart: testWriterStart,
		},
	}
	// The aborted write must not have produced a checkpoint file.
	require.Equal(t, ErrCheckpointFileNotFound, r.Open(rOpenOpts))

	// Now reuse the writer and validate the data are written as expected.
	writeTestData(t, w, shard, testWriterStart, entries, persist.FileSetFlushType)
	readTestData(t, r, shard, testWriterStart, entries)
}
// TestWriterOnlyWritesNonNilBytes checks that WriteAll skips nil buffers and
// concatenates the remaining buffers into a single entry.
func TestWriterOnlyWritesNonNilBytes(t *testing.T) {
	dir := createTempDir(t)
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

	w := newTestWriter(t, filePathPrefix)
	writerOpts := DataWriterOpenOptions{
		BlockSize: testBlockSize,
		Identifier: FileSetFileIdentifier{
			Namespace:  testNs1ID,
			Shard:      0,
			BlockStart: testWriterStart,
		},
	}

	metadata := persist.NewMetadataFromIDAndTags(
		ident.StringID("foo"),
		ident.Tags{},
		persist.MetadataOptions{})

	require.NoError(t, w.Open(writerOpts))
	// Middle buffer is nil and should be skipped; checksum covers the
	// concatenation of the non-nil buffers.
	err := w.WriteAll(metadata,
		[]checked.Bytes{
			checkedBytes([]byte{1, 2, 3}),
			nil,
			checkedBytes([]byte{4, 5, 6}),
		},
		digest.Checksum([]byte{1, 2, 3, 4, 5, 6}))
	require.NoError(t, err)
	assert.NoError(t, w.Close())

	r := newTestReader(t, filePathPrefix)
	readTestData(t, r, 0, testWriterStart, []testEntry{
		{"foo", nil, []byte{1, 2, 3, 4, 5, 6}},
	})
}
// readData reads the next entry from the reader, transparently handling both
// the streaming and non-streaming APIs so callers can treat them uniformly.
func readData(t *testing.T, reader DataFileSetReader) (id ident.ID, tags ident.TagIterator, data checked.Bytes, checksum uint32, err error) {
	if reader.StreamingEnabled() {
		id, encodedTags, data, checksum, err := reader.StreamingRead()
		var tags = ident.EmptyTagIterator
		if len(encodedTags) > 0 {
			// Streaming reads return raw encoded tags; decode them into an iterator.
			tagsDecoder := testTagDecoderPool.Get()
			tagsDecoder.Reset(checkedBytes(encodedTags))
			require.NoError(t, tagsDecoder.Err())
			tags = tagsDecoder
		}
		return id, tags, checked.NewBytes(data, nil), checksum, err
	}

	return reader.Read()
}
// checkedBytes wraps b in a checked.Bytes and takes an initial reference so
// the returned buffer is immediately readable by the caller.
func checkedBytes(b []byte) checked.Bytes {
	wrapped := checked.NewBytes(b, nil)
	wrapped.IncRef()
	return wrapped
}
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.checks;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.state.CheckHelper;
import org.apache.ambari.server.state.Clusters;
import org.mockito.Mockito;
import com.google.inject.Provider;
/**
* Used to help mock out cluster and repository queries.
*/
/**
 * Used to help mock out cluster and repository queries.
 */
public class MockCheckHelper extends CheckHelper {
  /** Mockito mock handed out by the repository version DAO provider. */
  public RepositoryVersionDAO m_repositoryVersionDAO = Mockito.mock(RepositoryVersionDAO.class);

  /** Mockito mock handed out by the clusters provider. */
  public Clusters m_clusters = Mockito.mock(Clusters.class);

  public MockCheckHelper() {
    // Point the inherited providers at the mocks so tests can stub them.
    clustersProvider = new Provider<Clusters>() {
      @Override
      public Clusters get() {
        return m_clusters;
      }
    };

    repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
      @Override
      public RepositoryVersionDAO get() {
        return m_repositoryVersionDAO;
      }
    };
  }

  /**
   * Helper to set the AmbariMetaInfo provider instance.
   *
   * @param provider the provider the base class should use for metadata lookups
   */
  public void setMetaInfoProvider(Provider<AmbariMetaInfo> provider) {
    metaInfoProvider = provider;
  }
}
| {
"pile_set_name": "Github"
} |
{
"type": "minecraft:block",
"pools": [
{
"name": "main",
"rolls": 1,
"entries": [
{
"type": "minecraft:item",
"name": "botania:lime_floating_flower"
}
],
"conditions": [
{
"condition": "minecraft:survives_explosion"
}
]
}
]
} | {
"pile_set_name": "Github"
} |
/*
* ServerMainOverlay.cpp
*
* Copyright (C) 2020 by RStudio, PBC
*
* Unless you have received this program directly from RStudio pursuant
* to the terms of a commercial license agreement with RStudio, then
* this program is licensed to you under the terms of version 3 of the
* GNU Affero General Public License. This program is distributed WITHOUT
* ANY EXPRESS OR IMPLIED WARRANTY, INCLUDING THOSE OF NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Please refer to the
* AGPL (http://www.gnu.org/licenses/agpl-3.0.txt) for more details.
*
*/
#include <shared_core/Error.hpp>
using namespace rstudio::core;
namespace rstudio {
namespace server {
namespace overlay {
// Overlay initialization hook; the open-source build performs no extra setup.
Error initialize()
{
   return Success();
}

// Overlay startup hook invoked after initialization; no-op in this build.
Error startup()
{
   return Success();
}

// Overlay hook invoked when the server configuration is reloaded; no-op here.
Error reloadConfiguration()
{
   return Success();
}

// Overlay shutdown hook; nothing to tear down in this build.
void shutdown()
{
}

// Whether a local R installation is required; the open-source server always
// requires one.
bool requireLocalR()
{
   return true;
}
} // namespace overlay
} // namespace server
} // namespace rstudio
| {
"pile_set_name": "Github"
} |
/* https://github.com/cirosantilli/x86-bare-metal-examples#reboot */
#include "common.h"
BEGIN
    /* Far jump to F000:FFF0, the BIOS reset entry point, which reboots
     * the machine in real mode. */
    ljmpw $0xF000, $0XFFF0
| {
"pile_set_name": "Github"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <pthread.h>
#include <string.h>
#include <signal.h>
#include "libcgo.h"
#include "libcgo_unix.h"
static void *threadentry(void*);
static void (*setg_gcc)(void*);
// This will be set in gcc_android.c for android-specific customization.
void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
// x_cgo_init records the gcc-side setg function and estimates the low bound
// of the initial thread's stack from the default pthread stack size, leaving
// a 4096-byte margin. Also runs the Android TLS hook if one was installed.
void
x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
{
	pthread_attr_t attr;
	size_t size;

	setg_gcc = setg;
	pthread_attr_init(&attr);
	pthread_attr_getstacksize(&attr, &size);
	// &attr lives near the current top of stack; subtracting the stack size
	// approximates the low end of the stack.
	g->stacklo = (uintptr)&attr - size + 4096;
	pthread_attr_destroy(&attr);

	if (x_cgo_inittls) {
		x_cgo_inittls(tlsg, tlsbase);
	}
}
// _cgo_sys_thread_start creates a new OS thread that runs ts->fn via
// threadentry. All signals are blocked around pthread_create so the new
// thread starts with a full signal mask; the caller's mask is then restored.
void
_cgo_sys_thread_start(ThreadStart *ts)
{
	pthread_attr_t attr;
	sigset_t ign, oset;
	pthread_t p;
	size_t size;
	int err;

	sigfillset(&ign);
	pthread_sigmask(SIG_SETMASK, &ign, &oset);

	// Not sure why the memset is necessary here,
	// but without it, we get a bogus stack size
	// out of pthread_attr_getstacksize. C'est la Linux.
	memset(&attr, 0, sizeof attr);
	pthread_attr_init(&attr);
	size = 0;
	pthread_attr_getstacksize(&attr, &size);
	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
	ts->g->stackhi = size;
	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);

	pthread_sigmask(SIG_SETMASK, &oset, nil);

	if (err != 0) {
		fatalf("pthread_create failed: %s", strerror(err));
	}
}
// threadentry is the entry point for threads created by
// _cgo_sys_thread_start: it copies then frees the heap-allocated ThreadStart,
// installs g for this thread, and calls into the Go function.
static void*
threadentry(void *v)
{
	ThreadStart ts;

	ts = *(ThreadStart*)v;
	free(v);

	/*
	 * Set specific keys.
	 */
	setg_gcc((void*)ts.g);

	crosscall_386(ts.fn);
	return nil;
}
| {
"pile_set_name": "Github"
} |
<?php
namespace calderawp\calderaforms\cf2\Fields\Handlers;
/**
 * Contract for file-field upload handlers.
 */
interface UploaderContract
{

    /**
     * Do the upload
     *
     * @param array $file File to upload
     * @param array $args Optional. Additional args to pass to upload function
     * @return mixed
     */
    public function upload($file, array $args = array());

    /**
     * Add upload related filters
     *
     * Changes directory name
     *
     * @since 1.8.0
     *
     * @param string $fieldId The field ID for file field
     * @param string $formId The form ID
     * @param boolean $private Whether the upload is private
     * @param string $transientId Transient identifier for the upload — presumably used to key temporary storage; confirm with implementations
     * @return void
     */
    public function addFilter($fieldId, $formId, $private, $transientId);

    /**
     * Remove upload related filters
     *
     * @since 1.8.0
     *
     * @return void
     */
    public function removeFilter();

    /**
     * Schedule file to be deleted as soon as possible
     *
     * @since 1.8.0
     *
     * @param string $fieldId ID of field
     * @param string $formId ID of form
     * @param string $file Path to file to delete.
     *
     * @return bool
     */
    public function scheduleFileDelete($fieldId,$formId,$file);

    /**
     * Check if file is too large to upload
     *
     * @since 1.8.0
     *
     * @param array $field Field config
     * @param string $filePath Path to file to check
     *
     * @return bool
     */
    public function isFileTooLarge(array $field,$filePath);
}
"pile_set_name": "Github"
} |
"use strict";
var inherits = require('util').inherits
, EventEmitter = require('events').EventEmitter
, Connection = require('./connection')
, Query = require('./commands').Query
, Logger = require('./logger')
, f = require('util').format;
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
var _id = 0;
/**
* Creates a new Pool instance
* @class
* @param {string} options.host The server host
* @param {number} options.port The server port
* @param {number} [options.size=5] Server connection pool size
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
* @param {number} [options.socketTimeout=0] TCP Socket timeout setting
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
* @param {boolean} [options.ssl=false] Use SSL for connection
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
* @param {Buffer} [options.cert] SSL Certificate binary buffer
* @param {Buffer} [options.key] SSL Key file binary buffer
* @param {string} [options.passPhrase] SSL Certificate pass phrase
* @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
* @fires Pool#connect
* @fires Pool#close
* @fires Pool#error
* @fires Pool#timeout
* @fires Pool#parseError
* @return {Pool} A cursor instance
*/
/**
 * Creates a new Pool instance
 * @class
 * @param {string} options.host The server host
 * @param {number} options.port The server port
 * @param {number} [options.size=5] Server connection pool size
 * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
 * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
 * @param {boolean} [options.noDelay=true] TCP Connection no delay
 * @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
 * @param {number} [options.socketTimeout=0] TCP Socket timeout setting
 * @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trading peak memory for serialization speed
 * @param {boolean} [options.ssl=false] Use SSL for connection
 * @param {Buffer} [options.ca] SSL Certificate store binary buffer
 * @param {Buffer} [options.cert] SSL Certificate binary buffer
 * @param {Buffer} [options.key] SSL Key file binary buffer
 * @param {string} [options.passPhrase] SSL Certificate pass phrase
 * @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates
 * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
 * @fires Pool#connect
 * @fires Pool#close
 * @fires Pool#error
 * @fires Pool#timeout
 * @fires Pool#parseError
 * @return {Pool} A pool instance
 */
var Pool = function(options) {
  var self = this;
  // Add event listener
  EventEmitter.call(this);
  // Set empty if no options passed
  this.options = options || {};
  this.size = typeof options.size == 'number' ? options.size : 5;
  // Message handler
  this.messageHandler = options.messageHandler;
  // No bson parser passed in
  if(!options.bson) throw new Error("must pass in valid bson parser");
  // Contains all connections
  this.connections = [];
  this.state = DISCONNECTED;
  // Round robin index
  this.index = 0;
  this.dead = false;
  // Logger instance
  this.logger = Logger('Pool', options);
  // Pool id
  this.id = _id++;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
}

inherits(Pool, EventEmitter);
// Connection event handlers. Each factory closes over the pool (`self`) and
// returns a listener for one pooled connection. Any single connection
// error/timeout/close/parse error marks the whole pool dead, destroys every
// connection and re-emits the event on the pool.
//
// NOTE: these log `self.dead` (the pool's state). The previous code logged
// `this.dead`, but inside the listener `this` is the emitting connection,
// not the pool, so the logged value was not the pool's dead flag.
var errorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('error', err, self);
    }
  }
}

var timeoutHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] timed out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('timeout', err, self);
    }
  }
}

var closeHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] closed [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('close', err, self);
    }
  }
}

var parseErrorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('parseError', err, self);
    }
  }
}

// Tracks successful connections; once every socket in the pool has connected
// the pool transitions to CONNECTED and emits 'connect'.
var connectHandler = function(self) {
  return function(connection) {
    self.connections.push(connection);
    // We have connected to all servers
    if(self.connections.length == self.size) {
      self.state = CONNECTED;
      // Done connecting
      self.emit("connect", self);
    }
  }
}
/**
* Destroy pool
* @method
*/
Pool.prototype.destroy = function() {
this.state = DESTROYED;
// Set dead
this.dead = true;
// Destroy all the connections
this.connections.forEach(function(c) {
// Destroy all event emitters
["close", "message", "error", "timeout", "parseError", "connect"].forEach(function(e) {
c.removeAllListeners(e);
});
// Destroy the connection
c.destroy();
});
}
// Prefer setImmediate for deferring connection setup; fall back to
// process.nextTick on Node versions where setImmediate does not exist.
var execute = null;

try {
  execute = setImmediate;
} catch(err) {
  execute = process.nextTick;
}
/**
* Connect pool
* @method
*/
Pool.prototype.connect = function(_options) {
var self = this;
// Set to connecting
this.state = CONNECTING
// No dead
this.dead = false;
// Ensure we allow for a little time to setup connections
var wait = 1;
// Connect all sockets
for(var i = 0; i < this.size; i++) {
setTimeout(function() {
execute(function() {
self.options.messageHandler = self.messageHandler;
var connection = new Connection(self.options);
// Add all handlers
connection.once('close', closeHandler(self));
connection.once('error', errorHandler(self));
connection.once('timeout', timeoutHandler(self));
connection.once('parseError', parseErrorHandler(self));
connection.on('connect', connectHandler(self));
// Start connection
connection.connect(_options);
});
}, wait);
// wait for 1 miliseconds before attempting to connect, spacing out connections
wait = wait + 1;
}
}
/**
* Get a pool connection (round-robin)
* @method
* @return {Connection}
*/
/**
 * Get a pool connection (round-robin)
 * @method
 * @return {Connection}
 */
Pool.prototype.get = function() {
  // Hand out the connection at the cursor, then advance the cursor
  // modulo the pool size.
  var cursor = this.index;
  this.index = (cursor + 1) % this.connections.length;
  return this.connections[cursor];
}
/**
* Reduce the poolSize to the provided max connections value
* @method
* @param {number} maxConnections reduce the poolsize to maxConnections
*/
/**
 * Reduce the poolSize to the provided max connections value
 * @method
 * @param {number} maxConnections reduce the poolsize to maxConnections
 */
Pool.prototype.capConnections = function(maxConnections) {
  // Nothing to do when the pool is already within the cap
  if(this.connections.length <= maxConnections) return;
  // Split off the surplus and keep only the first maxConnections
  var surplus = this.connections.slice(maxConnections);
  this.connections = this.connections.slice(0, maxConnections);
  // If the round-robin cursor now points past the end, go back to the
  // beginning of the pool
  if(this.index >= maxConnections) {
    this.index = 0;
  }
  // Detach listeners and tear down the surplus connections
  for(var i = 0; i < surplus.length; i++) {
    var c = surplus[i];
    c.removeAllListeners('close');
    c.removeAllListeners('error');
    c.removeAllListeners('timeout');
    c.removeAllListeners('parseError');
    c.removeAllListeners('connect');
    c.destroy();
  }
}
/**
* Get all pool connections
* @method
* @return {array}
*/
/**
 * Get all pool connections
 * @method
 * @return {array} shallow copy, so callers cannot mutate the pool's list
 */
Pool.prototype.getAll = function() {
  return this.connections.concat();
}
/**
* Is the pool connected
* @method
* @return {boolean}
*/
/**
 * Is the pool connected
 * @method
 * @return {boolean} true only when every socket reports connected AND the
 * pool itself has reached the CONNECTED state
 */
Pool.prototype.isConnected = function() {
  var allUp = this.connections.every(function(c) { return c.isConnected(); });
  if(!allUp) return false;
  return this.state == CONNECTED;
}
/**
* Was the pool destroyed
* @method
* @return {boolean}
*/
/**
 * Was the pool destroyed
 * @method
 * @return {boolean}
 */
Pool.prototype.isDestroyed = function() {
  return DESTROYED == this.state;
}
/**
* A server connect event, used to verify that the connection is up and running
*
* @event Pool#connect
* @type {Pool}
*/
/**
* The server connection closed, all pool connections closed
*
* @event Pool#close
* @type {Pool}
*/
/**
* The server connection caused an error, all pool connections closed
*
* @event Pool#error
* @type {Pool}
*/
/**
* The server connection timed out, all pool connections closed
*
* @event Pool#timeout
* @type {Pool}
*/
/**
* The driver experienced an invalid message, all pool connections closed
*
* @event Pool#parseError
* @type {Pool}
*/
module.exports = Pool;
| {
"pile_set_name": "Github"
} |
// Virtual-dispatch ordering test. Each Drawable stores a rank (sr);
// checkItem() asserts the rank equals the number of checks performed so
// far, then bumps the shared counter.
int checkWasCalled = 0;
class Drawable {
  int sr;
  void checkItem() {
    _checkEqual(sr, checkWasCalled);
    checkWasCalled++;
  }
  void setItem(int r) {
    sr =r;
  }
}
// A/B/C each set their own rank in the constructor; constructors chain
// A() -> B() -> C(), so the most-derived constructor runs last and its
// setItem() value wins.
class A extends Drawable {
  A() {
    setItem(2);
  }
  // Overrides only delegate to super, exercising virtual dispatch
  void checkItem() {
    super.checkItem();
  }
}
class B extends A {
  B() {
    setItem(1);
  }
  void checkItem() {
    super.checkItem();
  }
}
class C extends B {
  C() {
    setItem(0);
  }
  void checkItem() {
    super.checkItem();
  }
}
// Instances end up with sr = 0 (c), 1 (b), 2 (a); checking in that order
// keeps sr in lockstep with the shared counter.
C c; B b; A a;
c = new C();
b = new B();
a = new A();
c.checkItem();
b.checkItem();
a.checkItem();
_checkEqual(checkWasCalled, 3);
| {
"pile_set_name": "Github"
} |
// Build a source string combining a global var declaration, the decompiled
// source of f (Function.prototype.toSource, SpiderMonkey-only), and a call
// to f(); used to exercise the shell's bytecode cache.
test = (function () {
  function f() {
    [1,2,3,4,5];
  };
  return "var obj = { x : 2 };" + f.toSource() + "; f()";
})();
evalWithCache(test, {});
// Evaluate the same cache entry twice: first saving the compiled bytecode,
// then loading it back (while saving again). cacheEntry and evaluate are
// SpiderMonkey shell builtins.
function evalWithCache(code, ctx) {
  code = cacheEntry(code);
  ctx.compileAndGo = true;
  var res1 = evaluate(code, Object.create(ctx, {saveBytecode: { value: true } }));
  var res2 = evaluate(code, Object.create(ctx, {loadBytecode: { value: true }, saveBytecode: { value: true } }));
}
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/surface/channel.h"
#include <inttypes.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/completion_queue.h"
// Returns the channel's current connectivity state, optionally kicking off
// a connection attempt. Only valid on client channels; any other channel
// type logs an error and reports SHUTDOWN.
grpc_connectivity_state grpc_channel_check_connectivity_state(
    grpc_channel* channel, int try_to_connect) {
  /* forward through to the underlying client channel */
  grpc_channel_element* client_channel_elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  // Exec contexts must be live for the duration of the call
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  grpc_connectivity_state state;
  GRPC_API_TRACE(
      "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
      (channel, try_to_connect));
  // Client channels have the client-channel filter as the last stack element
  if (GPR_LIKELY(client_channel_elem->filter == &grpc_client_channel_filter)) {
    state = grpc_client_channel_check_connectivity_state(client_channel_elem,
                                                         try_to_connect);
    return state;
  }
  gpr_log(GPR_ERROR,
          "grpc_channel_check_connectivity_state called on something that is "
          "not a client channel, but '%s'",
          client_channel_elem->filter->name);
  // API misuse: report SHUTDOWN rather than crashing
  return GRPC_CHANNEL_SHUTDOWN;
}
// Lifecycle of a connectivity watch: WAITING until either the state changes
// or the deadline fires; READY_TO_CALL_BACK once the first of the two has
// happened; CALLING_BACK_AND_FINISHED once the completion has been queued.
typedef enum {
  WAITING,
  READY_TO_CALL_BACK,
  CALLING_BACK_AND_FINISHED,
} callback_phase;
namespace {
// Tracks a single grpc_channel_watch_connectivity_state() request.
struct state_watcher {
  gpr_mu mu;                        // guards phase/error transitions
  callback_phase phase;             // current lifecycle phase (see above)
  grpc_closure on_complete;         // fired when connectivity changes
  grpc_closure on_timeout;          // fired when the deadline alarm pops
  grpc_closure watcher_timer_init;  // deferred arming of the alarm
  grpc_timer alarm;                 // deadline timer
  grpc_connectivity_state state;    // last observed connectivity state
  grpc_completion_queue* cq;        // where the result is delivered
  grpc_cq_completion completion_storage;
  grpc_channel* channel;            // watched channel (internal ref held)
  grpc_error* error;                // result reported to the caller
  void* tag;                        // caller-supplied completion tag
};
}  // namespace
// Releases the watcher's channel ref and frees it. Aborts if the channel is
// not a client channel — that should be impossible, since the watch could
// not have been registered on any other kind of channel.
static void delete_state_watcher(state_watcher* w) {
  grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
      grpc_channel_get_channel_stack(w->channel));
  if (client_channel_elem->filter == &grpc_client_channel_filter) {
    GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_channel_connectivity");
  } else {
    abort();
  }
  gpr_mu_destroy(&w->mu);
  gpr_free(w);
}
// Completion-queue done-callback: runs after the application has consumed
// the watch result. The watcher may only be deleted once it has reached
// CALLING_BACK_AND_FINISHED.
static void finished_completion(void* pw, grpc_cq_completion* ignored) {
  bool should_delete = false;
  state_watcher* w = static_cast<state_watcher*>(pw);
  gpr_mu_lock(&w->mu);
  switch (w->phase) {
    case WAITING:
    case READY_TO_CALL_BACK:
      // The completion cannot be consumed before it was queued
      GPR_UNREACHABLE_CODE(return );
    case CALLING_BACK_AND_FINISHED:
      should_delete = true;
      break;
  }
  gpr_mu_unlock(&w->mu);
  // Delete outside the lock: delete_state_watcher destroys the mutex
  if (should_delete) {
    delete_state_watcher(w);
  }
}
// Invoked twice per watch: once by the connectivity callback
// (due_to_completion == true) and once by the timer path (== false). The
// first invocation records the result, cancels the other path and moves the
// watcher to READY_TO_CALL_BACK; the second queues the completion on the cq.
static void partly_done(state_watcher* w, bool due_to_completion,
                        grpc_error* error) {
  if (due_to_completion) {
    // Connectivity fired first: stop the deadline alarm
    grpc_timer_cancel(&w->alarm);
  } else {
    // Timer fired first: cancel the pending connectivity watch by
    // re-registering with a null state pointer
    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
        grpc_channel_get_channel_stack(w->channel));
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset(grpc_cq_pollset(w->cq)),
        nullptr, &w->on_complete, nullptr);
  }
  gpr_mu_lock(&w->mu);
  // Normalize the error: a completed watch always reports success; a timer
  // pop means "timed out" unless the timer was merely cancelled
  if (due_to_completion) {
    if (grpc_trace_operation_failures.enabled()) {
      GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
    }
    GRPC_ERROR_UNREF(error);
    error = GRPC_ERROR_NONE;
  } else {
    if (error == GRPC_ERROR_NONE) {
      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "Timed out waiting for connection state change");
    } else if (error == GRPC_ERROR_CANCELLED) {
      error = GRPC_ERROR_NONE;
    }
  }
  switch (w->phase) {
    case WAITING:
      // First of the two callbacks: stash the error, wait for the other
      GRPC_ERROR_REF(error);
      w->error = error;
      w->phase = READY_TO_CALL_BACK;
      break;
    case READY_TO_CALL_BACK:
      if (error != GRPC_ERROR_NONE) {
        // Only the timer path can still deliver an error at this point
        GPR_ASSERT(!due_to_completion);
        GRPC_ERROR_UNREF(w->error);
        GRPC_ERROR_REF(error);
        w->error = error;
      }
      w->phase = CALLING_BACK_AND_FINISHED;
      grpc_cq_end_op(w->cq, w->tag, w->error, finished_completion, w,
                     &w->completion_storage);
      break;
    case CALLING_BACK_AND_FINISHED:
      GPR_UNREACHABLE_CODE(return );
      break;
  }
  gpr_mu_unlock(&w->mu);
  GRPC_ERROR_UNREF(error);
}
// Thin adapters binding the two completion paths to partly_done().
static void watch_complete(void* pw, grpc_error* error) {
  partly_done(static_cast<state_watcher*>(pw), true, GRPC_ERROR_REF(error));
}
static void timeout_complete(void* pw, grpc_error* error) {
  partly_done(static_cast<state_watcher*>(pw), false, GRPC_ERROR_REF(error));
}
// Number of externally registered connectivity watchers on this channel;
// forwarded to the client-channel filter.
int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
  grpc_channel_element* client_channel_elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  return grpc_client_channel_num_external_connectivity_watchers(
      client_channel_elem);
}
// Packages the deadline for deferred arming of the alarm: the closure below
// is handed to the client channel (see grpc_channel_watch_connectivity_state)
// and invoked there, at which point the timer is started and the arg freed.
typedef struct watcher_timer_init_arg {
  state_watcher* w;
  gpr_timespec deadline;
} watcher_timer_init_arg;
static void watcher_timer_init(void* arg, grpc_error* error_ignored) {
  watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(arg);
  grpc_timer_init(&wa->w->alarm, grpc_timespec_to_millis_round_up(wa->deadline),
                  &wa->w->on_timeout);
  gpr_free(wa);
}
// Connectivity watching is supported only when the channel stack terminates
// in the client-channel filter (i.e. this is a client channel).
int grpc_channel_support_connectivity_watcher(grpc_channel* channel) {
  grpc_channel_element* elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  return elem->filter == &grpc_client_channel_filter ? 1 : 0;
}
// Registers a watch: `tag` is signalled on `cq` either when the channel's
// connectivity state differs from last_observed_state or when the deadline
// expires, whichever happens first (see partly_done above).
void grpc_channel_watch_connectivity_state(
    grpc_channel* channel, grpc_connectivity_state last_observed_state,
    gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
  grpc_channel_element* client_channel_elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  state_watcher* w = static_cast<state_watcher*>(gpr_malloc(sizeof(*w)));
  GRPC_API_TRACE(
      "grpc_channel_watch_connectivity_state("
      "channel=%p, last_observed_state=%d, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "cq=%p, tag=%p)",
      7,
      (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
       (int)deadline.clock_type, cq, tag));
  // Reserve the completion slot up front; partly_done finishes it later
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  gpr_mu_init(&w->mu);
  GRPC_CLOSURE_INIT(&w->on_complete, watch_complete, w,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&w->on_timeout, timeout_complete, w,
                    grpc_schedule_on_exec_ctx);
  w->phase = WAITING;
  w->state = last_observed_state;
  w->cq = cq;
  w->tag = tag;
  w->channel = channel;
  w->error = nullptr;
  // The alarm is armed via watcher_timer_init, which is handed to the
  // client channel together with the watch itself
  watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(
      gpr_malloc(sizeof(watcher_timer_init_arg)));
  wa->w = w;
  wa->deadline = deadline;
  GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
                    grpc_schedule_on_exec_ctx);
  if (client_channel_elem->filter == &grpc_client_channel_filter) {
    // Hold a channel ref for the watch's lifetime (released in
    // delete_state_watcher)
    GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        client_channel_elem,
        grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &w->state,
        &w->on_complete, &w->watcher_timer_init);
  } else {
    // Watching a non-client channel is a caller bug
    abort();
  }
}
| {
"pile_set_name": "Github"
} |
import { assert } from 'chai';
import { sortGraph } from '../src/gmath';
import { expectEitherRight, expectEitherError } from './helpers';
// Unit tests for sortGraph(vertices, edges): topological sort returning an
// Either — Right carries the sorted vertex list, Left carries an error
// (e.g. LOOPS_DETECTED for cyclic graphs).
describe('Graph math', () => {
  describe('Topological sorting', () => {
    it('should return [] for empty graph', () => {
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, []);
      }, sortGraph([], []));
    });
    it('should return single vertex for single-vertex graph', () => {
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [42]);
      }, sortGraph([42], []));
    });
    it('should return vertexes as is if there are no edges', () => {
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [42, 43, 44]);
      }, sortGraph([42, 43, 44], []));
    });
    it('should return vertexes as is if already sorted', () => {
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [42, 43, 44]);
      }, sortGraph([42, 43, 44], [[42, 43], [43, 44]]));
    });
    it('should return sorted vertexes if given vertexes are inversed', () => {
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [42, 43, 44]);
      }, sortGraph([44, 43, 42], [[42, 43], [43, 44]]));
    });
    it('should throw error for cycled graph', () => {
      // 42 -> 43 -> 42 forms a loop, so sorting must fail
      expectEitherError(
        'LOOPS_DETECTED {}',
        sortGraph([42, 43, 44], [[42, 43], [43, 42]])
      );
    });
    it('should sort diamond graph', () => {
      // 42 fans out to 43/44 which rejoin at 45
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [42, 44, 43, 45]);
      }, sortGraph([44, 43, 42, 45], [[42, 43], [42, 44], [43, 45], [44, 45]]));
    });
    it('should sort clusters', () => {
      // Two disconnected chains: 42 -> 43 and 44 -> 45
      expectEitherRight(sorted => {
        assert.deepEqual(sorted, [44, 42, 45, 43]);
      }, sortGraph([44, 43, 42, 45], [[42, 43], [44, 45]]));
    });
  });
});
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-25 18:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Client.jwt_alg column so each OIDC client can choose the JWT
    # signing algorithm (symmetric HS256 vs asymmetric RS256; RS256 is the
    # default). Auto-generated by Django 1.9 makemigrations — avoid editing
    # applied migrations by hand.

    dependencies = [
        ('oidc_provider', '0013_auto_20160407_1912'),
    ]

    operations = [
        migrations.AddField(
            model_name='client',
            name='jwt_alg',
            field=models.CharField(
                choices=[(b'HS256', b'HS256'), (b'RS256', b'RS256')],
                default=b'RS256',
                max_length=10,
                verbose_name='JWT Algorithm'),
        ),
    ]
| {
"pile_set_name": "Github"
} |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/elasticfilesystem/model/ListTagsForResourceRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/http/URI.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <utility>
using namespace Aws::EFS::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws::Http;
// Default-constructs the request with every field marked "not set", so only
// explicitly assigned parameters are serialized into the query string.
ListTagsForResourceRequest::ListTagsForResourceRequest() :
    m_resourceIdHasBeenSet(false),
    m_maxResults(0),
    m_maxResultsHasBeenSet(false),
    m_nextTokenHasBeenSet(false)
{
}
// All parameters of this request travel in the URI (paging values are added
// in AddQueryStringParameters below), so the HTTP body is empty.
Aws::String ListTagsForResourceRequest::SerializePayload() const
{
  return {};
}
// Appends the optional paging parameters to the request URI. Only values
// that were explicitly set on the request are emitted.
void ListTagsForResourceRequest::AddQueryStringParameters(URI& uri) const
{
    if(m_maxResultsHasBeenSet)
    {
      Aws::StringStream maxResultsStream;
      maxResultsStream << m_maxResults;
      uri.AddQueryStringParameter("MaxResults", maxResultsStream.str());
    }

    if(m_nextTokenHasBeenSet)
    {
      Aws::StringStream nextTokenStream;
      nextTokenStream << m_nextToken;
      uri.AddQueryStringParameter("NextToken", nextTokenStream.str());
    }
}
| {
"pile_set_name": "Github"
} |
# Practical Python Programming - Instructor Notes
Author: David Beazley
## Overview
This document provides some general notes and advice on teaching the
content of my “Practical Python” course including objectives, target
audience, tricky bits, etc.
These instructions were given to people teaching the course in
a typical three-day corporate training environment. They might
give you some insight about teaching your own course.
## Target Audience and General Approach
This course is intended to be an “Introduction to Python” course for
people who already have some programming experience. This is
definitely not a course designed to teach people “programming 101.”
Having said that, I have observed that the typical student in a Python
course is also not likely to be a hard-core software engineer or
programmer. Instead, you are probably going to get a mix of
engineers, scientists, web programmers, and more inexperienced
developers. Student background varies widely. You might have some
students with a lot of C,C++, Java experience, others might know PHP
and HTML, others may be coming from tools like MATLAB, and others
still might have almost no traditional “programming” experience at all
despite my best attempts to make the prerequisites clear.
With this in mind, the course aims to teach Python through the general
problem of manipulating data (stock market data in particular). This
domain has been chosen because it’s simple and something everyone
should know about regardless of their background. Just as an example,
students with weak programming skills are still likely to know about
common things like using a spreadsheet (e.g., Excel). So, if they’re
really stuck, you can tell them things like “well, this list of tuples
is kind of like rows of data in a spreadsheet” or “a list
comprehension is the same idea as applying an operation to a
spreadsheet column and putting the result in a different column.” The
key idea is to stay grounded in a real-world setting as opposed to
getting sidetracked into esoteric “computer science” problems (e.g.,
“let’s go compute fibonacci numbers.”).
This problem domain also works well for introducing other programming
topics. For example, scientists/engineers might want to know about
data analysis or plotting. So, you can show them how to make a plot
using matplotlib. Web programmers might want to know how to present
stock market data on a web-page. So, you can talk about template
engines. Syadmins might want to do something with log files. So, you
can point them at a log file of real-time streaming stock data.
Software engineers might want to know about design. So, you can have
them look at ways to encapsulate stock data inside an object or making
a program extensible (e.g., how would you make this program produce output
in 10 different table formats). You get the idea.
## Presentation Guidelines
The presentation slides (notes) are there to provide a narrative
structure to the course and for reference by students when they work
on exercises. Do not laboriously go over every bullet point on every
slide--assume that students can read and that they will have time to
go back when coding. I tend to go through the slides at a pretty
brisk pace, showing short examples interactively as I go. I often
skip slides entirely in favor of live demos. For example, you don't
really need to do a bunch of slides on lists. Just go to the
interpreter and do some list examples live instead. Rule of thumb: No
more than 1 minute per slide unless it’s something unusually tricky.
Honestly, you could probably skip most of the slides and simply
lecture using live demos if you feel that it works for you. I often
do this.
## Course Exercises
The course has about 130 hands-on exercises. If you do every single
exercise and give students time to think and code, it will likely take
them about 10-12 hours. In practice, you will probably find that students
require more time on certain exercises. I have some notes about this
below.
You should repeatedly emphasize to students that solution code is
available and that it is okay to look at it and copy it--especially
due to time requirements.
Prior to teaching the course, I would strongly advise that you go
through and work every single course exercise so that there are no
surprises.
During course delivery, I usually work every single exercise from
scratch, without looking at the solution, on my computer while the
students also work. For this, I strongly advise you to have a printed
copy of the exercises on hand that you can look at without having to
pull it up on the computer screen (which is being projected). Near
the end of the exercise time period, I will start discussing my
solution code, emphasizing different bits on the screen and talking
about them. If there are any potential problems with the solution
(including design considerations), I’ll also talk about it. Emphasize
to students that they may want to look at/copy solution code before
going forward.
## Section 1: Introduction
The major goal of this section is to get people started with the
environment. This includes using the interactive shell and
editing/run short programs. By the end of the section, students
should be able to write short scripts that read data files and perform
small calculations. They will know about numbers, strings, lists, and
files. There will also be some exposure to functions, exceptions, and
modules, but a lot of details will be missing.
The first part of this course is often the longest because students
are new to the tools and may have various problems getting things to
work. It is absolutely critical that you go around the room and make
sure that everyone can edit, run, and debug simple programs. Make
sure Python is installed correctly. Make sure they have the course
exercises downloaded. Make sure the internet works. Fix anything
else that comes up.
Timing: I aim to finish section 1 around lunch on the first day.
## Section 2 : Working with Data
This section is probably the most important in the course. It covers
the basics of data representation and manipulation including tuples,
lists, dicts, and sets.
Section 2.2 is the most important. Give students as much time as
they need to get exercises working within reason. Depending on audience,
the exercises might last 45 minutes. In the middle of this exercise,
I will often move forward to Section 2.3 (formatted printing) and
give students more time to keep working. Together, Sections 2.2/2.3
might take an hour or more.
Section 2.4 has people explore the use of enumerate(), and zip(). I
consider these functions essential so don’t skimp on it.
Section 2.5 introduces the collections module. There is a LOT that
could be said about collections, but it won't be fully appreciated by
students at this time. Approach this more from the standpoint of
"here's this cool module you should look at later. Here are a few cool
examples."
Section 2.6 introduces list comprehensions which are an important
feature for processing list data. Emphasize to students that list
comprehensions are very similar to things like SQL database queries.
At the end of this exercise, I often do an interactive demo involving
something more advanced. Maybe do a list comprehension and plot some
data with matplotlib. Also an opportunity to introduce Jupyter if
you're so inclined.
Section 2.7 is the most sophisticated exercise. It relates to the use
of first-class data in Python and the fact that data structures like
lists can hold any kind of object that you want. The exercises are
related to parsing columns of data in CSV files and concepts are later reused in
Section 3.2.
Timing: Ideally, you want to be done with section 2 on the first day.
However, it is common to finish with section 2.5 or 2.6. So, don't
panic if you feel that you're a bit behind.
## 3. Program Organization
The main goal of this section is to introduce more details about
functions and to encourage students to use them. The section builds
from functions into modules and script writing.
Section 3.1 is about going from simple “scripting” to functions.
Students should be discouraged from writing disorganized “scripts.”
Instead, code should at least be modularized into functions. It makes
the code easier to understand, it makes it easier to make changes
later, and it actually runs a little bit faster. Functions are good.
Section 3.2 is probably the most advanced set of exercises in the
whole course. It has students write a general purpose utility
function for parsing column-oriented data. However, it makes heavy
use of list comprehensions as well as lists of functions (e.g.,
functions as first-class objects). You will probably need to guide
people through every single step of this code, showing how it works in
great detail. The payoff is huge however---you can show people a
short general purpose function that does something amazingly powerful
and which would be virtually impossible to write in C, C++, or Java
without having a *LOT* of very complicated code. There are a lot of
possible design/discussion avenues for this code. Use your
imagination.
Section 3.3 adds error handling to the function created in Section 3.2
This is a good time to talk about exception handling generally.
Definitely talk about the dangers of catching all exceptions. This
might be a good time to talk about the “Errors should never pass
silently” item on the “Zen of Python.”
*Note: Before Exercise 3.4, make sure students get fully working versions of report.py, pcost.py, and fileparse.py. Copy from Solutions folder if needed *
Section 3.4 Introduces module imports. The file written in Section
3.2-3.3 is used to simplify code in Section 3.1. Be aware that you
may need to help students fix issues with IDLE, sys.path, and other
assorted settings related to import.
Section 3.5 talks about `__main__` and script writing. There's a bit
about command line arguments. You might be inclined to discuss a
module like argparse. However, be warned that doing so opens up
a quagmire. It's usually better to just mention it and move on.
Section 3.6 opens up a discussion about design more generally in Python.
Is it better to write code that's more flexible vs code that's
hardwired to only work with filenames? This is the first place
where you make a code change and have to refactor existing code.
Going forward from here, most of the exercises make small changes
to code that's already been written.
## 4. Classes and Objects
This section is about very basic object oriented programming. In
general, it is not safe to assume that people have much background in
OO. So, before starting this, I usually generally describe the OO
“style” and how it's data and methods bundled together. Do some
examples with strings and lists to illustrate that they are “objects”
and that the methods (invoked via .) do things with the object.
Emphasize how the methods are attached to the object itself. For
example, you do items.append(x), you don’t call a separate function
append(items, x).
Section 4.1 introduces the class statement and shows people how to
make a basic object. Really, this just introduces classes as another
way to define a simple data structure--relating back to using tuples
and dicts for this purpose in section 2.
Section 4.2 is about inheritance and how you use it to create extensible
programs. This set of exercises is probably the most significant in terms of
OO programming and OO design. Give students a lot of time to work on
it (30-45 minutes). Depending on interest, you can spend a LOT of
time discussing aspects of OO. For example, different
design patterns, inheritance hierarchies, abstract base classes, etc.
Section 4.3 does a few experiments with special methods. I wouldn't
spend too much time fooling around with this. Special methods come up
a bit later in Exercise 6.1 and elsewhere.
Timing: This is usually the end of the 2nd day.
## 5. Inside Objects
This section takes students behind the scenes of the object system and
how it’s built using dictionaries, how instances and classes are tied
together, and how inheritance works. However, most important part of
this section is probably the material about encapsulation (private
attributes, properties, slots, etc.)
Section 5.1 just peels back the covers and has students observe and
play with the underlying dicts of instances and classes.
Section 5.2 is about hiding attributes behind get/set functions and
using properties. I usually emphasize that these techniques are
commonly used in libraries and frameworks--especially in situations
where more control over what a user is allowed to do is desired.
An astute Python master will notice that I do not talk about advanced
topics such as descriptors, or attribute access methods (`__getattr__`,
`__setattr__`) at all. I have found, through experience, that this is
just too much mental overload for students taking the intro course.
Everyone’s head is already on the verge of exploding at this point and
if you go talk about how something like descriptors work, you’ll lose
them for the rest of the day, if not the rest of the course. Save it
for an "Advanced Python" course.
If you're looking at the clock thinking "There's no way I'm going to
finish this course", you can skip section 5 entirely.
## 6. Generators
The main purpose of this section is to introduce generators as a way
to define custom iteration and to use them for various problems
related to data handling. The course exercises have students analyze
streaming data in the form of stock updates being written to a log
file.
There are two big ideas to emphasize. First, generators can be used to
write code based on incremental processing. This can be very useful
for things like streaming data or huge datasets that are too large to
fit into memory all at once. The second idea is that you can chain
generators/iterators together to create processing pipelines (kind of
like Unix pipes). Again, this can be a really powerful way to process
and think about streams, large datasets, etc.
Some omissions: Although the iteration protocol is described, the
notes don’t go into detail about creating iterable objects (i.e.,
classes with `__iter__()` and `next()`). In practice, I’ve found that
it’s not necessary to do this so often (generators are often
better/easier). So, in the interest of time, I’ve made a conscious
decision to omit it. Also not included are extended generators
(coroutines) or uses of generators for concurrency (tasklets, etc.).
That’s better covered in advanced courses.
## 7. Advanced Topics
Basically this section is an assortment of more advanced topics that
could have been covered earlier, but weren’t for various reasons
related to course flow and content of the course exercises. If you
must know, I used to present this material earlier in the course, but
found that students were already overloaded with enough information.
Coming back to it later seems to work better---especially since by
this point, everyone is much more familiar with working in Python and
starting to get the hang of it.
Topics include variadic function arguments (*args, **kwargs), lambda,
closures, and decorators. Discussion of decorators is only a tiny
hint of what’s possible with metaprogramming. Feel free to say more
about what’s possible, but I’d probably stay out of metaclasses!
Lately, I have been demoing "numba" as an example of a more
interesting decorator.
If you're pressed for time, most of section 7 can be skipped or heavily
compressed (you could skip exercises for instance).
## 8. Testing and Debugging
The main purpose of this section is just to introduce various tools
and techniques related to testing, debugging, and software
development. Show everyone the unittest module. Introduce the
logging module. Discuss assertions and the idea of “contracts.” Show
people the debugger and profiler. Most of this is self-explanatory.
## 9. Packages
At this point, students have written an assortment of files (pcost.py,
report.py, fileparse.py, tableformat.py, stock.py, portfolio.py,
follow.py, etc.). Two main goals in this section. First, put all of
the code into a Python package structure. This is only a gentle
introduction to that, but they'll move the files into a directory and
everything will break. They'll need to fix their import statements
(package relative imports) and maybe fiddle with an `__init__.py` file.
Second goal, write a simple setup.py file that they can use to package
up the code and give it away to someone. That's it. End of the
course.
[Contents](Contents.md)
| {
"pile_set_name": "Github"
} |
package crazypants.enderio.base.integration.jei;
import javax.annotation.Nonnull;
import com.enderio.core.common.util.NNList;
import net.minecraft.item.crafting.IRecipe;
/**
 * Safe facade over the optional JEI integration: every call degrades to a
 * no-op (or an empty result) when the JEI runtime is not present.
 */
public class JeiAccessor {

  /** Flipped to true by the JEI plugin once the runtime has been injected. */
  static boolean jeiRuntimeAvailable = false;

  /** @return true when the JEI runtime has been made available */
  public static boolean isJeiRuntimeAvailable() {
    return jeiRuntimeAvailable;
  }

  /** Pushes the given text into JEI's item list filter (no-op without JEI). */
  public static void setFilterText(@Nonnull String filterText) {
    if (!jeiRuntimeAvailable) {
      return;
    }
    JeiPlugin.setFilterText(filterText);
  }

  /** @return JEI's current filter text, or the empty string without JEI */
  public static @Nonnull String getFilterText() {
    return jeiRuntimeAvailable ? JeiPlugin.getFilterText() : "";
  }

  /** Switches JEI to the crafting recipe view (no-op without JEI). */
  public static void showCraftingRecipes() {
    if (!jeiRuntimeAvailable) {
      return;
    }
    JeiPlugin.showCraftingRecipes();
  }

  /** Additional recipes registered for display through the JEI plugin. */
  static final @Nonnull NNList<IRecipe> ALTERNATIVES = new NNList<>();

  /** Registers an additional recipe to be shown in JEI. */
  public static void addAlternativeRecipe(@Nonnull IRecipe recipe) {
    ALTERNATIVES.add(recipe);
  }
}
| {
"pile_set_name": "Github"
} |
type_of_cancer: brca
cancer_study_identifier: brca_tcga_pub
name: Breast Invasive Carcinoma (TCGA, Nature 2012)
description: <a href="http://cancergenome.nih.gov/">The Cancer Genome Atlas (TCGA)</a> Breast Invasive Carcinoma project. 825 cases.<br><i>Nature 2012.</i> <a href="http://tcga-data.nci.nih.gov/tcga/">Raw data via the TCGA Data Portal</a>.
citation: TCGA, Nature 2012
pmid: 23000897
groups: PUBLIC;GDAC;SU2C-PI3K
short_name: BRCA (TCGA)
| {
"pile_set_name": "Github"
} |
<!doctype HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"><html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Language" content="en-us">
<meta name="VI60_defaultClientScript" content="JavaScript">
<meta name="GENERATOR" content="Microsoft FrontPage 12.0">
<meta name="keywords" content="Unicode Standard, copyright">
<meta name="ProgId" content="FrontPage.Editor.Document">
<title>Unicode Terms of Use</title>
<link rel="stylesheet" type="text/css"
href="http://www.unicode.org/webscripts/standard_styles.css">
<style type="text/css">
pre {
FONT-FAMILY: Arial, Geneva, sans-serif;
}
</style>
</head>
<body text="#330000">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td colspan="2">
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr>
<td class="icon"><a href="http://www.unicode.org/"><img border="0"
src="http://www.unicode.org/webscripts/logo60s2.gif" align="middle"
alt="[Unicode]" width="34" height="33"></a> <a class="bar"
href="http://www.unicode.org/copyright.html"><font size="3">Terms of
Use</font></a></td>
<td class="bar"><a href="http://www.unicode.org" class="bar">Home</a>
| <a href="http://www.unicode.org/sitemap/" class="bar">Site Map</a> |
<a href="http://www.unicode.org/search" class="bar">Search </a></td>
</tr>
</table>
</td>
</tr>
<tr>
<td colspan="2" class="gray"> </td>
</tr>
<tr>
<td valign="top" width="25%" class="navCol">
<table class="navColTable" border="0" width="100%" cellspacing="4"
cellpadding="0">
<tr>
<td class="navColTitle">Contents</td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#1">Unicode Copyright</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#2">Restricted Rights
Legend</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#3">Warranties &
Disclaimers</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#4">Waiver of Damages</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#5">Trademarks & Logos</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#7">Miscellaneous</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"><a href="#License">Data Files and
Software License Agreement (Exhibit 1)</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"> </td>
</tr>
</table>
<table class="navColTable" border="0" width="100%" cellspacing="4"
cellpadding="0">
<tr>
<td class="navColTitle">Related Links</td>
</tr>
<tr>
<td valign="top" class="navColCell">
<a href="http://www.unicode.org/policies/logo_policy.html">Trademark Policy</a></td>
</tr>
<tr>
<td valign="top" class="navColCell">
<a href="http://www.unicode.org/policies/policies.html">Unicode
Policies</a></td>
</tr>
<tr>
<td valign="top" class="navColCell"></td>
</tr>
</table>
<!-- BEGIN CONTENTS -->
<td>
<blockquote>
<h1>Unicode® Terms of Use</h1>
<p>For the general privacy policy governing access to this site, see
the
<a href="http://www.unicode.org/policies/privacy_policy.html">
Unicode Privacy Policy</a>. For trademark usage, see
<a href="http://www.unicode.org/policies/logo_policy.html">the
Unicode® Consortium Name and Trademark Usage Policy</a>.</p>
<table class="sidebar" align="right" width="50%" id="table1">
<tr>
<td class="sidebarTitle">Notice to End User: Terms of Use</td>
</tr>
<tr>
<td class="sidebar">Carefully read the following legal agreement
("Agreement"). Use or copying of the software and/or codes
provided with this agreement (The "Software") constitutes your
acceptance of these terms. If you have any questions about these terms of use, please <a href="http://www.unicode.org/contacts.html">contact the Unicode Consortium</a>.</td>
</tr>
</table>
<ol type="A">
<li><u><a name="1"></a>Unicode Copyright.</u>
<ol>
<li>Copyright © 1991-2016 Unicode, Inc. All rights reserved.</li>
<li>Certain documents and files on this website contain a legend
indicating that "Modification is permitted." Any person is
hereby authorized, without fee, to modify such documents and
files to create derivative works conforming to the Unicode®
Standard, subject to Terms and Conditions herein.</li>
<li>Any person is hereby authorized, without fee, to view, use,
reproduce, and distribute all documents and files solely for
informational purposes and in the creation of products supporting
the Unicode Standard, subject to the Terms and Conditions
herein.</li>
<li>Further specifications of rights and restrictions pertaining
to the use of the particular set of data files known as the
"Unicode Character Database" can be found in the
<a href="#License">License</a>.</li>
<li>Each version of the Unicode Standard has further
specifications of rights and restrictions of use. For the book
editions (Unicode 5.0 and earlier), these are found on the back
of the
<a href="http://www.unicode.org/versions/Unicode5.0.0/Title.pdf">title page</a>.
The online code charts carry specific restrictions. All other files, including online documentation of the core specification for Unicode 6.0 and later, are covered under these general Terms of Use.</li>
<li>No license is granted to "mirror" the Unicode website where
a fee is charged for access to the "mirror" site.</li>
<li>Modification is not permitted with respect to this document.
All copies of this document must be verbatim.</li>
</ol>
</li>
<li><u><a name="2"></a>Restricted Rights Legend</u>. Any technical
data or software which is licensed to the United States of
America, its agencies and/or instrumentalities under this
Agreement is commercial technical data or commercial computer
software developed exclusively at private expense as defined in
FAR 2.101, or DFARS 252.227-7014 (June 1995), as applicable. For
technical data, use, duplication, or disclosure by the Government
is subject to restrictions as set forth in DFARS 202.227-7015
Technical Data, Commercial and Items (Nov 1995) and this
Agreement. For Software, in accordance with FAR 12-212 or DFARS
227-7202, as applicable, use, duplication or disclosure by the
Government is subject to the restrictions set forth in this
Agreement.</li>
<li><u><a name="3"></a>Warranties and Disclaimers</u>.
<ol>
<li>This publication and/or website may include technical or
typographical errors or other inaccuracies. Changes are
periodically added to the information herein; these changes will
be incorporated in new editions of the publication and/or
website. Unicode may make improvements and/or changes in the
product(s) and/or program(s) described in this publication
and/or website at any time.</li>
<li>If this file has been purchased on magnetic or optical media
from Unicode, Inc. the sole and exclusive remedy for any claim
will be exchange of the defective media within ninety (90) days
of original purchase.</li>
<li>EXCEPT AS PROVIDED IN SECTION C.2, THIS PUBLICATION AND/OR
SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND EITHER
EXPRESS, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO,
ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, OR NON-INFRINGEMENT. UNICODE AND ITS LICENSORS ASSUME
NO RESPONSIBILITY FOR ERRORS OR OMISSIONS IN THIS PUBLICATION
AND/OR SOFTWARE OR OTHER DOCUMENTS WHICH ARE REFERENCED BY OR
LINKED TO THIS PUBLICATION OR THE UNICODE WEBSITE.</li>
</ol>
</li>
<li><u><a name="4"></a>Waiver of Damages.</u> In no event shall
Unicode or its licensors be liable for any special, incidental,
indirect or consequential damages of any kind, or any damages
whatsoever, whether or not Unicode was advised of the possibility
of the damage, including, without limitation, those resulting from
the following: loss of use, data or profits, in connection with
the use, modification or distribution of this information or its
derivatives.</li>
<li><u><a name="5"></a>Trademarks & Logos.</u>
<ol>
<li>The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of Unicode, Inc. Use of the information and materials found on this website indicates your acknowledgement of Unicode, Inc.’s exclusive worldwide rights in the Unicode Word Mark, the Unicode Logo, and the Unicode trade names.</li>
<li><a href="http://www.unicode.org/policies/logo_policy.html">The Unicode Consortium Name and Trademark Usage Policy</a> (“Trademark Policy”) are incorporated herein by reference and you agree to abide by the provisions of the Trademark Policy, which may be changed from time to time in the sole discretion of Unicode, Inc.</li>
<li>All third party trademarks referenced herein are the property of their respective owners.
</li>
</ol>
</li>
<li><u><a name="7"></a>Miscellaneous</u>.
<ol>
<li><u>Jurisdiction and Venue</u>. This server is operated from
a location in the State of California, United States of America.
Unicode makes no representation that the materials are
appropriate for use in other locations. If you access this
server from other locations, you are responsible for compliance
with local laws. This Agreement, all use of this site and any
claims and damages resulting from use of this site are governed
solely by the laws of the State of California without regard to
any principles which would apply the laws of a different
jurisdiction. The user agrees that any disputes regarding this
site shall be resolved solely in the courts located in Santa
Clara County, California. The user agrees said courts have
personal jurisdiction and agree to waive any right to transfer
the dispute to any other forum. </li>
<li><u>Modification by Unicode </u>Unicode shall have the right
to modify this Agreement at any time by posting it to this site.
The user may not assign any part of this Agreement without
Unicode’s prior written consent.</li>
<li><u>Taxes.</u> The user agrees to pay any taxes arising from
access to this website or use of the information herein, except
for those based on Unicode’s net income.</li>
<li><u>Severability</u>. If any provision of this
Agreement is declared invalid or unenforceable, the remaining
provisions of this Agreement shall remain in effect.</li>
<li><u>Entire Agreement</u>. This Agreement constitutes the
entire agreement between the parties. </li>
</ol>
</li>
</ol>
</blockquote>
<hr width="95%">
<blockquote>
<h3 align="center"><a name="Exhibit1">EXHIBIT 1</a><br>
<a name="License">UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE</a></h3>
<pre>
Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.
Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.
COPYRIGHT AND PERMISSION NOTICE
Copyright © 1991-2016 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
</pre>
</blockquote>
<hr width="50%">
<div align="center">
<center>
<table cellspacing="0" cellpadding="0" border="0" id="table2">
<tr>
<td><a href="http://www.unicode.org/copyright.html">
<img src="http://www.unicode.org/img/hb_notice.gif"
border="0" alt="Access to Copyright and terms of use"
width="216" height="50"></a></td>
</tr>
</table>
<script language="Javascript" type="text/javascript"
src="http://www.unicode.org/webscripts/lastModified.js">
</script>
</center>
</div>
</td>
</tr>
</table>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/* Copyright (C) 2016 Alexander Shishenko <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* In addition, as a special exception, the copyright holders give
* permission to link the code of portions of this program with the
* OpenSSL library under certain conditions as described in each
* individual source file, and distribute linked combinations
* including the two.
* You must obey the GNU General Public License in all respects
* for all of the code used other than OpenSSL. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you
* do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source
* files in the program, then also delete it here.
*/
#pragma once
#include "Archive.h"
namespace librevault {
// Archive strategy that keeps no archive at all.
// NOTE(review): the name suggests archive() simply disposes of the file
// instead of preserving a copy; the implementation is not visible in this
// header, so confirm against the corresponding .cpp.
class NoArchive : public ArchiveStrategy {
	Q_OBJECT
public:
	// Binds this strategy to the Archive that owns it.
	NoArchive(Archive* parent) : ArchiveStrategy(parent) {}

	// Handles a file scheduled for archiving; denormpath is the on-disk
	// (denormalized) path of the file.
	void archive(QString denormpath);
};
} /* namespace librevault */
| {
"pile_set_name": "Github"
} |
project(android-external-sqlite)

# Compile-time configuration for the bundled SQLite amalgamation. Noteworthy
# flags:
#   SQLITE_TEMP_STORE=3        - all TEMP files live in RAM, which is the
#                                behavior we want
#   SQLITE_ENABLE_FTS3         - enables FTS3 (not FTS1 or FTS2)
#   SQLITE_DEFAULT_AUTOVACUUM=1 - databases are subject to auto-vacuum
add_definitions(
    -DNDEBUG=1
    -DHAVE_USLEEP=1
    -DSQLITE_HAVE_ISNAN
    -DSQLITE_DEFAULT_JOURNAL_SIZE_LIMIT=1048576
    -DSQLITE_THREADSAFE=2
    -DSQLITE_TEMP_STORE=3
    -DSQLITE_POWERSAFE_OVERWRITE=1
    -DSQLITE_DEFAULT_FILE_FORMAT=4
    -DSQLITE_DEFAULT_AUTOVACUUM=1
    -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1
    -DSQLITE_ENABLE_FTS3
    -DSQLITE_ENABLE_FTS3_BACKWARDS
    -DSQLITE_ENABLE_FTS4
    -DSQLITE_OMIT_BUILTIN_TEST
    -DSQLITE_OMIT_COMPILEOPTION_DIAGS
    -DSQLITE_OMIT_LOAD_EXTENSION
    -DSQLITE_DEFAULT_FILE_PERMISSIONS=0600
)

# Build the amalgamation as an OBJECT library so the parent scope can link
# the raw object files directly.
add_library(android-external-sqlite OBJECT dist/sqlite3.c)

# Export the object list for the enclosing CMakeLists to consume.
set(EXTERNAL_SQLITE_OBJECTS $<TARGET_OBJECTS:android-external-sqlite> PARENT_SCOPE)
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2014-2018 Andrew Gunnerson <[email protected]>
*
* This file is part of DualBootPatcher
*
* DualBootPatcher is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DualBootPatcher is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DualBootPatcher. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "mbcommon/flags.h"
namespace mb
{
// In-memory mirror of a single package record as stored by the Android
// package manager. Per-field comments name the corresponding member of
// AOSP's PackageSetting class.
class Package
{
public:
    // From frameworks/base/core/java/android/content/pm/ApplicationInfo.java
    // See https://android.googlesource.com/platform/frameworks/base/+/master/core/java/android/content/pm/ApplicationInfo.java
    // Combined flag bitfield as used on Android <6.0 (see pkg_flags below).
    enum class Flag : uint64_t
    {
        SYSTEM = 1ULL << 0,
        DEBUGGABLE = 1ULL << 1,
        HAS_CODE = 1ULL << 2,
        PERSISTENT = 1ULL << 3,
        FACTORY_TEST = 1ULL << 4,
        ALLOW_TASK_REPARENTING = 1ULL << 5,
        ALLOW_CLEAR_USER_DATA = 1ULL << 6,
        UPDATED_SYSTEM_APP = 1ULL << 7,
        TEST_ONLY = 1ULL << 8,
        SUPPORTS_SMALL_SCREENS = 1ULL << 9,
        SUPPORTS_NORMAL_SCREENS = 1ULL << 10,
        SUPPORTS_LARGE_SCREENS = 1ULL << 11,
        RESIZEABLE_FOR_SCREENS = 1ULL << 12,
        SUPPORTS_SCREEN_DENSITIES = 1ULL << 13,
        VM_SAFE_MODE = 1ULL << 14,
        ALLOW_BACKUP = 1ULL << 15,
        KILL_AFTER_RESTORE = 1ULL << 16,
        RESTORE_ANY_VERSION = 1ULL << 17,
        EXTERNAL_STORAGE = 1ULL << 18,
        SUPPORTS_XLARGE_SCREENS = 1ULL << 19,
        LARGE_HEAP = 1ULL << 20,
        STOPPED = 1ULL << 21,
        SUPPORTS_RTL = 1ULL << 22,
        INSTALLED = 1ULL << 23,
        IS_DATA_ONLY = 1ULL << 24,
        IS_GAME = 1ULL << 25,
        FULL_BACKUP_ONLY = 1ULL << 26,
        HIDDEN = 1ULL << 27,
        CANT_SAVE_STATE = 1ULL << 28,
        FORWARD_LOCK = 1ULL << 29,
        PRIVILEGED = 1ULL << 30,
        MULTIARCH = 1ULL << 31,
    };
    MB_DECLARE_FLAGS(Flags, Flag)

    // Public application flags as used on Android >=6.0; bits 0-26 match
    // Flag above, bits 27-31 differ.
    enum class PublicFlag : uint64_t
    {
        SYSTEM = 1ULL << 0,
        DEBUGGABLE = 1ULL << 1,
        HAS_CODE = 1ULL << 2,
        PERSISTENT = 1ULL << 3,
        FACTORY_TEST = 1ULL << 4,
        ALLOW_TASK_REPARENTING = 1ULL << 5,
        ALLOW_CLEAR_USER_DATA = 1ULL << 6,
        UPDATED_SYSTEM_APP = 1ULL << 7,
        TEST_ONLY = 1ULL << 8,
        SUPPORTS_SMALL_SCREENS = 1ULL << 9,
        SUPPORTS_NORMAL_SCREENS = 1ULL << 10,
        SUPPORTS_LARGE_SCREENS = 1ULL << 11,
        RESIZEABLE_FOR_SCREENS = 1ULL << 12,
        SUPPORTS_SCREEN_DENSITIES = 1ULL << 13,
        VM_SAFE_MODE = 1ULL << 14,
        ALLOW_BACKUP = 1ULL << 15,
        KILL_AFTER_RESTORE = 1ULL << 16,
        RESTORE_ANY_VERSION = 1ULL << 17,
        EXTERNAL_STORAGE = 1ULL << 18,
        SUPPORTS_XLARGE_SCREENS = 1ULL << 19,
        LARGE_HEAP = 1ULL << 20,
        STOPPED = 1ULL << 21,
        SUPPORTS_RTL = 1ULL << 22,
        INSTALLED = 1ULL << 23,
        IS_DATA_ONLY = 1ULL << 24,
        IS_GAME = 1ULL << 25,
        FULL_BACKUP_ONLY = 1ULL << 26,
        USES_CLEARTEXT_TRAFFIC = 1ULL << 27,
        EXTRACT_NATIVE_LIBS = 1ULL << 28,
        HARDWARE_ACCELERATED = 1ULL << 29,
        SUSPENDED = 1ULL << 30,
        MULTIARCH = 1ULL << 31,
    };
    MB_DECLARE_FLAGS(PublicFlags, PublicFlag)

    // Private application flags, split out of the old bitfield on
    // Android >=6.0.
    enum class PrivateFlag : uint64_t
    {
        HIDDEN = 1ULL << 0,
        CANT_SAVE_STATE = 1ULL << 1,
        FORWARD_LOCK = 1ULL << 2,
        PRIVILEGED = 1ULL << 3,
        HAS_DOMAIN_URLS = 1ULL << 4,
        DEFAULT_TO_DEVICE_PROTECTED_STORAGE = 1ULL << 5,
        DIRECT_BOOT_AWARE = 1ULL << 6,
        INSTANT = 1ULL << 7,
        PARTIALLY_DIRECT_BOOT_AWARE = 1ULL << 8,
        REQUIRED_FOR_SYSTEM_USER = 1ULL << 9,
        ACTIVITIES_RESIZE_MODE_RESIZEABLE = 1ULL << 10,
        ACTIVITIES_RESIZE_MODE_UNRESIZEABLE = 1ULL << 11,
        ACTIVITIES_RESIZE_MODE_RESIZEABLE_VIA_SDK_VERSION = 1ULL << 12,
        BACKUP_IN_FOREGROUND = 1ULL << 13,
        STATIC_SHARED_LIBRARY = 1ULL << 14,
        ISOLATED_SPLIT_LOADING = 1ULL << 15,
        VIRTUAL_PRELOAD = 1ULL << 16,
    };
    MB_DECLARE_FLAGS(PrivateFlags, PrivateFlag)

    std::string name;                 // PackageSetting.name
    std::string real_name;            // PackageSetting.realName
    std::string code_path;            // PackageSetting.codePathString
    std::string resource_path;        // PackageSetting.resourcePathString
    std::string native_library_path;  // PackageSetting.legacyNativeLibraryPathString
    std::string primary_cpu_abi;      // PackageSetting.primaryCpuAbiString
    std::string secondary_cpu_abi;    // PackageSetting.secondaryCpuAbiString
    std::string cpu_abi_override;     // PackageSetting.cpuAbiOverride

    // Android <6.0
    Flags pkg_flags;                  // PackageSetting.pkgFlags
    // Android >=6.0
    PublicFlags pkg_public_flags;     // PackageSetting.pkgFlags
    PrivateFlags pkg_private_flags;   // PackageSetting.pkgPrivateFlags

    // Timestamps are in milliseconds epoch/unix time
    uint64_t timestamp;               // PackageSetting.timeStamp
    uint64_t first_install_time;      // PackageSetting.firstInstallTime
    uint64_t last_update_time;        // PackageSetting.lastUpdateTime

    int version;                      // PackageSetting.versionCode
    int is_shared_user;               // PackageSetting.sharedUser != null (0/1)
    int user_id;                      // PackageSetting.appId
    int shared_user_id;               // PackageSetting.appId

    std::string uid_error;            // (not in PackageSetting)
    std::string install_status;       // (not in PackageSetting)
    std::string installer;            // PackageSetting.installerPackageName

    // NOTE(review): presumably keys into Packages::sigs identifying this
    // package's certificates -- confirm against the parser.
    std::vector<std::string> sig_indexes;

    // Functions
    // Default-constructs an empty record (initial values defined in the
    // implementation file).
    Package();
    // Resolves the effective uid for this package. NOTE(review): presumably
    // shared_user_id when is_shared_user is set, else user_id -- confirm in
    // the implementation.
    uid_t get_uid();
    // Logs/prints this record (output destination not visible in this header).
    void dump();
};

// Enable type-safe bitwise operators for the three flag sets.
MB_DECLARE_OPERATORS_FOR_FLAGS(Package::Flags)
MB_DECLARE_OPERATORS_FOR_FLAGS(Package::PublicFlags)
MB_DECLARE_OPERATORS_FOR_FLAGS(Package::PrivateFlags)
// Collection of package records loaded from a package-manager XML file,
// together with the signature table read from the same file.
class Packages
{
public:
    // All parsed package entries.
    std::vector<std::shared_ptr<Package>> pkgs;
    // NOTE(review): presumably maps signature index -> certificate data,
    // referenced by Package::sig_indexes -- confirm against the parser.
    std::unordered_map<std::string, std::string> sigs;

    // Loads and parses the XML file at path into pkgs/sigs; the bool return
    // signals success.
    bool load_xml(const std::string &path);
    // Looks up a package by uid (presumably nullptr when not found).
    std::shared_ptr<Package> find_by_uid(uid_t uid) const;
    // Looks up a package by package identifier (presumably nullptr when not
    // found).
    std::shared_ptr<Package> find_by_pkg(const std::string &pkg_id) const;
};
}
| {
"pile_set_name": "Github"
} |
% DOCKER(1) Docker User Manuals
% William Henry
% APRIL 2014
# NAME
docker \- Docker image and container command line interface
# SYNOPSIS
**docker** [OPTIONS] COMMAND [ARG...]
**docker** [--help|-v|--version]
# DESCRIPTION
**docker** is a client for interacting with the daemon (see **dockerd(8)**) through the CLI.
The Docker CLI has over 30 commands. The commands are listed below and each has
its own man page which explains usage and arguments.
To see the man page for a command run **man docker <command>**.
# OPTIONS
**--help**
Print usage statement
**--config**=""
Specifies the location of the Docker client configuration files. The default is '~/.docker'.
**-D**, **--debug**=*true*|*false*
Enable debug mode. Default is false.
**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or
unix://[/path/to/socket] to use.
The socket(s) to bind to in daemon mode specified using one or more
tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd.
If the tcp port is not specified, then it will default to either `2375` when
`--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified.
**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
Set the logging level. Default is `info`.
**--tls**=*true*|*false*
Use TLS; implied by --tlsverify. Default is false.
**--tlscacert**=*~/.docker/ca.pem*
Trust certs signed only by this CA.
**--tlscert**=*~/.docker/cert.pem*
Path to TLS certificate file.
**--tlskey**=*~/.docker/key.pem*
Path to TLS key file.
**--tlsverify**=*true*|*false*
Use TLS and verify the remote (daemon: verify client, client: verify daemon).
Default is false.
**-v**, **--version**=*true*|*false*
Print version information and quit. Default is false.
# COMMANDS
Use "docker help" or "docker --help" to get an overview of available commands.
# EXAMPLES
For specific client examples please see the man page for the specific Docker
command. For example:
man docker-run
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work.
| {
"pile_set_name": "Github"
} |
/*!
* Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
*/
/* FONT PATH
 * -------------------------- */
/* Registers the FontAwesome face. The ?v=4.7.0 query string busts browser
   caches on upgrade; the duplicated `src` with the `?#iefix` fragment is the
   standard workaround for old IE's EOT parser, and the format list falls
   back from woff2 down to svg for older engines. */
@font-face {
  font-family: 'FontAwesome';
  src: url('../fonts/fontawesome-webfont.eot?v=4.7.0');
  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.7.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.7.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.7.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular') format('svg');
  font-weight: normal;
  font-style: normal;
}
/* Base class carried by every icon. The `font` shorthand resets
   style/variant/weight and sets a 14px size with line-height 1; the
   immediately following `font-size: inherit` then lets the icon scale with
   its surrounding text. */
.fa {
  display: inline-block;
  font: normal normal normal 14px/1 FontAwesome;
  font-size: inherit;
  text-rendering: auto;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}
/* makes the font 33% larger relative to the icon container */
.fa-lg {
font-size: 1.33333333em;
line-height: 0.75em;
vertical-align: -15%;
}
.fa-2x {
font-size: 2em;
}
.fa-3x {
font-size: 3em;
}
.fa-4x {
font-size: 4em;
}
.fa-5x {
font-size: 5em;
}
.fa-fw {
width: 1.28571429em;
text-align: center;
}
.fa-ul {
padding-left: 0;
margin-left: 2.14285714em;
list-style-type: none;
}
.fa-ul > li {
position: relative;
}
.fa-li {
position: absolute;
left: -2.14285714em;
width: 2.14285714em;
top: 0.14285714em;
text-align: center;
}
.fa-li.fa-lg {
left: -1.85714286em;
}
.fa-border {
padding: .2em .25em .15em;
border: solid 0.08em #eeeeee;
border-radius: .1em;
}
.fa-pull-left {
float: left;
}
.fa-pull-right {
float: right;
}
.fa.fa-pull-left {
margin-right: .3em;
}
.fa.fa-pull-right {
margin-left: .3em;
}
/* Deprecated as of 4.4.0 */
.pull-right {
float: right;
}
.pull-left {
float: left;
}
.fa.pull-left {
margin-right: .3em;
}
.fa.pull-right {
margin-left: .3em;
}
/* Continuous 2-second rotation (typically used for loading spinners). */
.fa-spin {
  -webkit-animation: fa-spin 2s infinite linear;
  animation: fa-spin 2s infinite linear;
}
/* Same rotation, but snapped to 8 discrete steps over 1 second. */
.fa-pulse {
  -webkit-animation: fa-spin 1s infinite steps(8);
  animation: fa-spin 1s infinite steps(8);
}
/* Keyframes rotate 0deg -> 359deg (upstream uses 359 rather than 360).
   The -webkit- prefixed copy exists for older WebKit browsers. */
@-webkit-keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(359deg);
    transform: rotate(359deg);
  }
}
@keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(359deg);
    transform: rotate(359deg);
  }
}
.fa-rotate-90 {
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";
-webkit-transform: rotate(90deg);
-ms-transform: rotate(90deg);
transform: rotate(90deg);
}
.fa-rotate-180 {
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";
-webkit-transform: rotate(180deg);
-ms-transform: rotate(180deg);
transform: rotate(180deg);
}
.fa-rotate-270 {
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";
-webkit-transform: rotate(270deg);
-ms-transform: rotate(270deg);
transform: rotate(270deg);
}
.fa-flip-horizontal {
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";
-webkit-transform: scale(-1, 1);
-ms-transform: scale(-1, 1);
transform: scale(-1, 1);
}
.fa-flip-vertical {
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
-webkit-transform: scale(1, -1);
-ms-transform: scale(1, -1);
transform: scale(1, -1);
}
:root .fa-rotate-90,
:root .fa-rotate-180,
:root .fa-rotate-270,
:root .fa-flip-horizontal,
:root .fa-flip-vertical {
filter: none;
}
.fa-stack {
position: relative;
display: inline-block;
width: 2em;
height: 2em;
line-height: 2em;
vertical-align: middle;
}
.fa-stack-1x,
.fa-stack-2x {
position: absolute;
left: 0;
width: 100%;
text-align: center;
}
.fa-stack-1x {
line-height: inherit;
}
.fa-stack-2x {
font-size: 2em;
}
.fa-inverse {
color: #ffffff;
}
/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
readers do not read off random characters that represent icons */
.fa-glass:before {
content: "\f000";
}
.fa-music:before {
content: "\f001";
}
.fa-search:before {
content: "\f002";
}
.fa-envelope-o:before {
content: "\f003";
}
.fa-heart:before {
content: "\f004";
}
.fa-star:before {
content: "\f005";
}
.fa-star-o:before {
content: "\f006";
}
.fa-user:before {
content: "\f007";
}
.fa-film:before {
content: "\f008";
}
.fa-th-large:before {
content: "\f009";
}
.fa-th:before {
content: "\f00a";
}
.fa-th-list:before {
content: "\f00b";
}
.fa-check:before {
content: "\f00c";
}
.fa-remove:before,
.fa-close:before,
.fa-times:before {
content: "\f00d";
}
.fa-search-plus:before {
content: "\f00e";
}
.fa-search-minus:before {
content: "\f010";
}
.fa-power-off:before {
content: "\f011";
}
.fa-signal:before {
content: "\f012";
}
.fa-gear:before,
.fa-cog:before {
content: "\f013";
}
.fa-trash-o:before {
content: "\f014";
}
.fa-home:before {
content: "\f015";
}
.fa-file-o:before {
content: "\f016";
}
.fa-clock-o:before {
content: "\f017";
}
.fa-road:before {
content: "\f018";
}
.fa-download:before {
content: "\f019";
}
.fa-arrow-circle-o-down:before {
content: "\f01a";
}
.fa-arrow-circle-o-up:before {
content: "\f01b";
}
.fa-inbox:before {
content: "\f01c";
}
.fa-play-circle-o:before {
content: "\f01d";
}
.fa-rotate-right:before,
.fa-repeat:before {
content: "\f01e";
}
.fa-refresh:before {
content: "\f021";
}
.fa-list-alt:before {
content: "\f022";
}
.fa-lock:before {
content: "\f023";
}
.fa-flag:before {
content: "\f024";
}
.fa-headphones:before {
content: "\f025";
}
.fa-volume-off:before {
content: "\f026";
}
.fa-volume-down:before {
content: "\f027";
}
.fa-volume-up:before {
content: "\f028";
}
.fa-qrcode:before {
content: "\f029";
}
.fa-barcode:before {
content: "\f02a";
}
.fa-tag:before {
content: "\f02b";
}
.fa-tags:before {
content: "\f02c";
}
.fa-book:before {
content: "\f02d";
}
.fa-bookmark:before {
content: "\f02e";
}
.fa-print:before {
content: "\f02f";
}
.fa-camera:before {
content: "\f030";
}
.fa-font:before {
content: "\f031";
}
.fa-bold:before {
content: "\f032";
}
.fa-italic:before {
content: "\f033";
}
.fa-text-height:before {
content: "\f034";
}
.fa-text-width:before {
content: "\f035";
}
.fa-align-left:before {
content: "\f036";
}
.fa-align-center:before {
content: "\f037";
}
.fa-align-right:before {
content: "\f038";
}
.fa-align-justify:before {
content: "\f039";
}
.fa-list:before {
content: "\f03a";
}
.fa-dedent:before,
.fa-outdent:before {
content: "\f03b";
}
.fa-indent:before {
content: "\f03c";
}
.fa-video-camera:before {
content: "\f03d";
}
.fa-photo:before,
.fa-image:before,
.fa-picture-o:before {
content: "\f03e";
}
.fa-pencil:before {
content: "\f040";
}
.fa-map-marker:before {
content: "\f041";
}
.fa-adjust:before {
content: "\f042";
}
.fa-tint:before {
content: "\f043";
}
.fa-edit:before,
.fa-pencil-square-o:before {
content: "\f044";
}
.fa-share-square-o:before {
content: "\f045";
}
.fa-check-square-o:before {
content: "\f046";
}
.fa-arrows:before {
content: "\f047";
}
.fa-step-backward:before {
content: "\f048";
}
.fa-fast-backward:before {
content: "\f049";
}
.fa-backward:before {
content: "\f04a";
}
.fa-play:before {
content: "\f04b";
}
.fa-pause:before {
content: "\f04c";
}
.fa-stop:before {
content: "\f04d";
}
.fa-forward:before {
content: "\f04e";
}
.fa-fast-forward:before {
content: "\f050";
}
.fa-step-forward:before {
content: "\f051";
}
.fa-eject:before {
content: "\f052";
}
.fa-chevron-left:before {
content: "\f053";
}
.fa-chevron-right:before {
content: "\f054";
}
.fa-plus-circle:before {
content: "\f055";
}
.fa-minus-circle:before {
content: "\f056";
}
.fa-times-circle:before {
content: "\f057";
}
.fa-check-circle:before {
content: "\f058";
}
.fa-question-circle:before {
content: "\f059";
}
.fa-info-circle:before {
content: "\f05a";
}
.fa-crosshairs:before {
content: "\f05b";
}
.fa-times-circle-o:before {
content: "\f05c";
}
.fa-check-circle-o:before {
content: "\f05d";
}
.fa-ban:before {
content: "\f05e";
}
.fa-arrow-left:before {
content: "\f060";
}
.fa-arrow-right:before {
content: "\f061";
}
.fa-arrow-up:before {
content: "\f062";
}
.fa-arrow-down:before {
content: "\f063";
}
.fa-mail-forward:before,
.fa-share:before {
content: "\f064";
}
.fa-expand:before {
content: "\f065";
}
.fa-compress:before {
content: "\f066";
}
.fa-plus:before {
content: "\f067";
}
.fa-minus:before {
content: "\f068";
}
.fa-asterisk:before {
content: "\f069";
}
.fa-exclamation-circle:before {
content: "\f06a";
}
.fa-gift:before {
content: "\f06b";
}
.fa-leaf:before {
content: "\f06c";
}
.fa-fire:before {
content: "\f06d";
}
.fa-eye:before {
content: "\f06e";
}
.fa-eye-slash:before {
content: "\f070";
}
.fa-warning:before,
.fa-exclamation-triangle:before {
content: "\f071";
}
.fa-plane:before {
content: "\f072";
}
.fa-calendar:before {
content: "\f073";
}
.fa-random:before {
content: "\f074";
}
.fa-comment:before {
content: "\f075";
}
.fa-magnet:before {
content: "\f076";
}
.fa-chevron-up:before {
content: "\f077";
}
.fa-chevron-down:before {
content: "\f078";
}
.fa-retweet:before {
content: "\f079";
}
.fa-shopping-cart:before {
content: "\f07a";
}
.fa-folder:before {
content: "\f07b";
}
.fa-folder-open:before {
content: "\f07c";
}
.fa-arrows-v:before {
content: "\f07d";
}
.fa-arrows-h:before {
content: "\f07e";
}
.fa-bar-chart-o:before,
.fa-bar-chart:before {
content: "\f080";
}
.fa-twitter-square:before {
content: "\f081";
}
.fa-facebook-square:before {
content: "\f082";
}
.fa-camera-retro:before {
content: "\f083";
}
.fa-key:before {
content: "\f084";
}
.fa-gears:before,
.fa-cogs:before {
content: "\f085";
}
.fa-comments:before {
content: "\f086";
}
.fa-thumbs-o-up:before {
content: "\f087";
}
.fa-thumbs-o-down:before {
content: "\f088";
}
.fa-star-half:before {
content: "\f089";
}
.fa-heart-o:before {
content: "\f08a";
}
.fa-sign-out:before {
content: "\f08b";
}
.fa-linkedin-square:before {
content: "\f08c";
}
.fa-thumb-tack:before {
content: "\f08d";
}
.fa-external-link:before {
content: "\f08e";
}
.fa-sign-in:before {
content: "\f090";
}
.fa-trophy:before {
content: "\f091";
}
.fa-github-square:before {
content: "\f092";
}
.fa-upload:before {
content: "\f093";
}
.fa-lemon-o:before {
content: "\f094";
}
.fa-phone:before {
content: "\f095";
}
.fa-square-o:before {
content: "\f096";
}
.fa-bookmark-o:before {
content: "\f097";
}
.fa-phone-square:before {
content: "\f098";
}
.fa-twitter:before {
content: "\f099";
}
.fa-facebook-f:before,
.fa-facebook:before {
content: "\f09a";
}
.fa-github:before {
content: "\f09b";
}
.fa-unlock:before {
content: "\f09c";
}
.fa-credit-card:before {
content: "\f09d";
}
.fa-feed:before,
.fa-rss:before {
content: "\f09e";
}
.fa-hdd-o:before {
content: "\f0a0";
}
.fa-bullhorn:before {
content: "\f0a1";
}
.fa-bell:before {
content: "\f0f3";
}
.fa-certificate:before {
content: "\f0a3";
}
.fa-hand-o-right:before {
content: "\f0a4";
}
.fa-hand-o-left:before {
content: "\f0a5";
}
.fa-hand-o-up:before {
content: "\f0a6";
}
.fa-hand-o-down:before {
content: "\f0a7";
}
.fa-arrow-circle-left:before {
content: "\f0a8";
}
.fa-arrow-circle-right:before {
content: "\f0a9";
}
.fa-arrow-circle-up:before {
content: "\f0aa";
}
.fa-arrow-circle-down:before {
content: "\f0ab";
}
.fa-globe:before {
content: "\f0ac";
}
.fa-wrench:before {
content: "\f0ad";
}
.fa-tasks:before {
content: "\f0ae";
}
.fa-filter:before {
content: "\f0b0";
}
.fa-briefcase:before {
content: "\f0b1";
}
.fa-arrows-alt:before {
content: "\f0b2";
}
.fa-group:before,
.fa-users:before {
content: "\f0c0";
}
.fa-chain:before,
.fa-link:before {
content: "\f0c1";
}
.fa-cloud:before {
content: "\f0c2";
}
.fa-flask:before {
content: "\f0c3";
}
.fa-cut:before,
.fa-scissors:before {
content: "\f0c4";
}
.fa-copy:before,
.fa-files-o:before {
content: "\f0c5";
}
.fa-paperclip:before {
content: "\f0c6";
}
.fa-save:before,
.fa-floppy-o:before {
content: "\f0c7";
}
.fa-square:before {
content: "\f0c8";
}
.fa-navicon:before,
.fa-reorder:before,
.fa-bars:before {
content: "\f0c9";
}
.fa-list-ul:before {
content: "\f0ca";
}
.fa-list-ol:before {
content: "\f0cb";
}
.fa-strikethrough:before {
content: "\f0cc";
}
.fa-underline:before {
content: "\f0cd";
}
.fa-table:before {
content: "\f0ce";
}
.fa-magic:before {
content: "\f0d0";
}
.fa-truck:before {
content: "\f0d1";
}
.fa-pinterest:before {
content: "\f0d2";
}
.fa-pinterest-square:before {
content: "\f0d3";
}
.fa-google-plus-square:before {
content: "\f0d4";
}
.fa-google-plus:before {
content: "\f0d5";
}
.fa-money:before {
content: "\f0d6";
}
.fa-caret-down:before {
content: "\f0d7";
}
.fa-caret-up:before {
content: "\f0d8";
}
.fa-caret-left:before {
content: "\f0d9";
}
.fa-caret-right:before {
content: "\f0da";
}
.fa-columns:before {
content: "\f0db";
}
.fa-unsorted:before,
.fa-sort:before {
content: "\f0dc";
}
.fa-sort-down:before,
.fa-sort-desc:before {
content: "\f0dd";
}
.fa-sort-up:before,
.fa-sort-asc:before {
content: "\f0de";
}
.fa-envelope:before {
content: "\f0e0";
}
.fa-linkedin:before {
content: "\f0e1";
}
.fa-rotate-left:before,
.fa-undo:before {
content: "\f0e2";
}
.fa-legal:before,
.fa-gavel:before {
content: "\f0e3";
}
.fa-dashboard:before,
.fa-tachometer:before {
content: "\f0e4";
}
.fa-comment-o:before {
content: "\f0e5";
}
.fa-comments-o:before {
content: "\f0e6";
}
.fa-flash:before,
.fa-bolt:before {
content: "\f0e7";
}
.fa-sitemap:before {
content: "\f0e8";
}
.fa-umbrella:before {
content: "\f0e9";
}
.fa-paste:before,
.fa-clipboard:before {
content: "\f0ea";
}
.fa-lightbulb-o:before {
content: "\f0eb";
}
.fa-exchange:before {
content: "\f0ec";
}
.fa-cloud-download:before {
content: "\f0ed";
}
.fa-cloud-upload:before {
content: "\f0ee";
}
.fa-user-md:before {
content: "\f0f0";
}
.fa-stethoscope:before {
content: "\f0f1";
}
.fa-suitcase:before {
content: "\f0f2";
}
.fa-bell-o:before {
content: "\f0a2";
}
.fa-coffee:before {
content: "\f0f4";
}
.fa-cutlery:before {
content: "\f0f5";
}
.fa-file-text-o:before {
content: "\f0f6";
}
.fa-building-o:before {
content: "\f0f7";
}
.fa-hospital-o:before {
content: "\f0f8";
}
.fa-ambulance:before {
content: "\f0f9";
}
.fa-medkit:before {
content: "\f0fa";
}
.fa-fighter-jet:before {
content: "\f0fb";
}
.fa-beer:before {
content: "\f0fc";
}
.fa-h-square:before {
content: "\f0fd";
}
.fa-plus-square:before {
content: "\f0fe";
}
.fa-angle-double-left:before {
content: "\f100";
}
.fa-angle-double-right:before {
content: "\f101";
}
.fa-angle-double-up:before {
content: "\f102";
}
.fa-angle-double-down:before {
content: "\f103";
}
.fa-angle-left:before {
content: "\f104";
}
.fa-angle-right:before {
content: "\f105";
}
.fa-angle-up:before {
content: "\f106";
}
.fa-angle-down:before {
content: "\f107";
}
.fa-desktop:before {
content: "\f108";
}
.fa-laptop:before {
content: "\f109";
}
.fa-tablet:before {
content: "\f10a";
}
.fa-mobile-phone:before,
.fa-mobile:before {
content: "\f10b";
}
.fa-circle-o:before {
content: "\f10c";
}
.fa-quote-left:before {
content: "\f10d";
}
.fa-quote-right:before {
content: "\f10e";
}
.fa-spinner:before {
content: "\f110";
}
.fa-circle:before {
content: "\f111";
}
.fa-mail-reply:before,
.fa-reply:before {
content: "\f112";
}
.fa-github-alt:before {
content: "\f113";
}
.fa-folder-o:before {
content: "\f114";
}
.fa-folder-open-o:before {
content: "\f115";
}
.fa-smile-o:before {
content: "\f118";
}
.fa-frown-o:before {
content: "\f119";
}
.fa-meh-o:before {
content: "\f11a";
}
.fa-gamepad:before {
content: "\f11b";
}
.fa-keyboard-o:before {
content: "\f11c";
}
.fa-flag-o:before {
content: "\f11d";
}
.fa-flag-checkered:before {
content: "\f11e";
}
.fa-terminal:before {
content: "\f120";
}
.fa-code:before {
content: "\f121";
}
.fa-mail-reply-all:before,
.fa-reply-all:before {
content: "\f122";
}
.fa-star-half-empty:before,
.fa-star-half-full:before,
.fa-star-half-o:before {
content: "\f123";
}
.fa-location-arrow:before {
content: "\f124";
}
.fa-crop:before {
content: "\f125";
}
.fa-code-fork:before {
content: "\f126";
}
.fa-unlink:before,
.fa-chain-broken:before {
content: "\f127";
}
.fa-question:before {
content: "\f128";
}
.fa-info:before {
content: "\f129";
}
.fa-exclamation:before {
content: "\f12a";
}
.fa-superscript:before {
content: "\f12b";
}
.fa-subscript:before {
content: "\f12c";
}
.fa-eraser:before {
content: "\f12d";
}
.fa-puzzle-piece:before {
content: "\f12e";
}
.fa-microphone:before {
content: "\f130";
}
.fa-microphone-slash:before {
content: "\f131";
}
.fa-shield:before {
content: "\f132";
}
.fa-calendar-o:before {
content: "\f133";
}
.fa-fire-extinguisher:before {
content: "\f134";
}
.fa-rocket:before {
content: "\f135";
}
.fa-maxcdn:before {
content: "\f136";
}
.fa-chevron-circle-left:before {
content: "\f137";
}
.fa-chevron-circle-right:before {
content: "\f138";
}
.fa-chevron-circle-up:before {
content: "\f139";
}
.fa-chevron-circle-down:before {
content: "\f13a";
}
.fa-html5:before {
content: "\f13b";
}
.fa-css3:before {
content: "\f13c";
}
.fa-anchor:before {
content: "\f13d";
}
.fa-unlock-alt:before {
content: "\f13e";
}
.fa-bullseye:before {
content: "\f140";
}
.fa-ellipsis-h:before {
content: "\f141";
}
.fa-ellipsis-v:before {
content: "\f142";
}
.fa-rss-square:before {
content: "\f143";
}
.fa-play-circle:before {
content: "\f144";
}
.fa-ticket:before {
content: "\f145";
}
.fa-minus-square:before {
content: "\f146";
}
.fa-minus-square-o:before {
content: "\f147";
}
.fa-level-up:before {
content: "\f148";
}
.fa-level-down:before {
content: "\f149";
}
.fa-check-square:before {
content: "\f14a";
}
.fa-pencil-square:before {
content: "\f14b";
}
.fa-external-link-square:before {
content: "\f14c";
}
.fa-share-square:before {
content: "\f14d";
}
.fa-compass:before {
content: "\f14e";
}
.fa-toggle-down:before,
.fa-caret-square-o-down:before {
content: "\f150";
}
.fa-toggle-up:before,
.fa-caret-square-o-up:before {
content: "\f151";
}
.fa-toggle-right:before,
.fa-caret-square-o-right:before {
content: "\f152";
}
.fa-euro:before,
.fa-eur:before {
content: "\f153";
}
.fa-gbp:before {
content: "\f154";
}
.fa-dollar:before,
.fa-usd:before {
content: "\f155";
}
.fa-rupee:before,
.fa-inr:before {
content: "\f156";
}
.fa-cny:before,
.fa-rmb:before,
.fa-yen:before,
.fa-jpy:before {
content: "\f157";
}
.fa-ruble:before,
.fa-rouble:before,
.fa-rub:before {
content: "\f158";
}
.fa-won:before,
.fa-krw:before {
content: "\f159";
}
.fa-bitcoin:before,
.fa-btc:before {
content: "\f15a";
}
.fa-file:before {
content: "\f15b";
}
.fa-file-text:before {
content: "\f15c";
}
.fa-sort-alpha-asc:before {
content: "\f15d";
}
.fa-sort-alpha-desc:before {
content: "\f15e";
}
.fa-sort-amount-asc:before {
content: "\f160";
}
.fa-sort-amount-desc:before {
content: "\f161";
}
.fa-sort-numeric-asc:before {
content: "\f162";
}
.fa-sort-numeric-desc:before {
content: "\f163";
}
.fa-thumbs-up:before {
content: "\f164";
}
.fa-thumbs-down:before {
content: "\f165";
}
.fa-youtube-square:before {
content: "\f166";
}
.fa-youtube:before {
content: "\f167";
}
.fa-xing:before {
content: "\f168";
}
.fa-xing-square:before {
content: "\f169";
}
.fa-youtube-play:before {
content: "\f16a";
}
.fa-dropbox:before {
content: "\f16b";
}
.fa-stack-overflow:before {
content: "\f16c";
}
.fa-instagram:before {
content: "\f16d";
}
.fa-flickr:before {
content: "\f16e";
}
.fa-adn:before {
content: "\f170";
}
.fa-bitbucket:before {
content: "\f171";
}
.fa-bitbucket-square:before {
content: "\f172";
}
.fa-tumblr:before {
content: "\f173";
}
.fa-tumblr-square:before {
content: "\f174";
}
.fa-long-arrow-down:before {
content: "\f175";
}
.fa-long-arrow-up:before {
content: "\f176";
}
.fa-long-arrow-left:before {
content: "\f177";
}
.fa-long-arrow-right:before {
content: "\f178";
}
.fa-apple:before {
content: "\f179";
}
.fa-windows:before {
content: "\f17a";
}
.fa-android:before {
content: "\f17b";
}
.fa-linux:before {
content: "\f17c";
}
.fa-dribbble:before {
content: "\f17d";
}
.fa-skype:before {
content: "\f17e";
}
.fa-foursquare:before {
content: "\f180";
}
.fa-trello:before {
content: "\f181";
}
.fa-female:before {
content: "\f182";
}
.fa-male:before {
content: "\f183";
}
.fa-gittip:before,
.fa-gratipay:before {
content: "\f184";
}
.fa-sun-o:before {
content: "\f185";
}
.fa-moon-o:before {
content: "\f186";
}
.fa-archive:before {
content: "\f187";
}
.fa-bug:before {
content: "\f188";
}
.fa-vk:before {
content: "\f189";
}
.fa-weibo:before {
content: "\f18a";
}
.fa-renren:before {
content: "\f18b";
}
.fa-pagelines:before {
content: "\f18c";
}
.fa-stack-exchange:before {
content: "\f18d";
}
.fa-arrow-circle-o-right:before {
content: "\f18e";
}
.fa-arrow-circle-o-left:before {
content: "\f190";
}
.fa-toggle-left:before,
.fa-caret-square-o-left:before {
content: "\f191";
}
.fa-dot-circle-o:before {
content: "\f192";
}
.fa-wheelchair:before {
content: "\f193";
}
.fa-vimeo-square:before {
content: "\f194";
}
.fa-turkish-lira:before,
.fa-try:before {
content: "\f195";
}
.fa-plus-square-o:before {
content: "\f196";
}
.fa-space-shuttle:before {
content: "\f197";
}
.fa-slack:before {
content: "\f198";
}
.fa-envelope-square:before {
content: "\f199";
}
.fa-wordpress:before {
content: "\f19a";
}
.fa-openid:before {
content: "\f19b";
}
.fa-institution:before,
.fa-bank:before,
.fa-university:before {
content: "\f19c";
}
.fa-mortar-board:before,
.fa-graduation-cap:before {
content: "\f19d";
}
.fa-yahoo:before {
content: "\f19e";
}
.fa-google:before {
content: "\f1a0";
}
.fa-reddit:before {
content: "\f1a1";
}
.fa-reddit-square:before {
content: "\f1a2";
}
.fa-stumbleupon-circle:before {
content: "\f1a3";
}
.fa-stumbleupon:before {
content: "\f1a4";
}
.fa-delicious:before {
content: "\f1a5";
}
.fa-digg:before {
content: "\f1a6";
}
.fa-pied-piper-pp:before {
content: "\f1a7";
}
.fa-pied-piper-alt:before {
content: "\f1a8";
}
.fa-drupal:before {
content: "\f1a9";
}
.fa-joomla:before {
content: "\f1aa";
}
.fa-language:before {
content: "\f1ab";
}
.fa-fax:before {
content: "\f1ac";
}
.fa-building:before {
content: "\f1ad";
}
.fa-child:before {
content: "\f1ae";
}
.fa-paw:before {
content: "\f1b0";
}
.fa-spoon:before {
content: "\f1b1";
}
.fa-cube:before {
content: "\f1b2";
}
.fa-cubes:before {
content: "\f1b3";
}
.fa-behance:before {
content: "\f1b4";
}
.fa-behance-square:before {
content: "\f1b5";
}
.fa-steam:before {
content: "\f1b6";
}
.fa-steam-square:before {
content: "\f1b7";
}
.fa-recycle:before {
content: "\f1b8";
}
.fa-automobile:before,
.fa-car:before {
content: "\f1b9";
}
.fa-cab:before,
.fa-taxi:before {
content: "\f1ba";
}
.fa-tree:before {
content: "\f1bb";
}
.fa-spotify:before {
content: "\f1bc";
}
.fa-deviantart:before {
content: "\f1bd";
}
.fa-soundcloud:before {
content: "\f1be";
}
.fa-database:before {
content: "\f1c0";
}
.fa-file-pdf-o:before {
content: "\f1c1";
}
.fa-file-word-o:before {
content: "\f1c2";
}
.fa-file-excel-o:before {
content: "\f1c3";
}
.fa-file-powerpoint-o:before {
content: "\f1c4";
}
.fa-file-photo-o:before,
.fa-file-picture-o:before,
.fa-file-image-o:before {
content: "\f1c5";
}
.fa-file-zip-o:before,
.fa-file-archive-o:before {
content: "\f1c6";
}
.fa-file-sound-o:before,
.fa-file-audio-o:before {
content: "\f1c7";
}
.fa-file-movie-o:before,
.fa-file-video-o:before {
content: "\f1c8";
}
.fa-file-code-o:before {
content: "\f1c9";
}
.fa-vine:before {
content: "\f1ca";
}
.fa-codepen:before {
content: "\f1cb";
}
.fa-jsfiddle:before {
content: "\f1cc";
}
.fa-life-bouy:before,
.fa-life-buoy:before,
.fa-life-saver:before,
.fa-support:before,
.fa-life-ring:before {
content: "\f1cd";
}
.fa-circle-o-notch:before {
content: "\f1ce";
}
.fa-ra:before,
.fa-resistance:before,
.fa-rebel:before {
content: "\f1d0";
}
.fa-ge:before,
.fa-empire:before {
content: "\f1d1";
}
.fa-git-square:before {
content: "\f1d2";
}
.fa-git:before {
content: "\f1d3";
}
.fa-y-combinator-square:before,
.fa-yc-square:before,
.fa-hacker-news:before {
content: "\f1d4";
}
.fa-tencent-weibo:before {
content: "\f1d5";
}
.fa-qq:before {
content: "\f1d6";
}
.fa-wechat:before,
.fa-weixin:before {
content: "\f1d7";
}
.fa-send:before,
.fa-paper-plane:before {
content: "\f1d8";
}
.fa-send-o:before,
.fa-paper-plane-o:before {
content: "\f1d9";
}
.fa-history:before {
content: "\f1da";
}
.fa-circle-thin:before {
content: "\f1db";
}
.fa-header:before {
content: "\f1dc";
}
.fa-paragraph:before {
content: "\f1dd";
}
.fa-sliders:before {
content: "\f1de";
}
.fa-share-alt:before {
content: "\f1e0";
}
.fa-share-alt-square:before {
content: "\f1e1";
}
.fa-bomb:before {
content: "\f1e2";
}
.fa-soccer-ball-o:before,
.fa-futbol-o:before {
content: "\f1e3";
}
.fa-tty:before {
content: "\f1e4";
}
.fa-binoculars:before {
content: "\f1e5";
}
.fa-plug:before {
content: "\f1e6";
}
.fa-slideshare:before {
content: "\f1e7";
}
.fa-twitch:before {
content: "\f1e8";
}
.fa-yelp:before {
content: "\f1e9";
}
.fa-newspaper-o:before {
content: "\f1ea";
}
.fa-wifi:before {
content: "\f1eb";
}
.fa-calculator:before {
content: "\f1ec";
}
.fa-paypal:before {
content: "\f1ed";
}
.fa-google-wallet:before {
content: "\f1ee";
}
.fa-cc-visa:before {
content: "\f1f0";
}
.fa-cc-mastercard:before {
content: "\f1f1";
}
.fa-cc-discover:before {
content: "\f1f2";
}
.fa-cc-amex:before {
content: "\f1f3";
}
.fa-cc-paypal:before {
content: "\f1f4";
}
.fa-cc-stripe:before {
content: "\f1f5";
}
.fa-bell-slash:before {
content: "\f1f6";
}
.fa-bell-slash-o:before {
content: "\f1f7";
}
.fa-trash:before {
content: "\f1f8";
}
.fa-copyright:before {
content: "\f1f9";
}
.fa-at:before {
content: "\f1fa";
}
.fa-eyedropper:before {
content: "\f1fb";
}
.fa-paint-brush:before {
content: "\f1fc";
}
.fa-birthday-cake:before {
content: "\f1fd";
}
.fa-area-chart:before {
content: "\f1fe";
}
.fa-pie-chart:before {
content: "\f200";
}
.fa-line-chart:before {
content: "\f201";
}
.fa-lastfm:before {
content: "\f202";
}
.fa-lastfm-square:before {
content: "\f203";
}
.fa-toggle-off:before {
content: "\f204";
}
.fa-toggle-on:before {
content: "\f205";
}
.fa-bicycle:before {
content: "\f206";
}
.fa-bus:before {
content: "\f207";
}
.fa-ioxhost:before {
content: "\f208";
}
.fa-angellist:before {
content: "\f209";
}
.fa-cc:before {
content: "\f20a";
}
.fa-shekel:before,
.fa-sheqel:before,
.fa-ils:before {
content: "\f20b";
}
.fa-meanpath:before {
content: "\f20c";
}
.fa-buysellads:before {
content: "\f20d";
}
.fa-connectdevelop:before {
content: "\f20e";
}
.fa-dashcube:before {
content: "\f210";
}
.fa-forumbee:before {
content: "\f211";
}
.fa-leanpub:before {
content: "\f212";
}
.fa-sellsy:before {
content: "\f213";
}
.fa-shirtsinbulk:before {
content: "\f214";
}
.fa-simplybuilt:before {
content: "\f215";
}
.fa-skyatlas:before {
content: "\f216";
}
.fa-cart-plus:before {
content: "\f217";
}
.fa-cart-arrow-down:before {
content: "\f218";
}
.fa-diamond:before {
content: "\f219";
}
.fa-ship:before {
content: "\f21a";
}
.fa-user-secret:before {
content: "\f21b";
}
.fa-motorcycle:before {
content: "\f21c";
}
.fa-street-view:before {
content: "\f21d";
}
.fa-heartbeat:before {
content: "\f21e";
}
.fa-venus:before {
content: "\f221";
}
.fa-mars:before {
content: "\f222";
}
.fa-mercury:before {
content: "\f223";
}
.fa-intersex:before,
.fa-transgender:before {
content: "\f224";
}
.fa-transgender-alt:before {
content: "\f225";
}
.fa-venus-double:before {
content: "\f226";
}
.fa-mars-double:before {
content: "\f227";
}
.fa-venus-mars:before {
content: "\f228";
}
.fa-mars-stroke:before {
content: "\f229";
}
.fa-mars-stroke-v:before {
content: "\f22a";
}
.fa-mars-stroke-h:before {
content: "\f22b";
}
.fa-neuter:before {
content: "\f22c";
}
.fa-genderless:before {
content: "\f22d";
}
.fa-facebook-official:before {
content: "\f230";
}
.fa-pinterest-p:before {
content: "\f231";
}
.fa-whatsapp:before {
content: "\f232";
}
.fa-server:before {
content: "\f233";
}
.fa-user-plus:before {
content: "\f234";
}
.fa-user-times:before {
content: "\f235";
}
.fa-hotel:before,
.fa-bed:before {
content: "\f236";
}
.fa-viacoin:before {
content: "\f237";
}
.fa-train:before {
content: "\f238";
}
.fa-subway:before {
content: "\f239";
}
.fa-medium:before {
content: "\f23a";
}
.fa-yc:before,
.fa-y-combinator:before {
content: "\f23b";
}
.fa-optin-monster:before {
content: "\f23c";
}
.fa-opencart:before {
content: "\f23d";
}
.fa-expeditedssl:before {
content: "\f23e";
}
.fa-battery-4:before,
.fa-battery:before,
.fa-battery-full:before {
content: "\f240";
}
.fa-battery-3:before,
.fa-battery-three-quarters:before {
content: "\f241";
}
.fa-battery-2:before,
.fa-battery-half:before {
content: "\f242";
}
.fa-battery-1:before,
.fa-battery-quarter:before {
content: "\f243";
}
.fa-battery-0:before,
.fa-battery-empty:before {
content: "\f244";
}
.fa-mouse-pointer:before {
content: "\f245";
}
.fa-i-cursor:before {
content: "\f246";
}
.fa-object-group:before {
content: "\f247";
}
.fa-object-ungroup:before {
content: "\f248";
}
.fa-sticky-note:before {
content: "\f249";
}
.fa-sticky-note-o:before {
content: "\f24a";
}
.fa-cc-jcb:before {
content: "\f24b";
}
.fa-cc-diners-club:before {
content: "\f24c";
}
.fa-clone:before {
content: "\f24d";
}
.fa-balance-scale:before {
content: "\f24e";
}
.fa-hourglass-o:before {
content: "\f250";
}
.fa-hourglass-1:before,
.fa-hourglass-start:before {
content: "\f251";
}
.fa-hourglass-2:before,
.fa-hourglass-half:before {
content: "\f252";
}
.fa-hourglass-3:before,
.fa-hourglass-end:before {
content: "\f253";
}
.fa-hourglass:before {
content: "\f254";
}
.fa-hand-grab-o:before,
.fa-hand-rock-o:before {
content: "\f255";
}
.fa-hand-stop-o:before,
.fa-hand-paper-o:before {
content: "\f256";
}
.fa-hand-scissors-o:before {
content: "\f257";
}
.fa-hand-lizard-o:before {
content: "\f258";
}
.fa-hand-spock-o:before {
content: "\f259";
}
.fa-hand-pointer-o:before {
content: "\f25a";
}
.fa-hand-peace-o:before {
content: "\f25b";
}
.fa-trademark:before {
content: "\f25c";
}
.fa-registered:before {
content: "\f25d";
}
.fa-creative-commons:before {
content: "\f25e";
}
.fa-gg:before {
content: "\f260";
}
.fa-gg-circle:before {
content: "\f261";
}
.fa-tripadvisor:before {
content: "\f262";
}
.fa-odnoklassniki:before {
content: "\f263";
}
.fa-odnoklassniki-square:before {
content: "\f264";
}
.fa-get-pocket:before {
content: "\f265";
}
.fa-wikipedia-w:before {
content: "\f266";
}
.fa-safari:before {
content: "\f267";
}
.fa-chrome:before {
content: "\f268";
}
.fa-firefox:before {
content: "\f269";
}
.fa-opera:before {
content: "\f26a";
}
.fa-internet-explorer:before {
content: "\f26b";
}
.fa-tv:before,
.fa-television:before {
content: "\f26c";
}
.fa-contao:before {
content: "\f26d";
}
.fa-500px:before {
content: "\f26e";
}
.fa-amazon:before {
content: "\f270";
}
.fa-calendar-plus-o:before {
content: "\f271";
}
.fa-calendar-minus-o:before {
content: "\f272";
}
.fa-calendar-times-o:before {
content: "\f273";
}
.fa-calendar-check-o:before {
content: "\f274";
}
.fa-industry:before {
content: "\f275";
}
.fa-map-pin:before {
content: "\f276";
}
.fa-map-signs:before {
content: "\f277";
}
.fa-map-o:before {
content: "\f278";
}
.fa-map:before {
content: "\f279";
}
.fa-commenting:before {
content: "\f27a";
}
.fa-commenting-o:before {
content: "\f27b";
}
.fa-houzz:before {
content: "\f27c";
}
.fa-vimeo:before {
content: "\f27d";
}
.fa-black-tie:before {
content: "\f27e";
}
.fa-fonticons:before {
content: "\f280";
}
.fa-reddit-alien:before {
content: "\f281";
}
.fa-edge:before {
content: "\f282";
}
.fa-credit-card-alt:before {
content: "\f283";
}
.fa-codiepie:before {
content: "\f284";
}
.fa-modx:before {
content: "\f285";
}
.fa-fort-awesome:before {
content: "\f286";
}
.fa-usb:before {
content: "\f287";
}
.fa-product-hunt:before {
content: "\f288";
}
.fa-mixcloud:before {
content: "\f289";
}
.fa-scribd:before {
content: "\f28a";
}
.fa-pause-circle:before {
content: "\f28b";
}
.fa-pause-circle-o:before {
content: "\f28c";
}
.fa-stop-circle:before {
content: "\f28d";
}
.fa-stop-circle-o:before {
content: "\f28e";
}
.fa-shopping-bag:before {
content: "\f290";
}
.fa-shopping-basket:before {
content: "\f291";
}
.fa-hashtag:before {
content: "\f292";
}
.fa-bluetooth:before {
content: "\f293";
}
.fa-bluetooth-b:before {
content: "\f294";
}
.fa-percent:before {
content: "\f295";
}
.fa-gitlab:before {
content: "\f296";
}
.fa-wpbeginner:before {
content: "\f297";
}
.fa-wpforms:before {
content: "\f298";
}
.fa-envira:before {
content: "\f299";
}
.fa-universal-access:before {
content: "\f29a";
}
.fa-wheelchair-alt:before {
content: "\f29b";
}
.fa-question-circle-o:before {
content: "\f29c";
}
.fa-blind:before {
content: "\f29d";
}
.fa-audio-description:before {
content: "\f29e";
}
.fa-volume-control-phone:before {
content: "\f2a0";
}
.fa-braille:before {
content: "\f2a1";
}
.fa-assistive-listening-systems:before {
content: "\f2a2";
}
.fa-asl-interpreting:before,
.fa-american-sign-language-interpreting:before {
content: "\f2a3";
}
.fa-deafness:before,
.fa-hard-of-hearing:before,
.fa-deaf:before {
content: "\f2a4";
}
.fa-glide:before {
content: "\f2a5";
}
.fa-glide-g:before {
content: "\f2a6";
}
.fa-signing:before,
.fa-sign-language:before {
content: "\f2a7";
}
.fa-low-vision:before {
content: "\f2a8";
}
.fa-viadeo:before {
content: "\f2a9";
}
.fa-viadeo-square:before {
content: "\f2aa";
}
.fa-snapchat:before {
content: "\f2ab";
}
.fa-snapchat-ghost:before {
content: "\f2ac";
}
.fa-snapchat-square:before {
content: "\f2ad";
}
.fa-pied-piper:before {
content: "\f2ae";
}
.fa-first-order:before {
content: "\f2b0";
}
.fa-yoast:before {
content: "\f2b1";
}
.fa-themeisle:before {
content: "\f2b2";
}
.fa-google-plus-circle:before,
.fa-google-plus-official:before {
content: "\f2b3";
}
.fa-fa:before,
.fa-font-awesome:before {
content: "\f2b4";
}
.fa-handshake-o:before {
content: "\f2b5";
}
.fa-envelope-open:before {
content: "\f2b6";
}
.fa-envelope-open-o:before {
content: "\f2b7";
}
.fa-linode:before {
content: "\f2b8";
}
.fa-address-book:before {
content: "\f2b9";
}
.fa-address-book-o:before {
content: "\f2ba";
}
.fa-vcard:before,
.fa-address-card:before {
content: "\f2bb";
}
.fa-vcard-o:before,
.fa-address-card-o:before {
content: "\f2bc";
}
.fa-user-circle:before {
content: "\f2bd";
}
.fa-user-circle-o:before {
content: "\f2be";
}
.fa-user-o:before {
content: "\f2c0";
}
.fa-id-badge:before {
content: "\f2c1";
}
.fa-drivers-license:before,
.fa-id-card:before {
content: "\f2c2";
}
.fa-drivers-license-o:before,
.fa-id-card-o:before {
content: "\f2c3";
}
.fa-quora:before {
content: "\f2c4";
}
.fa-free-code-camp:before {
content: "\f2c5";
}
.fa-telegram:before {
content: "\f2c6";
}
.fa-thermometer-4:before,
.fa-thermometer:before,
.fa-thermometer-full:before {
content: "\f2c7";
}
.fa-thermometer-3:before,
.fa-thermometer-three-quarters:before {
content: "\f2c8";
}
.fa-thermometer-2:before,
.fa-thermometer-half:before {
content: "\f2c9";
}
.fa-thermometer-1:before,
.fa-thermometer-quarter:before {
content: "\f2ca";
}
.fa-thermometer-0:before,
.fa-thermometer-empty:before {
content: "\f2cb";
}
.fa-shower:before {
content: "\f2cc";
}
.fa-bathtub:before,
.fa-s15:before,
.fa-bath:before {
content: "\f2cd";
}
.fa-podcast:before {
content: "\f2ce";
}
.fa-window-maximize:before {
content: "\f2d0";
}
.fa-window-minimize:before {
content: "\f2d1";
}
.fa-window-restore:before {
content: "\f2d2";
}
.fa-times-rectangle:before,
.fa-window-close:before {
content: "\f2d3";
}
.fa-times-rectangle-o:before,
.fa-window-close-o:before {
content: "\f2d4";
}
.fa-bandcamp:before {
content: "\f2d5";
}
.fa-grav:before {
content: "\f2d6";
}
.fa-etsy:before {
content: "\f2d7";
}
.fa-imdb:before {
content: "\f2d8";
}
.fa-ravelry:before {
content: "\f2d9";
}
.fa-eercast:before {
content: "\f2da";
}
.fa-microchip:before {
content: "\f2db";
}
.fa-snowflake-o:before {
content: "\f2dc";
}
.fa-superpowers:before {
content: "\f2dd";
}
.fa-wpexplorer:before {
content: "\f2de";
}
.fa-meetup:before {
content: "\f2e0";
}
/* Screen-reader-only helper: visually hides the element (1px box, clipped,
   pulled out of flow) while keeping it in the accessibility tree so screen
   readers still announce it. */
.sr-only {
  position: absolute;
  width: 1px;
  height: 1px;
  padding: 0;
  margin: -1px;
  overflow: hidden;
  clip: rect(0, 0, 0, 0);
  border: 0;
}
/* Companion to .sr-only: when the element receives keyboard focus (or is
   active), undo the hiding so sighted keyboard users can see it too
   (e.g. "skip to content" links). */
.sr-only-focusable:active,
.sr-only-focusable:focus {
  position: static;
  width: auto;
  height: auto;
  margin: 0;
  overflow: visible;
  clip: auto;
}
| {
"pile_set_name": "Github"
} |
/*
Copyright 2008-2013 ITACA-TSB, http://www.tsb.upv.es/
Instituto Tecnologico de Aplicaciones de Comunicacion
Avanzadas - Grupo Tecnologias para la Salud y el
Bienestar (TSB)
See the NOTICE file distributed with this work for additional
information regarding copyright ownership
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.bubblecloud.zigbee.network.packet.system;
import org.bubblecloud.zigbee.network.packet.ZToolCMD;
import org.bubblecloud.zigbee.network.packet.ZToolPacket;
import org.bubblecloud.zigbee.util.DoubleByte;
/**
* @author <a href="mailto:[email protected]">Alvaro Fides Valero</a>
* @version $LastChangedRevision: 799 $ ($LastChangedDate: 2013-08-06 19:00:05 +0300 (Tue, 06 Aug 2013) $)
*/
/**
 * SYS_OSAL_NV_WRITE request packet: writes a data value into an item of the
 * ZNP device's non-volatile (NV) memory.
 *
 * Frame layout (after the ZTool header): Id LSB, Id MSB, Offset, Len,
 * followed by the Len data bytes.
 */
public class SYS_OSAL_NV_WRITE extends ZToolPacket /*implements IREQUEST,ISYSTEM*/ {
    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE.Id</name>
    /// <summary>Identifier of the NV item to be written.</summary>
    public DoubleByte Id;
    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE.Len</name>
    /// <summary>Number of bytes in the item (up to 250).</summary>
    public int Len;
    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE.Offset</name>
    /// <summary>Memory offset into the item (up to 250).</summary>
    public int Offset;
    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE.Value</name>
    /// <summary>Data bytes that are to be written to the NV location.</summary>
    public int[] Value;

    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE</name>
    /// <summary>Default constructor; pre-allocates the maximum-size value buffer.</summary>
    public SYS_OSAL_NV_WRITE() {
        this.Value = new int[0xff];
    }

    /// <name>TI.ZPI2.SYS_OSAL_NV_WRITE</name>
    /// <summary>Constructs the request and builds the serial frame.</summary>
    /// <param name="num1">NV item identifier</param>
    /// <param name="num2">byte offset into the NV item</param>
    /// <param name="num3">number of bytes to write</param>
    /// <param name="buffer1">data bytes to write</param>
    public SYS_OSAL_NV_WRITE(DoubleByte num1, int num2, int num3, int[] buffer1) {
        this.Id = num1;
        this.Offset = num2;
        this.Len = num3;
        // Take a defensive copy of the caller's buffer.  The original code
        // allocated an array and then immediately overwrote the reference
        // with buffer1 itself (dead allocation + aliasing), so later mutation
        // of the caller's array would have changed this packet's Value.
        this.Value = new int[buffer1.length];
        System.arraycopy(buffer1, 0, this.Value, 0, buffer1.length);
        // Frame: [0]=Id LSB, [1]=Id MSB, [2]=Offset, [3]=Len, [4..]=Value
        int[] framedata = new int[buffer1.length + 4];
        framedata[0] = this.Id.getLsb();
        framedata[1] = this.Id.getMsb();
        framedata[2] = this.Offset;
        framedata[3] = this.Len;
        System.arraycopy(this.Value, 0, framedata, 4, this.Value.length);
        super.buildPacket(new DoubleByte(ZToolCMD.SYS_OSAL_NV_WRITE), framedata);
    }
}
| {
"pile_set_name": "Github"
} |
/****************************************************************************
* arch/mips/src/pic32mx/pic32mx_gpio.c
*
* Copyright (C) 2011 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <arch/board/board.h>
#include "mips_arch.h"
#include "chip.h"
#include "pic32mx_ioport.h"
#include "pic32mx.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
/* Map a port index (0=IOPORTA, 1=IOPORTB, ...) to the base address
 * (the *_K1BASE macro) of that IOPORT's register block.  Entries for
 * ports not present on the selected chip are compiled out.
 */

static const uintptr_t g_gpiobase[CHIP_NPORTS] =
{
  PIC32MX_IOPORTA_K1BASE
#if CHIP_NPORTS > 1
  , PIC32MX_IOPORTB_K1BASE
#endif
#if CHIP_NPORTS > 2
  , PIC32MX_IOPORTC_K1BASE
#endif
#if CHIP_NPORTS > 3
  , PIC32MX_IOPORTD_K1BASE
#endif
#if CHIP_NPORTS > 4
  , PIC32MX_IOPORTE_K1BASE
#endif
#if CHIP_NPORTS > 5
  , PIC32MX_IOPORTF_K1BASE
#endif
#if CHIP_NPORTS > 6
  , PIC32MX_IOPORTG_K1BASE
#endif
};
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: Inline PIN set field extractors
****************************************************************************/
/* Return true if the pin set selects an output pin */

static inline bool pic32mx_output(uint16_t pinset)
{
  return ((pinset & GPIO_OUTPUT) != 0);
}

/* Return true if the pin set selects an open drain output */

static inline bool pic32mx_opendrain(uint16_t pinset)
{
  return ((pinset & GPIO_MODE_MASK) == GPIO_OPENDRAN);
}

/* Return true if the initial output value of the pin should be high */

static inline bool pic32mx_outputhigh(uint16_t pinset)
{
  return ((pinset & GPIO_VALUE_MASK) != 0);
}

/* Extract the port index from the pin set */

static inline unsigned int pic32mx_portno(uint16_t pinset)
{
  return ((pinset & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT);
}

/* Extract the pin number within the port from the pin set */

static inline unsigned int pic32mx_pinno(uint16_t pinset)
{
  return ((pinset & GPIO_PIN_MASK) >> GPIO_PIN_SHIFT);
}

/* Return true if the pin should be placed in analog mode.  Only defined
 * as a real test for the PIC32MX1/2 families; elsewhere it is hard-wired
 * to false.
 */

#if defined(CHIP_PIC32MX1) || defined(CHIP_PIC32MX2)
static inline unsigned int pic32mx_analog(uint16_t pinset)
{
  return ((pinset & GPIO_ANALOG_MASK) != 0);
}
#else
#  define pic32mx_analog(pinset) (false)
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: pic32mx_configgpio
*
* Description:
* Configure a GPIO pin based on bit-encoded description of the pin (the
* interrupt will be configured when pic32mx_attach() is called.
*
* Returned Value:
* OK on success; negated errno on failure.
*
****************************************************************************/
int pic32mx_configgpio(uint16_t cfgset)
{
  unsigned int port = pic32mx_portno(cfgset);
  unsigned int pin  = pic32mx_pinno(cfgset);
  uint32_t mask     = (1 << pin);
  uintptr_t base;

  /* Reject pin sets that reference a port not present on this chip */

  if (port >= CHIP_NPORTS)
    {
      return -EINVAL;
    }

  /* Get the base address of the IOPORT register block for this port */

  base = g_gpiobase[port];

  /* Hold the scheduler so the register update sequence is not
   * interleaved with another task reconfiguring the same port.
   */

  sched_lock();
  if (pic32mx_output(cfgset))
    {
      /* Output pin.  Make sure that the pin is not in analog mode... */

#if defined(CHIP_PIC32MX1) || defined(CHIP_PIC32MX2)
      putreg32(mask, base + PIC32MX_IOPORT_ANSELCLR_OFFSET);
#endif

      /* ...then clear the TRIS bit to enable the output driver */

      putreg32(mask, base + PIC32MX_IOPORT_TRISCLR_OFFSET);

      /* Select open drain vs. normal operation via the ODC register */

      if (pic32mx_opendrain(cfgset))
        {
          putreg32(mask, base + PIC32MX_IOPORT_ODCSET_OFFSET);
        }
      else
        {
          putreg32(mask, base + PIC32MX_IOPORT_ODCCLR_OFFSET);
        }

      /* Finally, drive the pin to its configured initial level */

      pic32mx_gpiowrite(cfgset, pic32mx_outputhigh(cfgset));
    }
  else
    {
      /* Input pin: set TRIS to disable the output driver and make sure
       * that open drain mode is off.
       */

      putreg32(mask, base + PIC32MX_IOPORT_TRISSET_OFFSET);
      putreg32(mask, base + PIC32MX_IOPORT_ODCCLR_OFFSET);

#if defined(CHIP_PIC32MX1) || defined(CHIP_PIC32MX2)
      /* Route the pin to analog or digital circuitry as requested */

      if (pic32mx_analog(cfgset))
        {
          putreg32(mask, base + PIC32MX_IOPORT_ANSELSET_OFFSET);
        }
      else
        {
          putreg32(mask, base + PIC32MX_IOPORT_ANSELCLR_OFFSET);
        }
#endif
    }

  sched_unlock();
  return OK;
}
/****************************************************************************
* Name: pic32mx_gpiowrite
*
* Description:
* Write one or zero to the selected GPIO pin
*
****************************************************************************/
void pic32mx_gpiowrite(uint16_t pinset, bool value)
{
  unsigned int port = pic32mx_portno(pinset);
  unsigned int pin  = pic32mx_pinno(pinset);
  uintptr_t regaddr;

  /* Silently ignore requests for ports that do not exist on this chip */

  if (port >= CHIP_NPORTS)
    {
      return;
    }

  /* Write through the dedicated SET or CLR register so the update is a
   * single store (no read-modify-write of the PORT register).
   */

  if (value)
    {
      regaddr = g_gpiobase[port] + PIC32MX_IOPORT_PORTSET_OFFSET;
    }
  else
    {
      regaddr = g_gpiobase[port] + PIC32MX_IOPORT_PORTCLR_OFFSET;
    }

  putreg32(1 << pin, regaddr);
}
/****************************************************************************
* Name: pic32mx_gpioread
*
* Description:
* Read one or zero from the selected GPIO pin
*
****************************************************************************/
bool pic32mx_gpioread(uint16_t pinset)
{
  unsigned int port = pic32mx_portno(pinset);
  unsigned int pin  = pic32mx_pinno(pinset);
  uint32_t portval;

  /* Pins on non-existent ports read back as false */

  if (port >= CHIP_NPORTS)
    {
      return false;
    }

  /* Sample the PORT register and test the bit for this pin */

  portval = getreg32(g_gpiobase[port] + PIC32MX_IOPORT_PORT_OFFSET);
  return (portval & (1 << pin)) != 0;
}
/****************************************************************************
* Function: pic32mx_dumpgpio
*
* Description:
* Dump all GPIO registers associated with the provided base address
*
****************************************************************************/
#ifdef CONFIG_DEBUG_GPIO_INFO
/* Dump the IOPORT registers for the port selected by 'pinset'.  'msg' is
 * a caller-provided string echoed in the output to identify the dump.
 * Requests for out-of-range ports are ignored.
 *
 * Fix: removed the local 'irqstate_t flags;' which was never used (the
 * function serializes with sched_lock(), not by disabling interrupts).
 */

void pic32mx_dumpgpio(uint32_t pinset, const char *msg)
{
  unsigned int port = pic32mx_portno(pinset);
  uintptr_t base;

  /* Verify that the port number is within range */

  if (port < CHIP_NPORTS)
    {
      /* Get the base address of the ports */

      base = g_gpiobase[port];

      /* The following requires exclusive access to the GPIO registers */

      sched_lock();
      gpioinfo("IOPORT%c pinset: %04x base: %08x -- %s\n",
               'A'+port, pinset, base, msg);
      gpioinfo(" TRIS: %08x PORT: %08x LAT: %08x ODC: %08x\n",
               getreg32(base + PIC32MX_IOPORT_TRIS_OFFSET),
               getreg32(base + PIC32MX_IOPORT_PORT_OFFSET),
               getreg32(base + PIC32MX_IOPORT_LAT_OFFSET),
               getreg32(base + PIC32MX_IOPORT_ODC_OFFSET));
      gpioinfo(" CNCON: %08x CNEN: %08x CNPUE: %08x\n",
               getreg32(PIC32MX_IOPORT_CNCON),
               getreg32(PIC32MX_IOPORT_CNEN),
               getreg32(PIC32MX_IOPORT_CNPUE));
      sched_unlock();
    }
}
#endif
| {
"pile_set_name": "Github"
} |
/*
* (C) Copyright 2010
* NVIDIA Corporation <www.nvidia.com>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef __ASM_ARCH_TEGRA_DISPLAY_H
#define __ASM_ARCH_TEGRA_DISPLAY_H
#include <asm/arch/dc.h>
#include <fdtdec.h>
/* This holds information about a window which can be displayed */
struct disp_ctl_win {
	enum win_color_depth_id fmt;	/* Color depth/format */
	unsigned bpp;			/* Bits per pixel */
	phys_addr_t phys_addr;		/* Physical address in memory */
	unsigned x;			/* Horizontal address offset (bytes) */
	unsigned y;			/* Vertical address offset (bytes) */
	unsigned w;			/* Width of source window */
	unsigned h;			/* Height of source window */
	unsigned stride;		/* Number of bytes per line */
	unsigned out_x;			/* Left edge of output window (col) */
	unsigned out_y;			/* Top edge of output window (row) */
	unsigned out_w;			/* Width of output window in pixels */
	unsigned out_h;			/* Height of output window in pixels */
};
/* Number of panel power-sequencing delays given in the device tree
 * (see fdt_panel_config.panel_timings below)
 */

#define FDT_LCD_TIMINGS	4

/* Indices into the horiz_timing[]/vert_timing[] arrays of
 * struct fdt_disp_config
 */

enum {
	FDT_LCD_TIMING_REF_TO_SYNC,
	FDT_LCD_TIMING_SYNC_WIDTH,
	FDT_LCD_TIMING_BACK_PORCH,
	FDT_LCD_TIMING_FRONT_PORCH,
	FDT_LCD_TIMING_COUNT,
};
/* Cache options for the LCD (used by fdt_panel_config.cache_type).
 * Values are bit flags, so write-back and flush can be combined.
 */

enum lcd_cache_t {
	FDT_LCD_CACHE_OFF = 0,
	FDT_LCD_CACHE_WRITE_THROUGH = 1 << 0,
	FDT_LCD_CACHE_WRITE_BACK = 1 << 1,
	FDT_LCD_CACHE_FLUSH = 1 << 2,
	FDT_LCD_CACHE_WRITE_BACK_FLUSH = FDT_LCD_CACHE_WRITE_BACK |
						FDT_LCD_CACHE_FLUSH,
};
/* Information about the display controller */
struct fdt_disp_config {
	int valid;			/* config is valid */
	int width;			/* width in pixels */
	int height;			/* height in pixels */
	int bpp;			/* number of bits per pixel */

	/*
	 * log2 of number of bpp, in general, unless bpp is 24, in which
	 * case this field holds 24 also! This is a U-Boot thing.
	 */
	int log2_bpp;
	struct disp_ctlr *disp;		/* Display controller to use */
	fdt_addr_t frame_buffer;	/* Address of frame buffer */
	unsigned pixel_clock;		/* Pixel clock in Hz */
	uint horiz_timing[FDT_LCD_TIMING_COUNT];	/* Horizontal timing */
	uint vert_timing[FDT_LCD_TIMING_COUNT];	/* Vertical timing */
	int panel_node;			/* node offset of panel information */
};
/* Information about the LCD panel */
struct fdt_panel_config {
	int pwm_channel;		/* PWM channel to use for backlight */
	enum lcd_cache_t cache_type;	/* Cache policy (see lcd_cache_t) */
	struct fdt_gpio_state backlight_en;	/* GPIO for backlight enable */
	struct fdt_gpio_state lvds_shutdown;	/* GPIO for lvds shutdown */
	struct fdt_gpio_state backlight_vdd;	/* GPIO for backlight vdd */
	struct fdt_gpio_state panel_vdd;	/* GPIO for panel vdd */

	/*
	 * Panel required timings
	 * Timing 1: delay between panel_vdd-rise and data-rise
	 * Timing 2: delay between data-rise and backlight_vdd-rise
	 * Timing 3: delay between backlight_vdd and pwm-rise
	 * Timing 4: delay between pwm-rise and backlight_en-rise
	 */
	uint panel_timings[FDT_LCD_TIMINGS];
};
/**
* Register a new display based on device tree configuration.
*
 * The frame buffer can be positioned by U-Boot or overridden by the fdt.
* You should pass in the U-Boot address here, and check the contents of
* struct fdt_disp_config to see what was actually chosen.
*
* @param blob Device tree blob
* @param default_lcd_base Default address of LCD frame buffer
* @return 0 if ok, -1 on error (unsupported bits per pixel)
*/
int tegra_display_probe(const void *blob, void *default_lcd_base);
/**
* Return the current display configuration
*
* @return pointer to display configuration, or NULL if there is no valid
* config
*/
struct fdt_disp_config *tegra_display_get_config(void);
/**
* Perform the next stage of the LCD init if it is time to do so.
*
* LCD init can be time-consuming because of the number of delays we need
* while waiting for the backlight power supply, etc. This function can
* be called at various times during U-Boot operation to advance the
* initialization of the LCD to the next stage if sufficient time has
* passed since the last stage. It keeps track of what stage it is up to
* and the time that it is permitted to move to the next stage.
*
* The final call should have wait=1 to complete the init.
*
* @param blob fdt blob containing LCD information
* @param wait 1 to wait until all init is complete, and then return
* 0 to return immediately, potentially doing nothing if it is
* not yet time for the next init.
*/
int tegra_lcd_check_next_stage(const void *blob, int wait);
/**
* Set up the maximum LCD size so we can size the frame buffer.
*
* @param blob fdt blob containing LCD information
*/
void tegra_lcd_early_init(const void *blob);
#endif /*__ASM_ARCH_TEGRA_DISPLAY_H*/
| {
"pile_set_name": "Github"
} |
/* Editor dialog: tab panel layout and background-configuration form.
   NOTE: the duplicated declarations ending in "\9" are IE-only
   overrides (CSS hack). */
.wrapper{ width: 424px;margin: 10px auto; zoom:1;position: relative}
.tabbody{height:225px;}
.tabbody .panel { position: absolute;width:100%; height:100%;background: #fff; display: none;}
.tabbody .focus { display: block;}
body{font-size: 12px;color: #888;overflow: hidden;}
input,label{vertical-align:middle}
.clear{clear: both;}
.pl{padding-left: 18px;padding-left: 23px\9;}
#imageList {width: 420px;height: 215px;margin-top: 10px;overflow: hidden;overflow-y: auto;}
#imageList div {float: left;width: 100px;height: 95px;margin: 5px 10px;}
#imageList img {cursor: pointer;border: 2px solid white;}
.bgarea{margin: 10px;padding: 5px;height: 84%;border: 1px solid #A8A297;}
.content div{margin: 10px 0 10px 5px;}
.content .iptradio{margin: 0px 5px 5px 0px;}
.txt{width:280px;}
.wrapcolor{height: 19px;}
div.color{float: left;margin: 0;}
#colorPicker{width: 17px;height: 17px;border: 1px solid #CCC;display: inline-block;border-radius: 3px;box-shadow: 2px 2px 5px #D3D6DA;margin: 0;float: left;}
div.alignment,#custom{margin-left: 23px;margin-left: 28px\9;}
#custom input{height: 15px;min-height: 15px;width:20px;}
#repeatType{width:100px;}
/* Image manager styles */
#imgManager {
width: 100%;
height: 225px;
}
#imgManager #imageList{
width: 100%;
overflow-x: hidden;
overflow-y: auto;
}
#imgManager ul {
display: block;
list-style: none;
margin: 0;
padding: 0;
}
#imgManager li {
float: left;
display: block;
list-style: none;
padding: 0;
width: 113px;
height: 113px;
margin: 9px 0 0 19px;
background-color: #eee;
overflow: hidden;
cursor: pointer;
position: relative;
}
#imgManager li.clearFloat {
float: none;
clear: both;
display: block;
width:0;
height:0;
margin: 0;
padding: 0;
}
#imgManager li img {
cursor: pointer;
}
#imgManager li .icon {
cursor: pointer;
width: 113px;
height: 113px;
position: absolute;
top: 0;
left: 0;
z-index: 2;
border: 0;
background-repeat: no-repeat;
}
#imgManager li .icon:hover {
width: 107px;
height: 107px;
border: 3px solid #1094fa;
}
#imgManager li.selected .icon {
background-image: url(images/success.png);
background-position: 75px 75px;
}
#imgManager li.selected .icon:hover {
width: 107px;
height: 107px;
border: 3px solid #1094fa;
background-position: 72px 72px;
} | {
"pile_set_name": "Github"
} |
\name{Hurricanes}
\alias{Hurricanes}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Hurricane fatalities and gender of names}
\description{
Data used in Jung et al 2014 analysis of effect of gender of name on hurricane fatalities. Note that hurricanes Katrina (2005) and Audrey (1957) were removed from the data.
}
\usage{
data(Hurricanes)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
}
\format{
\enumerate{
\item name : Given name of hurricane
\item year : Year of hurricane
\item deaths : number of deaths
\item category : Severity code for storm
\item min_pressure : Minimum pressure, a measure of storm strength; low is stronger
\item damage_norm : Normalized estimate of damage in dollars
\item female : Indicator variable for female name
\item femininity : 1-11 scale from totally masculine (1) to totally feminine (11) for name. Average of 9 scores from 9 raters.
}
}
\value{
}
\references{Jung et al. 2014. Female hurricanes are deadlier than male hurricanes. PNAS.}
\author{}
\seealso{}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ }
| {
"pile_set_name": "Github"
} |
# [SwiftUI](https://developer.apple.com/xcode/swiftui/)
## Links
- [SwiftUI Essentials WWDC 2019](https://developer.apple.com/videos/play/wwdc2019/216/)
- [SwiftUI Combine Example](https://github.com/ra1028/SwiftUI-Combine) - Example project of SwiftUI and Combine using GitHub API.
- [SwiftUI by Example](https://www.hackingwithswift.com/quick-start/swiftui/)
- [Evolution discussion of the new DSL feature behind SwiftUI (2019)](https://forums.swift.org/t/important-evolution-discussion-of-the-new-dsl-feature-behind-swiftui/25168/12)
- [About SwiftUI](https://github.com/Juanpe/About-SwiftUI) - Gathering all info published, both by Apple and by others, about the new SwiftUI framework.
- [SwiftUI Cheat Sheet](https://github.com/SimpleBoilerplates/SwiftUI-Cheat-Sheet)
- [SwiftUI Framework Learning and Usage Guide](https://github.com/Jinxiansen/SwiftUI)
- [SwiftUI Examples Projects](https://github.com/ivanvorobei/SwiftUI)
- [Answers to the most common questions about SwiftUI (2019)](https://wwdcbysundell.com/2019/swiftui-common-questions/)
- [Awesome SwiftUI](https://github.com/vlondon/awesome-swiftui)
- [Awesome SwiftUI 2](https://github.com/ygit/swiftui)
- [Awesome SwiftUI 3](https://github.com/chinsyo/awesome-swiftui)
- [MovieSwiftUI](https://github.com/Dimillian/MovieSwiftUI) - SwiftUI & Combine app using MovieDB API. With a custom Flux (Redux) implementation.
- [Swift Playground explaining the concepts of the new Combine framework](https://github.com/AvdLee/CombineSwiftPlayground)
- [SwiftWebUI](https://github.com/swiftwebui/SwiftWebUI) - Demo implementation of SwiftUI for the Web.
- [The missing ☑️: SwiftWebUI](http://www.alwaysrightinstitute.com/swiftwebui/)
- [QGrid](https://github.com/Q-Mobile/QGrid) - The missing SwiftUI collection view.
- [DesignCode SwiftUI](https://github.com/mythxn/DesignCode-SwiftUI) - App fully written in SwiftUI showcasing beautiful design and animations.
- [Introducing SwiftUI Apple Docs](https://developer.apple.com/tutorials/swiftui)
- [Creating and Combining SwiftUI Views](https://developer.apple.com/tutorials/swiftui/creating-and-combining-views)
- [View composition in SwiftUI (2019)](https://mecid.github.io/2019/10/30/view-composition-in-swiftui/)
- [You have to change mindset to use SwiftUI (2019)](https://swiftwithmajid.com/2019/11/19/you-have-to-change-mindset-to-use-swiftui/)
- [Recipes app written in SwiftUI using Single State Container](https://github.com/mecid/swiftui-recipes-app)
- [WaterfallGrid](https://github.com/paololeonardi/WaterfallGrid) - Waterfall grid layout view for SwiftUI.
- [SwiftUI NewsReader](https://github.com/basememara/SwiftUI-NewsReader) - Yet another SwiftUI example.
- [OpenSwiftUI](https://github.com/Cosmo/OpenSwiftUI) - OpenSource implementation of Apple's SwiftUI DSL.
- [SwiftHIG](https://github.com/AustinConlon/SwiftHIG) - Links to the relevant Human Interface Guidelines for SwiftUI affordances.
- [Clean Architecture for SwiftUI + Combine](https://github.com/nalexn/clean-architecture-swiftui)
- [You can use SwiftUI today (2020)](https://rambo.codes/swiftui/2020/01/03/you-can-use-swiftui-today.html)
- [Answering the big question: should you learn SwiftUI, UIKit, or both? (2019)](https://www.hackingwithswift.com/quick-start/swiftui/answering-the-big-question-should-you-learn-swiftui-uikit-or-both)
- [ViewInspector for SwiftUI](https://github.com/nalexn/ViewInspector) - Framework for runtime inspection and unit testing of SwiftUI views.
- [SwiftUI Online Playground](https://github.com/kishikawakatsumi/swiftui-playground)
- [Learn SwiftUI for iOS 13 course](https://designcode.io/swiftui2) - Learn how to code custom UIs, animations, gestures and components in Xcode 11.
- [SwiftUIEmbedded](https://github.com/Cosmo/SwiftUIEmbedded) - Implementation of SwiftUI (based on OpenSwiftUI) for embedded and Linux devices.
- [Build A Networking Client in SwiftUI](https://github.com/gtokman/SwiftUI-Networking)
- [WebView](https://github.com/kylehickinson/SwiftUI-WebView) - SwiftUI component to use WKWebView.
- [DeLong ◇ SwiftUI ◇ presentation (2020)](https://www.youtube.com/watch?v=APxrtnxRzwI)
- [Building Bottom sheet in SwiftUI (2020)](https://swiftwithmajid.com/2019/12/11/building-bottom-sheet-in-swiftui/)
- [SwiftUI for Mac (2019)](https://troz.net/post/2019/swiftui-for-mac-1/)
- [SwiftUI basic Shape operations (2020)](https://sarunw.com/posts/swiftui-basic-shape-operations/)
- [Data in SwiftUI: blog series (2020)](https://sarunw.com/posts/data-in-swiftui-1/) ([HN](https://news.ycombinator.com/item?id=22325735))
- [SwiftUIX](https://github.com/SwiftUIX/SwiftUIX) - Extension to the standard SwiftUI library.
- [Composable Architecture](https://www.pointfree.co/collections/composable-architecture)
- [Cross-platform Reddit client built in SwiftUI](https://github.com/carson-katri/reddit-swiftui)
- [A guide to the SwiftUI layout system (2020)](https://swiftbysundell.com/articles/swiftui-layout-system-guide-part-1/)
- [SwiftUI Architectures: Model-View, Redux & ViewState MVVM](https://github.com/quickbirdstudios/SwiftUI-Architectures)
- [SwiftUI by Examples](https://github.com/artemnovichkov/SwiftUI-by-Examples)
- [Introspect for SwiftUI](https://github.com/siteline/SwiftUI-Introspect) - Allows you to get the underlying UIKit or AppKit element of a SwiftUI view.
- [Suitcase](https://github.com/Impedimenta/Suitcase) - Command line tool that can be "programmed" to display a SwiftUI interface that can trigger commands and scripts. ([HN](https://news.ycombinator.com/item?id=23018209))
- [SwiftUI Airtable Demo](https://github.com/zackshapiro/SwiftUIAirtableDemo) ([HN](https://news.ycombinator.com/item?id=23106155))
- [Build a SwiftUI Animal Crossing Application (2020)](https://medium.com/better-programming/build-a-swiftui-animal-crossing-application-part-1-aaf3528c1df)
- [Free guide: Architecting SwiftUI apps with MVC and MVVM](https://matteomanferdini.com/)
- [SwiftUI: Running a Mac App Without an Xcode Project (2020)](https://www.objc.io/blog/2020/05/19/swiftui-without-an-xcodeproj/)
- [Stanford: Developing Applications for iOS using SwiftUI (2020)](https://www.youtube.com/watch?v=jbtqIBpUG7g)
- [Preview SwiftUI layouts using Emacs org blocks (2020)](http://xenodium.com/swiftui-layout-previews-using-emacs-org-blocks/)
- [Getting the most out of Xcode Previews for SwiftUI (2020)](https://www.swiftbysundell.com/articles/getting-the-most-out-of-xcode-previews/)
- [Recreate](https://recreatecode.substack.com/) - Video series about recreating popular UI with SwiftUI.
- [Understanding Property Wrappers in SwiftUI (2019)](https://swiftwithmajid.com/2019/06/12/understanding-property-wrappers-in-swiftui/)
- [SwiftWebUI and WASM](https://github.com/carson-katri/SwiftWebUI) - Fork of the incredible SwiftWebUI to support WebAssembly via swiftwasm.
- [Scripts to make working with SwiftWebUI and WASM easier](https://github.com/carson-katri/swiftwebui-scripts)
- [Replicating complex UI using SwiftUI](https://github.com/exyte/replicating)
- [SwiftUI Suitability for MacOS App Development (2020)](https://ds9soft.com/blog/2020/06/swiftui-suitability-for-macos-app-development/)
- [What’s new in SwiftUI for iOS 14](https://www.hackingwithswift.com/articles/221/whats-new-in-swiftui-for-ios-14)
- [State and Data Flow](https://developer.apple.com/documentation/swiftui/state-and-data-flow)
- [What’s the difference between @StateObject and @ObservedObject? (2020)](https://www.donnywals.com/whats-the-difference-between-stateobject-and-observedobject/) ([Tweet](https://twitter.com/DonnyWals/status/1275451776586506242))
- [What's new in SwiftUI for iOS 14 – Extended Edition (2020)](https://www.youtube.com/watch?v=-h8pk2pe7Xo)
- [Inline wrapping of UIKit or AppKit views within SwiftUI (2020)](https://www.swiftbysundell.com/tips/inline-wrapping-of-uikit-or-appkit-views-within-swiftui/)
- [SwiftUI mix and match (2020)](https://www.swiftbysundell.com/tips/swiftui-mix-and-match/)
- [All SwiftUI property wrappers explained and compared (2020)](https://www.hackingwithswift.com/quick-start/swiftui/all-swiftui-property-wrappers-explained-and-compared)
- [100 Days of SwiftUI](https://www.hackingwithswift.com/100/swiftui)
- [SwiftUI Inspector](https://swiftui.ai/) - Production-quality code straight from your favorite design tool.
- [Multiplatform Messages app for macOS, iOS, iPadOS in SwiftUI](https://github.com/jordansinger/messages-multiplatform-swiftui-sample)
- [A guide to SwiftUI’s state management system (2020)](https://swiftbysundell.com/articles/swiftui-state-management-guide/)
- [Twitter macOS Big Sur SwiftUI example app](https://github.com/jordansinger/twitter-macos-swiftui-sample)
- [Todo example app for macOS Big Sur built in SwiftUI](https://github.com/jordansinger/todo-macos-swiftui-sample) ([Tweet](https://twitter.com/jsngr/status/1280280911968378882))
- [Learning SwiftUI](https://joeyabanks.io/notes/learning-swift)
- [Rdio macOS SwiftUI example app for Big Sur](https://github.com/jordansinger/rdio-macos-swiftui-sample)
- [CatalystSidebarToolbar](https://github.com/steventroughtonsmith/CatalystSidebarToolbar) - Simple sidebar / NSToolbar example for Catalyst.
- [Thinking in SwiftUI](https://www.objc.io/books/thinking-in-swiftui/) - A Transition Guide.
- [Decision tree for how to define your SwiftUI properties (2020)](https://twitter.com/chriseidhof/status/1280433133813456896)
- [Figma macOS Big Sur example app built in SwiftUI](https://github.com/jordansinger/figma-macos-swiftui-sample)
- [SwiftUI Changelog](https://github.com/twostraws/swiftui-changelog) - Repository to track changes in the SwiftUI generated interface.
- [SwiftUI: Bridging UIKit with ScrollViewReader and DragGesture (2020)](https://www.fivestars.blog/code/section-title-index-swiftui.html)
- [Swift UI Property Wrappers](https://swiftuipropertywrappers.com/) - Deciding when to use @State, @Binding, @StateObject, @ObservedObject, and @EnvironmentObject.
- [RedditOS](https://github.com/Dimillian/RedditOS) - SwiftUI Reddit client for macOS.
- [What is a SwiftUI Property Wrapper (2020)](https://www.christopherbiscardi.com/what-is-a-swift-ui-property-wrapper)
- [Primitive School](https://www.primitive.school/) - Learn SwiftUI for Designers, Sprint Kits, and more.
- [Alignment Guides in SwiftUI (2020)](https://swiftui-lab.com/alignment-guides/)
- [SwiftUI Lab](https://swiftui-lab.com/)
- [Impossible Grids with SwiftUI (2020)](https://swiftui-lab.com/impossible-grids/)
- [SwiftUI Hierarchy List (2020)](https://www.fivestars.blog/code/swiftui-hierarchy-list.html)
- [Using SwiftUI's Spacer to Create Complex Layouts (2019)](https://benmcmahen.com/using-spacer-in-swiftui/)
- [SwiftUI Kit](https://github.com/jordansinger/SwiftUI-Kit) - SwiftUI system components and interactions demo app.
- [Sharing SwiftUI layout information (2020)](https://fivestars.blog/swiftui/swiftui-share-layout-information.html)
- [SwiftUI betas - what changed before 1.0 (2020)](https://mackuba.eu/2020/08/17/swiftui-beta/)
- [The State of SwiftUI (2020)](https://steipete.com/posts/state-of-swiftui/)
- [How Swift 5.3 enhances SwiftUI’s DSL (2020)](https://www.swiftbysundell.com/articles/how-swift-5-3-enhances-swiftui-dsl/) ([HN](https://news.ycombinator.com/item?id=24462511))
- [Going native: SwiftUI from the perspective of a React developer (2020)](https://blog.maximeheckel.com/posts/swiftui-as-react-developer)
- [Debating if I’ll use SwiftUI for my new project. Worried I’ll be a lot slower and will run into issues that force me to redo parts in UIKit (2020)](https://twitter.com/soffes/status/1308149148009472001)
| {
"pile_set_name": "Github"
} |
;; Copyright 2010 the V8 project authors. All rights reserved.
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are
;; met:
;;
;; * Redistributions of source code must retain the above copyright
;; notice, this list of conditions and the following disclaimer.
;; * Redistributions in binary form must reproduce the above
;; copyright notice, this list of conditions and the following
;; disclaimer in the documentation and/or other materials provided
;; with the distribution.
;; * Neither the name of Google Inc. nor the names of its
;; contributors may be used to endorse or promote products derived
;; from this software without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;; This is a Scheme script for the Bigloo compiler. Bigloo must be compiled with
;; support for bignums. The compilation of the script can be done as follows:
;; bigloo -static-bigloo -o generate-ten-powers generate-ten-powers.scm
;;
;; Generate approximations of 10^k.
;; Module declaration.  Cached-Fast is the internal record used for one
;; cached power of ten: significand v, binary exponent e, and a flag
;; telling whether the cached representation is exact.
(module gen-ten-powers
   (static (class Cached-Fast
	      v::bignum
	      e::bint
	      exact?::bool))
   (main my-main))
;;----------------bignum shifts -----------------------------------------------
;; Shift the bignum x left by 'by' bits; a negative shift count yields 0.
(define (bit-lshbx::bignum x::bignum by::bint)
   (if (>=fx by 0)
       (*bx x (exptbx #z2 (fixnum->bignum by)))
       #z0))
;; Shift the bignum x right by 'by' bits; a negative shift count yields 0.
(define (bit-rshbx::bignum x::bignum by::bint)
   (if (>=fx by 0)
       (/bx x (exptbx #z2 (fixnum->bignum by)))
       #z0))
;;----------------the actual power generation -------------------------------
;; e should be an indication. it might be too small.
;; Round n according to the global *round* mode and shift it right so
;; that it fits into nb-bits bits.  e is an estimate of the binary
;; exponent and is incremented until the cut value fits.  Returns three
;; values: the cut significand, the (possibly adjusted) exponent, and a
;; boolean that is #t when no non-zero bits were discarded.
(define (round-n-cut n e nb-bits)
  (define max-container (- (bit-lshbx #z1 nb-bits) 1))
  (define (round n)
    (case *round*
      ((down) n)
      ((up)
       (+bx n
	    ;; with the -1 it will only round up if the cut off part is
	    ;; non-zero
	    (-bx (bit-lshbx #z1
			    (-fx (+fx e nb-bits) 1))
		 #z1)))
      ((round)
       (+bx n
	    (bit-lshbx #z1
		       (-fx (+fx e nb-bits) 2))))))
  (let* ((shift (-fx (+fx e nb-bits) 1))
	 (cut (bit-rshbx (round n) shift))
	 (exact? (=bx n (bit-lshbx cut shift))))
    (if (<=bx cut max-container)
	(values cut e exact?)
	;; did not fit: retry with a larger exponent (one more bit cut off)
	(round-n-cut n (+fx e 1) nb-bits))))
;; Bignum division x/y honoring the global *round* mode:
;; down = truncate, up = ceiling, round = nearest (halfway cases up).
(define (rounded-/bx x y)
  (case *round*
    ((down) (/bx x y))
    ((up) (+bx (/bx x y) #z1))
    ;; parity of floor(2x/y) tells whether the fractional part of x/y
    ;; is >= 1/2
    ((round) (let ((tmp (/bx (*bx #z2 x) y)))
	       (if (zerobx? (remainderbx tmp #z2))
		   (/bx tmp #z2)
		   (+bx (/bx tmp #z2) #z1))))))
;; Build a vector of Cached-Fast records approximating 10^k for every k
;; in [from, to].  Each record stores a mantissa-size-bit significand
;; and its binary exponent.
(define (generate-powers from to mantissa-size)
  (let* ((nb-bits mantissa-size)
	 (offset (- from))
	 (nb-elements (+ (- from) to 1))
	 (vec (make-vector nb-elements))
	 (max-container (- (bit-lshbx #z1 nb-bits) 1)))
    ;; the negative ones. 10^-1, 10^-2, etc.
    ;; We already know, that we can't be exact, so exact? will always be #f.
    ;; Basically we will have a ten^i that we will *10 at each iteration. We
    ;; want to create the mantissa of 1/ten^i. However the mantissa must be
    ;; normalized (start with a 1). -> we have to shift the number.
    ;; We shift by multiplying with two^e. -> We encode two^e*(1/ten^i) ==
    ;; two^e/ten^i.
    (let loop ((i 1)
	       (ten^i #z10)
	       (two^e #z1)
	       (e 0))
      (unless (< (- i) from)
	(if (>bx (/bx (*bx #z2 two^e) ten^i) max-container)
	    ;; another shift would make the number too big. We are
	    ;; hence normalized now.
	    (begin
	      (vector-set! vec (-fx offset i)
			   (instantiate::Cached-Fast
			    (v (rounded-/bx two^e ten^i))
			    (e (negfx e))
			    (exact? #f)))
	      (loop (+fx i 1) (*bx ten^i #z10) two^e e))
	    (loop i ten^i (bit-lshbx two^e 1) (+fx e 1)))))
    ;; the positive ones 10^0, 10^1, etc.
    ;; start with 1.0. mantissa: 10...0 (1 followed by nb-bits-1 bits)
    ;; -> e = -(nb-bits-1)
    ;; exact? is true when the container can still hold the complete 10^i
    (let loop ((i 0)
	       (n (bit-lshbx #z1 (-fx nb-bits 1)))
	       (e (-fx 1 nb-bits)))
      (when (<= i to)
	(receive (cut e exact?)
		 (round-n-cut n e nb-bits)
	  (vector-set! vec (+fx i offset)
		       (instantiate::Cached-Fast
			(v cut)
			(e e)
			(exact? exact?)))
	  (loop (+fx i 1) (*bx n #z10) e))))
    vec))
;; Emit the generated powers as a C source fragment.  For each reduction
;; factor n in 1..20 a table holding every n'th cached power is printed,
;; together with the maximum binary-exponent distance between consecutive
;; entries of that table, and finally the decimal-exponent offset.
;;
;; Fixes: the offset-name parameter was accepted but ignored (the name
;; "GRISU_CACHE_OFFSET" was hard-coded in the final print); it is now
;; used.  The unused exact? binding in display-power was dropped.
;; Output is unchanged for the existing call site, which passes
;; "GRISU_CACHE_OFFSET" as offset-name.
(define (print-c powers from to struct-type
		 cache-name max-distance-name offset-name macro64)
  ;; Print one table entry: { MACRO64(hi, lo), binary-exp, decimal-exp }.
  (define (display-power power k)
    (with-access::Cached-Fast power (v e)
      (let ((tmp-p (open-output-string)))
	;; really hackish way of getting the digits
	(display (format "~x" v) tmp-p)
	(let ((str (close-output-port tmp-p)))
	  (printf " {~a(0x~a, ~a), ~a, ~a},\n"
		  macro64
		  (substring str 0 8)
		  (substring str 8 16)
		  e
		  k)))))
  ;; Print the cache that keeps every n'th element of the full table.
  (define (print-powers-reduced n)
    (print "static const " struct-type " " cache-name
	   "(" n ")"
	   "[] = {")
    (let loop ((i 0)
	       (nb-elements 0)
	       (last-e 0)
	       (max-distance 0))
      (cond
	((>= i (vector-length powers))
	 (print " };")
	 (print "static const int " max-distance-name "(" n ") = "
		max-distance ";")
	 (print "// nb elements (" n "): " nb-elements))
	(else
	 (let* ((power (vector-ref powers i))
		(e (Cached-Fast-e power)))
	   (display-power power (+ i from))
	   (loop (+ i n)
		 (+ nb-elements 1)
		 e
		 (cond
		   ((=fx i 0) max-distance)
		   ((> (- e last-e) max-distance) (- e last-e))
		   (else max-distance))))))))
  (print "// Copyright 2010 the V8 project authors. All rights reserved.")
  (print "// ------------ GENERATED FILE ----------------")
  (print "// command used:")
  (print "// "
	 (apply string-append (map (lambda (str)
				     (string-append " " str))
				   *main-args*))
	 " // NOLINT")
  (print)
  (print
   "// This file is intended to be included inside another .h or .cc files\n"
   "// with the following defines set:\n"
   "// GRISU_CACHE_STRUCT: should expand to the name of a struct that will\n"
   "// hold the cached powers of ten. Each entry will hold a 64-bit\n"
   "// significand, a 16-bit signed binary exponent, and a 16-bit\n"
   "// signed decimal exponent. Each entry will be constructed as follows:\n"
   "// { significand, binary_exponent, decimal_exponent }.\n"
   "// GRISU_CACHE_NAME(i): generates the name for the different caches.\n"
   "// The parameter i will be a number in the range 1-20. A cache will\n"
   "// hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will\n"
   "// thus hold all elements. The higher i the fewer elements it has.\n"
   "// Ideally the user should only reference one cache and let the\n"
   "// compiler remove the unused ones.\n"
   "// GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum\n"
   "// binary exponent distance between all elements of a given cache.\n"
   "// GRISU_CACHE_OFFSET: is used as variable name for the decimal\n"
   "// exponent offset. It is equal to -cache[0].decimal_exponent.\n"
   "// GRISU_UINT64_C: used to construct 64-bit values in a platform\n"
   "// independent way. In order to encode 0x123456789ABCDEF0 the macro\n"
   "// will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).\n")
  (print)
  (print-powers-reduced 1)
  (print-powers-reduced 2)
  (print-powers-reduced 3)
  (print-powers-reduced 4)
  (print-powers-reduced 5)
  (print-powers-reduced 6)
  (print-powers-reduced 7)
  (print-powers-reduced 8)
  (print-powers-reduced 9)
  (print-powers-reduced 10)
  (print-powers-reduced 11)
  (print-powers-reduced 12)
  (print-powers-reduced 13)
  (print-powers-reduced 14)
  (print-powers-reduced 15)
  (print-powers-reduced 16)
  (print-powers-reduced 17)
  (print-powers-reduced 18)
  (print-powers-reduced 19)
  (print-powers-reduced 20)
  (print "static const int " offset-name " = " (- from) ";"))
;;----------------main --------------------------------------------------------
;; Command-line state, filled in by my-main's argument parsing.
(define *main-args* #f)     ;; raw argv; echoed into the generated file
(define *mantissa-size* #f) ;; significand size in bits
(define *dest* #f)          ;; output file name, or #f for stdout
(define *round* #f)         ;; rounding mode: 'down, 'round or 'up
(define *from* #f)          ;; smallest decimal exponent to generate
(define *to* #f)            ;; largest decimal exponent to generate

;; Entry point: parse the command line, generate the cached powers and
;; write the C tables to *dest* (or to stdout when no -o was given).
(define (my-main args)
   (set! *main-args* args)
   (args-parse (cdr args)
      (section "Help")
      (("?") (args-parse-usage #f))
      ((("-h" "--help") (help "?, -h, --help" "This help message"))
       (args-parse-usage #f))
      (section "Misc")
      (("-o" ?file (help "The output file"))
       (set! *dest* file))
      (("--mantissa-size" ?size (help "Container-size in bits"))
       (set! *mantissa-size* (string->number size)))
      (("--round" ?direction (help "Round bignums (down, round or up)"))
       (set! *round* (string->symbol direction)))
      (("--from" ?from (help "start at 10^from"))
       (set! *from* (string->number from)))
      (("--to" ?to (help "go up to 10^to"))
       (set! *to* (string->number to)))
      (else
       (print "Illegal argument `" else "'. Usage:")
       (args-parse-usage #f)))
   ;; Validate that all mandatory options were supplied
   (when (not *from*)
      (error "generate-ten-powers"
	     "Missing from"
	     #f))
   (when (not *to*)
      (error "generate-ten-powers"
	     "Missing to"
	     #f))
   (when (not *mantissa-size*)
      (error "generate-ten-powers"
	     "Missing mantissa size"
	     #f))
   (when (not (memv *round* '(up down round)))
      (error "generate-ten-powers"
	     "Missing round-method"
	     *round*))
   (let ((dividers (generate-powers *from* *to* *mantissa-size*))
	 (p (if (not *dest*)
		(current-output-port)
		(open-output-file *dest*))))
      ;; Ensure the output file is closed even if print-c fails
      (unwind-protect
	 (with-output-to-port p
	    (lambda ()
	       (print-c dividers *from* *to*
			"GRISU_CACHE_STRUCT" "GRISU_CACHE_NAME"
			"GRISU_CACHE_MAX_DISTANCE" "GRISU_CACHE_OFFSET"
			"GRISU_UINT64_C"
			)))
	 (if *dest*
	     (close-output-port p)))))
| {
"pile_set_name": "Github"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package idna
import (
"strings"
"testing"
)
var punycodeTestCases = [...]struct {
s, encoded string
}{
{"", ""},
{"-", "--"},
{"-a", "-a-"},
{"-a-", "-a--"},
{"a", "a-"},
{"a-", "a--"},
{"a-b", "a-b-"},
{"books", "books-"},
{"bücher", "bcher-kva"},
{"Hello世界", "Hello-ck1hg65u"},
{"ü", "tda"},
{"üý", "tdac"},
// The test cases below come from RFC 3492 section 7.1 with Errata 3026.
{
// (A) Arabic (Egyptian).
"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn",
},
{
// (B) Chinese (simplified).
"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye",
},
{
// (C) Chinese (traditional).
"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb",
},
{
// (D) Czech.
"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a",
},
{
// (E) Hebrew.
"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" +
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" +
"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b",
},
{
// (F) Hindi (Devanagari).
"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" +
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" +
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" +
"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd",
},
{
// (G) Japanese (kanji and hiragana).
"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" +
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa",
},
{
// (H) Korean (Hangul syllables).
"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" +
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" +
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" +
"psd879ccm6fea98c",
},
{
// (I) Russian (Cyrillic).
"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" +
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" +
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" +
"\u0438",
"b1abfaaepdrnnbgefbadotcwatmq2g4l",
},
{
// (J) Spanish.
"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" +
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" +
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" +
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" +
"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a",
},
{
// (K) Vietnamese.
"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" +
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" +
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" +
"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g",
},
{
// (L) 3<nen>B<gumi><kinpachi><sensei>.
"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b",
},
{
// (M) <amuro><namie>-with-SUPER-MONKEYS.
"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" +
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" +
"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n",
},
{
// (N) Hello-Another-Way-<sorezore><no><basho>.
"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" +
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" +
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b",
},
{
// (O) <hitotsu><yane><no><shita>2.
"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v",
},
{
// (P) Maji<de>Koi<suru>5<byou><mae>
"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" +
"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e",
},
{
// (Q) <pafii>de<runba>
"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d",
},
{
// (R) <sono><supiido><de>
"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp",
},
{
// (S) -> $1.00 <-
"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" +
"\u003C\u002D",
"-> $1.00 <--",
},
}
// TestPunycode round-trips every entry of punycodeTestCases: the encoded
// form must decode back to the Unicode string, and the Unicode string must
// encode to the expected Punycode form.
func TestPunycode(t *testing.T) {
	for _, tc := range punycodeTestCases {
		decoded, err := decode(tc.encoded)
		if err != nil {
			t.Errorf("decode(%q): %v", tc.encoded, err)
		} else if decoded != tc.s {
			t.Errorf("decode(%q): got %q, want %q", tc.encoded, decoded, tc.s)
		}

		encoded, err := encode("", tc.s)
		if err != nil {
			t.Errorf(`encode("", %q): %v`, tc.s, err)
		} else if encoded != tc.encoded {
			t.Errorf(`encode("", %q): got %q, want %q`, tc.s, encoded, tc.encoded)
		}
	}
}
// punycodeErrorTestCases holds inputs that decode or encode must reject.
// Each entry is "decode <input>" or "encode <input>"; TestPunycodeErrors
// strips the prefix and passes the remainder to the named function.
var punycodeErrorTestCases = [...]string{
	"decode -",            // A sole '-' is invalid.
	"decode foo\x00bar",   // '\x00' is not in [0-9A-Za-z].
	"decode foo#bar",      // '#' is not in [0-9A-Za-z].
	"decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z].
	"decode 9",            // "9a" decodes to codepoint \u00A3; "9" is truncated.
	"decode 99999a",       // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF.
	"decode 9999999999a",  // "9999999999a" overflows the int32 calculation.

	"encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow.
}
// TestPunycodeErrors feeds each malformed input to decode or encode
// (selected by the entry's prefix) and fails if no error is reported.
// Very long inputs are abbreviated in the failure message.
func TestPunycodeErrors(t *testing.T) {
	for _, tc := range punycodeErrorTestCases {
		var err error
		if arg := strings.TrimPrefix(tc, "decode "); arg != tc {
			_, err = decode(arg)
		} else if arg := strings.TrimPrefix(tc, "encode "); arg != tc {
			_, err = encode("", arg)
		}
		if err != nil {
			continue
		}
		label := tc
		if len(label) > 256 {
			label = label[:100] + "..." + label[len(label)-100:]
		}
		t.Errorf("no error for %s", label)
	}
}
| {
"pile_set_name": "Github"
} |
/* Post-stack Stolt modeling/migration.
Requires the input to be cosine-transformed over the lateral axes.
August 2014 program of the month:
http://ahay.org/blog/2014/08/03/program-of-the-month-sfstolt/
*/
/*
Copyright (C) 2004 University of Texas at Austin
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <math.h>
#include <rsf.h>
#include "fint1.h"
static float a, b, x, vel;
static float stolt(float w, int iw) {
float sq;
sq = (vel < 0)? w*w - x: w*w + x;
if (sq > 0.) {
sq = sqrtf(sq);
sq = a*w + b*sq;
}
return sq;
}
/* Driver: reads cosine-transformed data, applies the Stolt frequency
 * mapping trace by trace, and writes the result.  Relies on the RSF
 * (Madagascar) I/O and interpolation libraries. */
int main(int argc, char* argv[])
{
    fint1 map;
    int nt,nx,ny, iw,ix,iy, nf, nw, mute;
    float dw, dt, dx,dy, x0,y0, t0, y, st, *trace=NULL, minstr;
    sf_file in=NULL, out=NULL;

    sf_init (argc,argv);
    in = sf_input("in");
    out = sf_output("out");

    /* n1: samples per trace; n2/n3: traces in the two lateral directions. */
    if (!sf_histint(in,"n1",&nt)) sf_error("No n1= in input");
    if (!sf_histint(in,"n2",&nx)) nx=1;
    if (!sf_histint(in,"n3",&ny)) ny=1;

    if (!sf_getfloat("vel",&vel)) sf_error("Need vel=");
    /* Constant velocity (use negative velocity for modeling) */

    if (!sf_histfloat(in,"d1",&dt)) sf_error("No d1= in input");
    if (!sf_histfloat(in,"o1",&t0)) sf_error("No o1= in input");

    if (!sf_getint ("pad",&nw)) nw=nt;
    /* padding on the time axis */
    /* Round up to an FFT-friendly even length, then set up the cosine
     * transform and the frequency sampling interval. */
    nw=2*kiss_fft_next_fast_size(nw-1);

    sf_cosft_init(nw/2+1);
    dw = 2 * SF_PI/(nw*dt);

    if (!sf_histfloat(in,"o2",&x0)) x0=0.0;
    if (!sf_histfloat(in,"d2",&dx)) sf_error("No d2= in input");
    if (!sf_histfloat(in,"o3",&y0)) y0=0.0;
    if (!sf_histfloat(in,"d3",&dy)) dy=dx;

    /* Scale lateral axes to wavenumber units (pi*|vel| factor) so that
     * the stolt() mapping can combine them directly with frequency. */
    x0 *= SF_PI * fabsf (vel);
    y0 *= SF_PI * fabsf (vel);
    dx *= SF_PI * fabsf (vel);
    dy *= SF_PI * fabsf (vel);

    if (!sf_getfloat("stretch", &st) && !sf_histfloat(in,"stretch",&st)) st=1.;
    /*( stretch=1 Stolt stretch parameter )*/

    if (1. != st) sf_warning("stretch=%g",st);

    /* For migration (vel > 0) the stretch parameter is mirrored; a and b
     * are the weights used by stolt(): w' = a*w + b*sqrt(...). */
    if (vel > 0) st = 2.-st;

    a = (1.-1./st);
    b = 1./st;

    if (!sf_getint("extend",&nf)) nf=4;
    /* trace extension */

    if (!sf_getint("mute",&mute)) mute=12;
    /* mute zone */

    if (!sf_getfloat("minstr",&minstr)) minstr=0.0;
    /* minimum stretch allowed */

    trace = sf_floatalloc(nw);
    map = fint1_init (nf, nw, mute);

    /* Process every trace: pad, forward cosine transform along time,
     * remap frequencies through stolt(), inverse transform, write out. */
    for (iy = 0; iy < ny; iy++) {
	sf_warning("%d of %d;",iy+1,ny);
	y = y0+iy*dy;
	y *= y;
	for (ix = 0; ix < nx; ix++) {
	    x = x0+ix*dx;
	    /* x holds st*(kx^2 + ky^2), consumed by stolt() as a global. */
	    x = st*(x*x + y);

	    sf_floatread(trace,nt,in);
	    for (iw = nt; iw < nw; iw++) { /* pad */
		trace[iw]=0.;
	    }

	    sf_cosft_frw (trace,0,1);
	    fint1_set(map,trace);
	    stretch(map,stolt,nw,dw,0.,nw,dw,0.,trace,minstr);
	    sf_cosft_inv (trace,0,1);

	    /* Only the first nt samples (the unpadded part) are written. */
	    sf_floatwrite(trace,nt,out);
	}
    }
    sf_warning(".");

    exit (0);
}
| {
"pile_set_name": "Github"
} |
# created by tools/tclZIC.tcl - do not edit
set TZData(:Asia/Novosibirsk) {
{-9223372036854775808 19900 0 LMT}
{-1579476700 21600 0 +06}
{-1247551200 25200 0 +08}
{354906000 28800 1 +08}
{370713600 25200 0 +07}
{386442000 28800 1 +08}
{402249600 25200 0 +07}
{417978000 28800 1 +08}
{433785600 25200 0 +07}
{449600400 28800 1 +08}
{465332400 25200 0 +07}
{481057200 28800 1 +08}
{496782000 25200 0 +07}
{512506800 28800 1 +08}
{528231600 25200 0 +07}
{543956400 28800 1 +08}
{559681200 25200 0 +07}
{575406000 28800 1 +08}
{591130800 25200 0 +07}
{606855600 28800 1 +08}
{622580400 25200 0 +07}
{638305200 28800 1 +08}
{654634800 25200 0 +07}
{670359600 21600 0 +07}
{670363200 25200 1 +07}
{686088000 21600 0 +06}
{695764800 25200 0 +08}
{701809200 28800 1 +08}
{717534000 25200 0 +07}
{733258800 28800 1 +08}
{738090000 25200 0 +07}
{748987200 21600 0 +06}
{764712000 25200 1 +07}
{780436800 21600 0 +06}
{796161600 25200 1 +07}
{811886400 21600 0 +06}
{828216000 25200 1 +07}
{846360000 21600 0 +06}
{859665600 25200 1 +07}
{877809600 21600 0 +06}
{891115200 25200 1 +07}
{909259200 21600 0 +06}
{922564800 25200 1 +07}
{941313600 21600 0 +06}
{954014400 25200 1 +07}
{972763200 21600 0 +06}
{985464000 25200 1 +07}
{1004212800 21600 0 +06}
{1017518400 25200 1 +07}
{1035662400 21600 0 +06}
{1048968000 25200 1 +07}
{1067112000 21600 0 +06}
{1080417600 25200 1 +07}
{1099166400 21600 0 +06}
{1111867200 25200 1 +07}
{1130616000 21600 0 +06}
{1143316800 25200 1 +07}
{1162065600 21600 0 +06}
{1174766400 25200 1 +07}
{1193515200 21600 0 +06}
{1206820800 25200 1 +07}
{1224964800 21600 0 +06}
{1238270400 25200 1 +07}
{1256414400 21600 0 +06}
{1269720000 25200 1 +07}
{1288468800 21600 0 +06}
{1301169600 25200 0 +07}
{1414263600 21600 0 +06}
{1469304000 25200 0 +07}
}
| {
"pile_set_name": "Github"
} |
path: "tensorflow.keras.layers.SeparableConvolution2D"
tf_class {
is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv2D\'>"
is_instance: "<class \'tensorflow.python.layers.convolutional.SeparableConv2D\'>"
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv2D\'>"
is_instance: "<class \'tensorflow.python.layers.convolutional._Conv\'>"
is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.topology.Layer\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph"
mtype: "<type \'property\'>"
}
member {
name: "inbound_nodes"
mtype: "<type \'property\'>"
}
member {
name: "input"
mtype: "<type \'property\'>"
}
member {
name: "input_mask"
mtype: "<type \'property\'>"
}
member {
name: "input_shape"
mtype: "<type \'property\'>"
}
member {
name: "losses"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "non_trainable_variables"
mtype: "<type \'property\'>"
}
member {
name: "non_trainable_weights"
mtype: "<type \'property\'>"
}
member {
name: "outbound_nodes"
mtype: "<type \'property\'>"
}
member {
name: "output"
mtype: "<type \'property\'>"
}
member {
name: "output_mask"
mtype: "<type \'property\'>"
}
member {
name: "output_shape"
mtype: "<type \'property\'>"
}
member {
name: "scope_name"
mtype: "<type \'property\'>"
}
member {
name: "trainable_variables"
mtype: "<type \'property\'>"
}
member {
name: "trainable_weights"
mtype: "<type \'property\'>"
}
member {
name: "updates"
mtype: "<type \'property\'>"
}
member {
name: "variables"
mtype: "<type \'property\'>"
}
member {
name: "weights"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "add_update"
argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "add_variable"
argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
}
member_method {
name: "add_weight"
argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
}
member_method {
name: "apply"
argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
}
member_method {
name: "build"
argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "compute_mask"
argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "count_params"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_input_mask_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_input_shape_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_losses_for"
argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_output_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_output_mask_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_output_shape_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_updates_for"
argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_weights"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "set_weights"
argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
}
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/lockd/mon.c
*
* The kernel statd client.
*
* Copyright (C) 1996, Olaf Kirch <[email protected]>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <asm/unaligned.h>
#include "netns.h"
#define NLMDBG_FACILITY		NLMDBG_MONITOR

/* RPC program number and version of the local NSM (rpc.statd) service. */
#define NSM_PROGRAM		100024
#define NSM_VERSION		1

/* NSM protocol procedure numbers; only STAT-era MON and UNMON are used here. */
enum {
	NSMPROC_NULL,
	NSMPROC_STAT,
	NSMPROC_MON,
	NSMPROC_UNMON,
	NSMPROC_UNMON_ALL,
	NSMPROC_SIMU_CRASH,
	NSMPROC_NOTIFY,
};

/* Arguments for an NSMPROC_MON / NSMPROC_UNMON upcall to rpc.statd. */
struct nsm_args {
	struct nsm_private	*priv;	/* opaque cookie echoed back in SM_NOTIFY */
	u32			prog;	/* RPC callback info */
	u32			vers;
	u32			proc;

	char			*mon_name;	/* name/address of peer to monitor */
	const char		*nodename;	/* local name used in my_id */
};

/* Decoded NSM reply: protocol status plus the local NSM state number. */
struct nsm_res {
	u32			status;
	u32			state;
};

static const struct rpc_program	nsm_program;
/* Protects the per-net nsm_handles lists and handle lookup/unlink. */
static DEFINE_SPINLOCK(nsm_lock);

/*
 * Local NSM state
 */
u32	__read_mostly		nsm_local_state;
bool	__read_mostly		nsm_use_hostnames;

/* Return the peer address stored in @nsm as a generic sockaddr pointer. */
static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
{
	return (struct sockaddr *)&nsm->sm_addr;
}

/*
 * Create an RPC client for the local rpc.statd: TCP transport to the
 * IPv4 loopback address, NSM program/version, null authentication.
 * RPC_CLNT_CREATE_NOPING skips the initial NULL call so creation does
 * not block when statd is not answering.
 */
static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
	};
	struct rpc_create_args args = {
		.net			= net,
		.protocol		= XPRT_TRANSPORT_TCP,
		.address		= (struct sockaddr *)&sin,
		.addrsize		= sizeof(sin),
		.servername		= "rpc.statd",
		.nodename		= nodename,
		.program		= &nsm_program,
		.version		= NSM_VERSION,
		.authflavor		= RPC_AUTH_NULL,
		.flags			= RPC_CLNT_CREATE_NOPING,
		.cred			= current_cred(),
	};

	return rpc_create(&args);
}

/*
 * Perform a synchronous @proc (NSMPROC_MON or NSMPROC_UNMON) upcall to
 * the rpc.statd in @host's network namespace.  The my_id portion of the
 * arguments asks statd to notify us via lockd's NLMPROC_NSM_NOTIFY
 * procedure (NLM program, version 3) when the monitored peer reboots.
 *
 * Returns zero and fills in @res on RPC success, otherwise a negative
 * errno.  Note that RPC success does not imply res->status == 0; the
 * callers check the protocol-level status separately.
 */
static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
			 const struct nlm_host *host)
{
	int		status;
	struct rpc_clnt	*clnt;
	struct nsm_args args = {
		.priv		= &nsm->sm_priv,
		.prog		= NLM_PROGRAM,
		.vers		= 3,
		.proc		= NLMPROC_NSM_NOTIFY,
		.mon_name	= nsm->sm_mon_name,
		.nodename	= host->nodename,
	};
	struct rpc_message msg = {
		.rpc_argp	= &args,
		.rpc_resp	= res,
	};

	memset(res, 0, sizeof(*res));

	clnt = nsm_create(host->net, host->nodename);
	if (IS_ERR(clnt)) {
		dprintk("lockd: failed to create NSM upcall transport, "
			"status=%ld, net=%x\n", PTR_ERR(clnt),
			host->net->ns.inum);
		return PTR_ERR(clnt);
	}

	msg.rpc_proc = &clnt->cl_procinfo[proc];
	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
	if (status == -ECONNREFUSED) {
		/* statd may have restarted on another port; rebind and retry once. */
		dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
				status);
		rpc_force_rebind(clnt);
		status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
	}
	if (status < 0)
		dprintk("lockd: NSM upcall RPC failed, status=%d\n",
				status);
	else
		status = 0;

	rpc_shutdown_client(clnt);
	return status;
}
/**
 * nsm_monitor - Notify a peer in case we reboot
 * @host: pointer to nlm_host of peer to notify
 *
 * If this peer is not already monitored, this function sends an
 * upcall to the local rpc.statd to record the name/address of
 * the peer to notify in case we reboot.
 *
 * Returns zero if the peer is monitored by the local rpc.statd;
 * otherwise a negative errno value is returned.
 */
int nsm_monitor(const struct nlm_host *host)
{
	struct nsm_handle *nsm = host->h_nsmhandle;
	struct nsm_res	res;
	int		status;

	dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);

	/* Already recorded with statd: nothing more to do. */
	if (nsm->sm_monitored)
		return 0;

	/*
	 * Choose whether to record the caller_name or IP address of
	 * this peer in the local rpc.statd's database.
	 */
	nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;

	status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host);
	/* A protocol-level failure (non-zero SM status) also maps to -EIO. */
	if (unlikely(res.status != 0))
		status = -EIO;
	if (unlikely(status < 0)) {
		pr_notice_ratelimited("lockd: cannot monitor %s\n", nsm->sm_name);
		return status;
	}

	nsm->sm_monitored = 1;
	/* statd reports the local NSM state number; cache it when it changes. */
	if (unlikely(nsm_local_state != res.state)) {
		nsm_local_state = res.state;
		dprintk("lockd: NSM state changed to %d\n", nsm_local_state);
	}
	return 0;
}

/**
 * nsm_unmonitor - Unregister peer notification
 * @host: pointer to nlm_host of peer to stop monitoring
 *
 * If this peer is monitored, this function sends an upcall to
 * tell the local rpc.statd not to send this peer a notification
 * when we reboot.
 */
void nsm_unmonitor(const struct nlm_host *host)
{
	struct nsm_handle *nsm = host->h_nsmhandle;
	struct nsm_res	res;
	int status;

	/*
	 * Only unmonitor on the last reference, and only if the handle is
	 * actually monitored and not marked sticky.
	 */
	if (refcount_read(&nsm->sm_count) == 1
	 && nsm->sm_monitored && !nsm->sm_sticky) {
		dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);

		status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host);
		if (res.status != 0)
			status = -EIO;
		if (status < 0)
			printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
					nsm->sm_name);
		else
			nsm->sm_monitored = 0;
	}
}
/*
 * Find a cached nsm_handle whose sm_name matches @hostname exactly
 * (length and bytes).  Called under nsm_lock (see nsm_get_handle).
 * Returns the handle without taking a reference, or NULL.
 */
static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
					const char *hostname, const size_t len)
{
	struct nsm_handle *nsm;

	list_for_each_entry(nsm, nsm_handles, sm_link)
		if (strlen(nsm->sm_name) == len &&
		    memcmp(nsm->sm_name, hostname, len) == 0)
			return nsm;
	return NULL;
}

/*
 * Find a cached nsm_handle by peer socket address.  Called under
 * nsm_lock.  Returns the handle without taking a reference, or NULL.
 */
static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
					const struct sockaddr *sap)
{
	struct nsm_handle *nsm;

	list_for_each_entry(nsm, nsm_handles, sm_link)
		if (rpc_cmp_addr(nsm_addr(nsm), sap))
			return nsm;
	return NULL;
}

/*
 * Find a cached nsm_handle by its private cookie (the value statd echoes
 * back in an SM_NOTIFY).  Called under nsm_lock.  Returns the handle
 * without taking a reference, or NULL.
 */
static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
					const struct nsm_private *priv)
{
	struct nsm_handle *nsm;

	list_for_each_entry(nsm, nsm_handles, sm_link)
		if (memcmp(nsm->sm_priv.data, priv->data,
					sizeof(priv->data)) == 0)
			return nsm;
	return NULL;
}
/*
 * Construct a unique cookie to match this nsm_handle to this monitored
 * host. It is passed to the local rpc.statd via NSMPROC_MON, and
 * returned via NLMPROC_SM_NOTIFY, in the "priv" field of these
 * requests.
 *
 * The NSM protocol requires that these cookies be unique while the
 * system is running. We prefer a stronger requirement of making them
 * unique across reboots. If user space bugs cause a stale cookie to
 * be sent to the kernel, it could cause the wrong host to lose its
 * lock state if cookies were not unique across reboots.
 *
 * The cookies are exposed only to local user space via loopback. They
 * do not appear on the physical network. If we want greater security
 * for some reason, nsm_init_private() could perform a one-way hash to
 * obscure the contents of the cookie.
 */
static void nsm_init_private(struct nsm_handle *nsm)
{
	u64 *p = (u64 *)&nsm->sm_priv.data;
	s64 ns;

	/* Cookie = monotonic timestamp + the handle's own address. */
	ns = ktime_get_ns();
	put_unaligned(ns, p);
	put_unaligned((unsigned long)nsm, p + 1);
}

/*
 * Allocate and initialize a new nsm_handle for peer @sap/@hostname.
 * The hostname is copied into storage allocated just past the struct
 * (hence the "+ hostname_len + 1").  The handle starts with one
 * reference and is NOT yet linked into any list -- the caller does
 * that under nsm_lock.  Returns NULL on allocation failure.
 */
static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
					    const size_t salen,
					    const char *hostname,
					    const size_t hostname_len)
{
	struct nsm_handle *new;

	new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
	if (unlikely(new == NULL))
		return NULL;

	refcount_set(&new->sm_count, 1);
	new->sm_name = (char *)(new + 1);
	memcpy(nsm_addr(new), sap, salen);
	new->sm_addrlen = salen;
	nsm_init_private(new);

	/* Presentation form of the address for log messages; fall back to a
	 * fixed string when rpc_ntop cannot format the family. */
	if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,
					sizeof(new->sm_addrbuf)) == 0)
		(void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf),
				"unsupported address family");
	memcpy(new->sm_name, hostname, hostname_len);
	new->sm_name[hostname_len] = '\0';

	return new;
}
/**
 * nsm_get_handle - Find or create a cached nsm_handle
 * @net: network namespace
 * @sap: pointer to socket address of handle to find
 * @salen: length of socket address
 * @hostname: pointer to C string containing hostname to find
 * @hostname_len: length of C string
 *
 * Behavior is modulated by the global nsm_use_hostnames variable.
 *
 * Returns a cached nsm_handle after bumping its ref count, or
 * returns a fresh nsm_handle if a handle that matches @sap and/or
 * @hostname cannot be found in the handle cache.  Returns NULL if
 * an error occurs.
 */
struct nsm_handle *nsm_get_handle(const struct net *net,
				  const struct sockaddr *sap,
				  const size_t salen, const char *hostname,
				  const size_t hostname_len)
{
	struct nsm_handle *cached, *new = NULL;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* NOTE(review): hostnames containing '/' are rejected outright --
	 * presumably because mon_name ends up in statd's on-disk records;
	 * confirm against rpc.statd. */
	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				(int)hostname_len, hostname);
		}
		return NULL;
	}

	/*
	 * Lock-drop-retry scheme: look up under nsm_lock; on a miss, drop
	 * the lock, allocate a candidate handle, and retry the lookup in
	 * case another task raced us and inserted one first.
	 */
retry:
	spin_lock(&nsm_lock);

	if (nsm_use_hostnames && hostname != NULL)
		cached = nsm_lookup_hostname(&ln->nsm_handles,
					hostname, hostname_len);
	else
		cached = nsm_lookup_addr(&ln->nsm_handles, sap);

	if (cached != NULL) {
		refcount_inc(&cached->sm_count);
		spin_unlock(&nsm_lock);
		/* Discard our speculative allocation if we lost the race. */
		kfree(new);
		dprintk("lockd: found nsm_handle for %s (%s), "
				"cnt %d\n", cached->sm_name,
				cached->sm_addrbuf,
				refcount_read(&cached->sm_count));
		return cached;
	}

	if (new != NULL) {
		list_add(&new->sm_link, &ln->nsm_handles);
		spin_unlock(&nsm_lock);
		dprintk("lockd: created nsm_handle for %s (%s)\n",
				new->sm_name, new->sm_addrbuf);
		return new;
	}

	spin_unlock(&nsm_lock);

	new = nsm_create_handle(sap, salen, hostname, hostname_len);
	if (unlikely(new == NULL))
		return NULL;
	goto retry;
}
/**
 * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
 * @net: network namespace
 * @info: pointer to NLMPROC_SM_NOTIFY arguments
 *
 * Returns a matching nsm_handle if found in the nsm cache. The returned
 * nsm_handle's reference count is bumped. Otherwise returns NULL if some
 * error occurred.
 */
struct nsm_handle *nsm_reboot_lookup(const struct net *net,
				const struct nlm_reboot *info)
{
	struct nsm_handle *cached;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	spin_lock(&nsm_lock);

	/* Match on the opaque cookie we handed to statd at MON time. */
	cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
	if (unlikely(cached == NULL)) {
		spin_unlock(&nsm_lock);
		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
				info->len, info->mon);
		return cached;
	}

	refcount_inc(&cached->sm_count);
	spin_unlock(&nsm_lock);

	dprintk("lockd: host %s (%s) rebooted, cnt %d\n",
			cached->sm_name, cached->sm_addrbuf,
			refcount_read(&cached->sm_count));
	return cached;
}

/**
 * nsm_release - Release an NSM handle
 * @nsm: pointer to handle to be released
 *
 */
void nsm_release(struct nsm_handle *nsm)
{
	/* Drop a reference; on the last one, unlink under nsm_lock and free. */
	if (refcount_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
		list_del(&nsm->sm_link);
		spin_unlock(&nsm_lock);
		dprintk("lockd: destroyed nsm_handle for %s (%s)\n",
				nsm->sm_name, nsm->sm_addrbuf);
		kfree(nsm);
	}
}
/*
 * XDR functions for NSM.
 *
 * See http://www.opengroup.org/ for details on the Network
 * Status Monitor wire protocol.
 */

/* Encode a counted XDR opaque (4-byte length + padded bytes) from a
 * NUL-terminated C string. */
static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
{
	const u32 len = strlen(string);
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, string, len);
}

/*
 * "mon_name" specifies the host to be monitored.
 */
static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	encode_nsm_string(xdr, argp->mon_name);
}

/*
 * The "my_id" argument specifies the hostname and RPC procedure
 * to be called when the status manager receives notification
 * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
 * has changed.
 */
static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	__be32 *p;

	encode_nsm_string(xdr, argp->nodename);
	/* Three 4-byte fields: callback program, version, procedure. */
	p = xdr_reserve_space(xdr, 4 + 4 + 4);
	*p++ = cpu_to_be32(argp->prog);
	*p++ = cpu_to_be32(argp->vers);
	*p = cpu_to_be32(argp->proc);
}

/*
 * The "mon_id" argument specifies the non-private arguments
 * of an NSMPROC_MON or NSMPROC_UNMON call.
 */
static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	encode_mon_name(xdr, argp);
	encode_my_id(xdr, argp);
}

/*
 * The "priv" argument may contain private information required
 * by the NSMPROC_MON call. This information will be supplied in the
 * NLMPROC_SM_NOTIFY call.
 */
static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
}

/* NSMPROC_MON arguments: mon_id followed by the private cookie. */
static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
			    const void *argp)
{
	encode_mon_id(xdr, argp);
	encode_priv(xdr, argp);
}

/* NSMPROC_UNMON arguments: mon_id only (no cookie). */
static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
			      const void *argp)
{
	encode_mon_id(xdr, argp);
}

/* Decode an NSMPROC_MON reply: SM status word followed by the NSM state. */
static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
				struct xdr_stream *xdr,
				void *data)
{
	struct nsm_res *resp = data;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		return -EIO;
	resp->status = be32_to_cpup(p++);
	resp->state = be32_to_cpup(p);

	dprintk("lockd: %s status %d state %d\n",
		__func__, resp->status, resp->state);
	return 0;
}

/* Decode an NSMPROC_UNMON reply: just the NSM state number. */
static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
			    struct xdr_stream *xdr,
			    void *data)
{
	struct nsm_res *resp = data;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	resp->state = be32_to_cpup(p);

	dprintk("lockd: %s state %d\n", __func__, resp->state);
	return 0;
}
/*
 * On-the-wire sizes of the NSM argument/result types, expressed in
 * 32-bit XDR words (used for RPC buffer sizing below).
 */
#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_my_id_sz	(SM_my_name_sz+3)
#define SM_mon_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_mon_id_sz	(SM_mon_name_sz+SM_my_id_sz)
#define SM_priv_sz	(XDR_QUADLEN(SM_PRIV_SIZE))
#define SM_mon_sz	(SM_mon_id_sz+SM_priv_sz)
#define SM_monres_sz	2
#define SM_unmonres_sz	1

/* Procedure table for NSM version 1; only MON and UNMON are issued here. */
static const struct rpc_procinfo nsm_procedures[] = {
[NSMPROC_MON] = {
		.p_proc		= NSMPROC_MON,
		.p_encode	= nsm_xdr_enc_mon,
		.p_decode	= nsm_xdr_dec_stat_res,
		.p_arglen	= SM_mon_sz,
		.p_replen	= SM_monres_sz,
		.p_statidx	= NSMPROC_MON,
		.p_name		= "MONITOR",
	},
[NSMPROC_UNMON] = {
		.p_proc		= NSMPROC_UNMON,
		.p_encode	= nsm_xdr_enc_unmon,
		.p_decode	= nsm_xdr_dec_stat,
		.p_arglen	= SM_mon_id_sz,
		.p_replen	= SM_unmonres_sz,
		.p_statidx	= NSMPROC_UNMON,
		.p_name		= "UNMONITOR",
	},
};

/* Per-procedure call counters, required by the rpc_version interface. */
static unsigned int nsm_version1_counts[ARRAY_SIZE(nsm_procedures)];
static const struct rpc_version nsm_version1 = {
	.number		= 1,
	.nrprocs	= ARRAY_SIZE(nsm_procedures),
	.procs		= nsm_procedures,
	.counts		= nsm_version1_counts,
};

static const struct rpc_version *nsm_version[] = {
	[1] = &nsm_version1,
};

static struct rpc_stat		nsm_stats;

/* Program descriptor handed to rpc_create() in nsm_create(). */
static const struct rpc_program nsm_program = {
		.name		= "statd",
		.number		= NSM_PROGRAM,
		.nrvers		= ARRAY_SIZE(nsm_version),
		.version	= nsm_version,
		.stats		= &nsm_stats
};
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++98, c++03
// <memory>
// template <class OuterAlloc, class... InnerAllocs>
// class scoped_allocator_adaptor
// typedef see below propagate_on_container_move_assignment;
#include <scoped_allocator>
#include <type_traits>
#include "allocators.h"
int main()
{
    // scoped_allocator_adaptor::propagate_on_container_move_assignment is
    // true_type if ANY of the outer/inner allocators has a true trait
    // ([allocator.adaptor.types]).  The asserts below show A1's trait is
    // false (adaptor over A1 alone is false_type) while adding A2 (and A3,
    // defined in allocators.h) flips the adaptor's trait to true_type.
    static_assert((std::is_same<
        std::scoped_allocator_adaptor<A1<int>>::propagate_on_container_move_assignment,
        std::false_type>::value), "");
    static_assert((std::is_same<
        std::scoped_allocator_adaptor<A1<int>, A2<int>>::propagate_on_container_move_assignment,
        std::true_type>::value), "");
    static_assert((std::is_same<
        std::scoped_allocator_adaptor<A1<int>, A2<int>, A3<int>>::propagate_on_container_move_assignment,
        std::true_type>::value), "");
}
| {
"pile_set_name": "Github"
} |
// Some interop code taken from Mike Marshall's AnyForm
using System;
using System.Drawing;
using System.Runtime.InteropServices;
namespace Hardcodet.Wpf.TaskbarNotification.Interop
{
/// <summary>
/// This contains the logic to access the location of the app bar and communicate with it.
/// </summary>
public class AppBarInfo
{
        // P/Invoke: locate a top-level window by class name and/or window name.
        [DllImport("user32.dll", CharSet = CharSet.Unicode)]
        private static extern IntPtr FindWindow(string lpClassName, string lpWindowName);

        // P/Invoke: send an app bar message (here ABM_GETTASKBARPOS) to the shell.
        [DllImport("shell32.dll")]
        private static extern uint SHAppBarMessage(uint dwMessage, ref APPBARDATA data);

        // P/Invoke: query or set a system-wide parameter.
        [DllImport("user32.dll")]
        private static extern int SystemParametersInfo(uint uiAction, uint uiParam,
            IntPtr pvParam, uint fWinIni);

        // ABM_GETTASKBARPOS message id: asks the shell for the taskbar rectangle.
        private const int ABM_GETTASKBARPOS = 0x00000005;

        // Raw APPBARDATA most recently filled in by GetPosition().
        private APPBARDATA m_data;

        /// <summary>
        /// Get on which edge the app bar is located
        /// </summary>
        public ScreenEdge Edge
        {
            get { return (ScreenEdge) m_data.uEdge; }
        }

        /// <summary>
        /// Get the working area
        /// </summary>
        public Rectangle WorkArea
        {
            get { return GetRectangle(m_data.rc); }
        }
private Rectangle GetRectangle(RECT rc)
{
return new Rectangle(rc.left, rc.top, rc.right - rc.left, rc.bottom - rc.top);
}
/// <summary>
/// Update the location of the appbar with the specified classname and window name.
/// </summary>
/// <param name="strClassName">string</param>
/// <param name="strWindowName">string</param>
private void GetPosition(string strClassName, string strWindowName)
{
m_data = new APPBARDATA();
m_data.cbSize = (uint) Marshal.SizeOf(m_data.GetType());
IntPtr hWnd = FindWindow(strClassName, strWindowName);
if (hWnd != IntPtr.Zero)
{
uint uResult = SHAppBarMessage(ABM_GETTASKBARPOS, ref m_data);
if (uResult != 1)
{
throw new Exception("Failed to communicate with the given AppBar");
}
}
else
{
throw new Exception("Failed to find an AppBar that matched the given criteria");
}
}
/// <summary>
/// Updates the system taskbar position
/// </summary>
public void GetSystemTaskBarPosition()
{
GetPosition("Shell_TrayWnd", null);
}
/// <summary>
/// A value that specifies an edge of the screen.
/// </summary>
public enum ScreenEdge
{
/// <summary>
/// Undefined
/// </summary>
Undefined = -1,
/// <summary>
/// Left edge.
/// </summary>
Left = 0,
/// <summary>
/// Top edge.
/// </summary>
Top = 1,
/// <summary>
/// Right edge.
/// </summary>
Right = 2,
/// <summary>
/// Bottom edge.
/// </summary>
Bottom = 3
}
[StructLayout(LayoutKind.Sequential)]
private struct APPBARDATA
{
public uint cbSize;
public IntPtr hWnd;
public uint uCallbackMessage;
public uint uEdge;
public RECT rc;
public int lParam;
}
[StructLayout(LayoutKind.Sequential)]
private struct RECT
{
public int left;
public int top;
public int right;
public int bottom;
}
}
} | {
"pile_set_name": "Github"
} |
/* sp.h
*
* Copyright (C) 2006-2020 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifndef WOLF_CRYPT_SP_H
#define WOLF_CRYPT_SP_H
#include <wolfssl/wolfcrypt/types.h>
#if defined(WOLFSSL_HAVE_SP_RSA) || defined(WOLFSSL_HAVE_SP_DH) || \
defined(WOLFSSL_HAVE_SP_ECC)
#include <stdint.h>
#include <wolfssl/wolfcrypt/integer.h>
#include <wolfssl/wolfcrypt/sp_int.h>
#include <wolfssl/wolfcrypt/ecc.h>
#if defined(_MSC_VER)
#define SP_NOINLINE __declspec(noinline)
#elif defined(__IAR_SYSTEMS_ICC__) || defined(__GNUC__) || defined(__KEIL__)
#define SP_NOINLINE __attribute__((noinline))
#else
#define SP_NOINLINE
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifdef WOLFSSL_HAVE_SP_RSA
WOLFSSL_LOCAL int sp_RsaPublic_2048(const byte* in, word32 inLen,
mp_int* em, mp_int* mm, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_RsaPrivate_2048(const byte* in, word32 inLen,
mp_int* dm, mp_int* pm, mp_int* qm, mp_int* dpm, mp_int* dqm, mp_int* qim,
mp_int* mm, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_RsaPublic_3072(const byte* in, word32 inLen,
mp_int* em, mp_int* mm, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_RsaPrivate_3072(const byte* in, word32 inLen,
mp_int* dm, mp_int* pm, mp_int* qm, mp_int* dpm, mp_int* dqm, mp_int* qim,
mp_int* mm, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_RsaPublic_4096(const byte* in, word32 inLen,
mp_int* em, mp_int* mm, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_RsaPrivate_4096(const byte* in, word32 inLen,
mp_int* dm, mp_int* pm, mp_int* qm, mp_int* dpm, mp_int* dqm, mp_int* qim,
mp_int* mm, byte* out, word32* outLen);
#endif /* WOLFSSL_HAVE_SP_RSA */
#if defined(WOLFSSL_HAVE_SP_DH) || defined(WOLFSSL_HAVE_SP_RSA)
WOLFSSL_LOCAL int sp_ModExp_1024(mp_int* base, mp_int* exp, mp_int* mod,
mp_int* res);
WOLFSSL_LOCAL int sp_ModExp_1536(mp_int* base, mp_int* exp, mp_int* mod,
mp_int* res);
WOLFSSL_LOCAL int sp_ModExp_2048(mp_int* base, mp_int* exp, mp_int* mod,
mp_int* res);
WOLFSSL_LOCAL int sp_ModExp_3072(mp_int* base, mp_int* exp, mp_int* mod,
mp_int* res);
WOLFSSL_LOCAL int sp_ModExp_4096(mp_int* base, mp_int* exp, mp_int* mod,
mp_int* res);
#endif
#ifdef WOLFSSL_HAVE_SP_DH
WOLFSSL_LOCAL int sp_DhExp_2048(mp_int* base, const byte* exp, word32 expLen,
mp_int* mod, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_DhExp_3072(mp_int* base, const byte* exp, word32 expLen,
mp_int* mod, byte* out, word32* outLen);
WOLFSSL_LOCAL int sp_DhExp_4096(mp_int* base, const byte* exp, word32 expLen,
mp_int* mod, byte* out, word32* outLen);
#endif /* WOLFSSL_HAVE_SP_DH */
#ifdef WOLFSSL_HAVE_SP_ECC
int sp_ecc_mulmod_256(mp_int* km, ecc_point* gm, ecc_point* rm, int map,
void* heap);
int sp_ecc_mulmod_base_256(mp_int* km, ecc_point* rm, int map, void* heap);
int sp_ecc_make_key_256(WC_RNG* rng, mp_int* priv, ecc_point* pub, void* heap);
int sp_ecc_secret_gen_256(mp_int* priv, ecc_point* pub, byte* out,
word32* outlen, void* heap);
int sp_ecc_sign_256(const byte* hash, word32 hashLen, WC_RNG* rng, mp_int* priv,
mp_int* rm, mp_int* sm, mp_int* km, void* heap);
int sp_ecc_verify_256(const byte* hash, word32 hashLen, mp_int* pX, mp_int* pY,
mp_int* pZ, mp_int* r, mp_int* sm, int* res, void* heap);
int sp_ecc_is_point_256(mp_int* pX, mp_int* pY);
int sp_ecc_check_key_256(mp_int* pX, mp_int* pY, mp_int* privm, void* heap);
int sp_ecc_proj_add_point_256(mp_int* pX, mp_int* pY, mp_int* pZ,
mp_int* qX, mp_int* qY, mp_int* qZ,
mp_int* rX, mp_int* rY, mp_int* rZ);
int sp_ecc_proj_dbl_point_256(mp_int* pX, mp_int* pY, mp_int* pZ,
mp_int* rX, mp_int* rY, mp_int* rZ);
int sp_ecc_map_256(mp_int* pX, mp_int* pY, mp_int* pZ);
int sp_ecc_uncompress_256(mp_int* xm, int odd, mp_int* ym);
int sp_ecc_mulmod_384(mp_int* km, ecc_point* gm, ecc_point* rm, int map,
void* heap);
int sp_ecc_mulmod_base_384(mp_int* km, ecc_point* rm, int map, void* heap);
int sp_ecc_make_key_384(WC_RNG* rng, mp_int* priv, ecc_point* pub, void* heap);
int sp_ecc_secret_gen_384(mp_int* priv, ecc_point* pub, byte* out,
word32* outlen, void* heap);
int sp_ecc_sign_384(const byte* hash, word32 hashLen, WC_RNG* rng, mp_int* priv,
mp_int* rm, mp_int* sm, mp_int* km, void* heap);
int sp_ecc_verify_384(const byte* hash, word32 hashLen, mp_int* pX, mp_int* pY,
mp_int* pZ, mp_int* r, mp_int* sm, int* res, void* heap);
int sp_ecc_is_point_384(mp_int* pX, mp_int* pY);
int sp_ecc_check_key_384(mp_int* pX, mp_int* pY, mp_int* privm, void* heap);
int sp_ecc_proj_add_point_384(mp_int* pX, mp_int* pY, mp_int* pZ,
mp_int* qX, mp_int* qY, mp_int* qZ,
mp_int* rX, mp_int* rY, mp_int* rZ);
int sp_ecc_proj_dbl_point_384(mp_int* pX, mp_int* pY, mp_int* pZ,
mp_int* rX, mp_int* rY, mp_int* rZ);
int sp_ecc_map_384(mp_int* pX, mp_int* pY, mp_int* pZ);
int sp_ecc_uncompress_384(mp_int* xm, int odd, mp_int* ym);
#endif /*ifdef WOLFSSL_HAVE_SP_ECC */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* WOLFSSL_HAVE_SP_RSA || WOLFSSL_HAVE_SP_DH || WOLFSSL_HAVE_SP_ECC */
#endif /* WOLF_CRYPT_SP_H */
| {
"pile_set_name": "Github"
} |
//
// NSURLConnection+RACSupport.h
// ReactiveCocoa
//
// Created by Justin Spahr-Summers on 2013-10-01.
// Copyright (c) 2013 GitHub, Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
@class RACSignal;
@interface NSURLConnection (RACSupport)
// Lazily loads data for the given request in the background.
//
// request - The URL request to load. This must not be nil.
//
// Returns a signal which will begin loading the request upon each subscription,
// then send a `RACTuple` of the received `NSURLResponse` and downloaded
// `NSData`, and complete on a background thread. If any errors occur, the
// returned signal will error out.
+ (RACSignal *)rac_sendAsynchronousRequest:(NSURLRequest *)request;
@end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "0700"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B731990FF7400DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories Burned.app"
BlueprintName = "Reading and Modifying the User’s Total Calories Burned"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</BuildActionEntry>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "NO"
buildForArchiving = "NO"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B851990FF7500DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories BurnedTests.xctest"
BlueprintName = "Reading and Modifying the User’s Total Calories BurnedTests"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES"
buildConfiguration = "Debug">
<Testables>
<TestableReference
skipped = "NO">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B851990FF7500DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories BurnedTests.xctest"
BlueprintName = "Reading and Modifying the User’s Total Calories BurnedTests"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</TestableReference>
</Testables>
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B731990FF7400DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories Burned.app"
BlueprintName = "Reading and Modifying the User’s Total Calories Burned"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
buildConfiguration = "Debug"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B731990FF7400DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories Burned.app"
BlueprintName = "Reading and Modifying the User’s Total Calories Burned"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
buildConfiguration = "Release"
debugDocumentVersioning = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "9DA11B731990FF7400DC5222"
BuildableName = "Reading and Modifying the User’s Total Calories Burned.app"
BlueprintName = "Reading and Modifying the User’s Total Calories Burned"
ReferencedContainer = "container:Reading and Modifying the User’s Total Calories Burned.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
| {
"pile_set_name": "Github"
} |
module Puppet::Parser::Functions
  newfunction(:delete_values, :type => :rvalue, :doc => <<-EOS
Deletes all instances of a given value from a hash.
*Examples:*
delete_values({'a'=>'A','b'=>'B','c'=>'C','B'=>'D'}, 'B')
Would return: {'a'=>'A','c'=>'C','B'=>'D'}
EOS
  ) do |arguments|
    # Exactly two arguments are required: the hash and the value to remove.
    unless arguments.size == 2
      raise(Puppet::ParseError,
            "delete_values(): Wrong number of arguments given (#{arguments.size} of 2)")
    end

    hash, item = arguments

    unless hash.is_a?(Hash)
      raise(TypeError,
            "delete_values(): First argument must be a Hash. Given an argument of class #{hash.class}.")
    end

    # Build a new hash without the pairs whose value matches; the input
    # hash is left untouched.
    hash.reject { |_key, value| value == item }
  end
end
| {
"pile_set_name": "Github"
} |
#!/usr/bin/make -f
#
# Catch-all rule: hand every requested target over to the debhelper
# sequencer (dh), which runs the standard Debian build steps.
%:
	dh $@
"pile_set_name": "Github"
} |
package abi37_0_0.org.unimodules.adapters.react;
import android.os.Bundle;
import abi37_0_0.com.facebook.react.bridge.Arguments;
import java.util.List;
import javax.annotation.Nullable;
import abi37_0_0.org.unimodules.core.Promise;
/**
* Decorator for {@link abi37_0_0.com.facebook.react.bridge.Promise},
* so we don't have to implement these inline in {@link NativeModulesProxy}.
*/
/* package */ class PromiseWrapper extends Promise {
private abi37_0_0.com.facebook.react.bridge.Promise mPromise;
/* package */ PromiseWrapper(abi37_0_0.com.facebook.react.bridge.Promise promise) {
super();
mPromise = promise;
}
public void resolve(@Nullable Object value) {
if (value instanceof Bundle) {
mPromise.resolve(Arguments.fromBundle((Bundle) value));
} else if (value instanceof List) {
mPromise.resolve(Arguments.fromList((List) value));
} else {
mPromise.resolve(value);
}
}
public void reject(String code, String message, Throwable e) {
mPromise.reject(code, message, e);
}
}
| {
"pile_set_name": "Github"
} |
// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s
//
// Diagnostic test: checks that Sema rejects duplicate member/base entries in
// constructor mem-initializer lists, including templated bases, anonymous
// structs/unions and decltype'd base specifiers.  The expected-error /
// expected-note comments are consumed by clang's -verify harness and must
// stay in sync with the diagnostics emitted on their lines.
class S {
public:
   S ();
};
struct D : S {
  D() :
    b1(0), // expected-note {{previous initialization is here}}
    b2(1),
    b1(0), // expected-error {{multiple initializations given for non-static member 'b1'}}
    S(), // expected-note {{previous initialization is here}}
    S() // expected-error {{multiple initializations given for base 'S'}}
  {}
  int b1;
  int b2;
};
struct A {
  struct {
    int a;
    int b;
  };
  A();
};
A::A() : a(10), b(20) { }
namespace Test1 {
  template<typename T> struct A {};
  template<typename T> struct B : A<T> {
    B() : A<T>(), // expected-note {{previous initialization is here}}
      A<T>() { } // expected-error {{multiple initializations given for base 'A<T>'}}
  };
}
namespace Test2 {
  template<typename T> struct A : T {
    A() : T(), // expected-note {{previous initialization is here}}
      T() { } // expected-error {{multiple initializations given for base 'T'}}
  };
}
namespace Test3 {
  template<typename T> struct A {
    T t;
    A() : t(1), // expected-note {{previous initialization is here}}
      t(2) { } // expected-error {{multiple initializations given for non-static member 't'}}
  };
}
namespace test4 {
  class A {
    union {
      struct {
        int a;
        int b;
      };
      int c;
      union {
        int d;
        int e;
      };
    };
    A(char _) : a(0), b(0) {}
    A(short _) : a(0), c(0) {} // expected-error {{initializing multiple members of union}} expected-note {{previous initialization is here}}
    A(int _) : d(0), e(0) {} // expected-error {{initializing multiple members of union}} expected-note {{previous initialization is here}}
    A(long _) : a(0), d(0) {} // expected-error {{initializing multiple members of union}} expected-note {{previous initialization is here}}
  };
}
namespace test5 {
  struct Base {
    Base(int);
  };
  struct A : Base {
    A() : decltype(Base(1))(3) {
    }
    A(int) : Base(3), // expected-note {{previous initialization is here}}
      decltype(Base(1))(2), // expected-error {{multiple initializations given for base 'decltype(test5::Base(1))' (aka 'test5::Base')}}
      decltype(int())() { // expected-error {{constructor initializer 'decltype(int())' (aka 'int') does not name a class}}
    }
    A(float) : decltype(A())(3) {
    }
  };
}
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build ppc64 ppc64le
// +build !gccgo
#include "textflag.h"
//
// System calls for ppc64, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
//
// Issues a system call that by contract never returns an error, bracketed
// with entersyscall/exitsyscall so the Go scheduler can account for the
// blocking kernel transition.  Unused argument registers are zeroed.
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	BL	runtime·entersyscall(SB)
	MOVD	a1+8(FP), R3
	MOVD	a2+16(FP), R4
	MOVD	a3+24(FP), R5
	MOVD	R0, R6
	MOVD	R0, R7
	MOVD	R0, R8
	MOVD	trap+0(FP), R9	// syscall entry
	SYSCALL R9
	MOVD	R3, r1+32(FP)
	MOVD	R4, r2+40(FP)
	BL	runtime·exitsyscall(SB)
	RET
// func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
//
// Like SyscallNoError, but without notifying the runtime: safe only for
// calls that cannot block (no entersyscall/exitsyscall bracketing).
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVD	a1+8(FP), R3
	MOVD	a2+16(FP), R4
	MOVD	a3+24(FP), R5
	MOVD	R0, R6
	MOVD	R0, R7
	MOVD	R0, R8
	MOVD	trap+0(FP), R9	// syscall entry
	SYSCALL R9
	MOVD	R3, r1+32(FP)
	MOVD	R4, r2+40(FP)
	RET
| {
"pile_set_name": "Github"
} |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from collections import OrderedDict
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import base
from .. import _ihatexml
def ensure_str(s):
    """Return *s* as native text.

    ``None`` and text pass through untouched; byte strings are strictly
    ASCII-decoded (raising on non-ASCII input).
    """
    if s is None or isinstance(s, text_type):
        return s
    return s.decode("ascii", "strict")
class Root(object):
    """Wraps an lxml document (ElementTree) as a walkable root node.

    ``children`` holds the optional :class:`Doctype` plus every top-level
    sibling of the document root (e.g. comments before/after the root
    element).  ``text``/``tail`` are always ``None`` so the root is
    duck-compatible with lxml nodes.
    """
    def __init__(self, et):
        self.elementtree = et
        self.children = []
        try:
            # lxml exposes the DTD via docinfo; objects without docinfo
            # raise AttributeError and simply get no doctype child.
            if et.docinfo.internalDTD:
                self.children.append(Doctype(self,
                                             ensure_str(et.docinfo.root_name),
                                             ensure_str(et.docinfo.public_id),
                                             ensure_str(et.docinfo.system_url)))
        except AttributeError:
            pass
        try:
            node = et.getroot()
        except AttributeError:
            node = et
        # Rewind to the first top-level sibling, then collect all
        # top-level nodes in document order.
        while node.getprevious() is not None:
            node = node.getprevious()
        while node is not None:
            self.children.append(node)
            node = node.getnext()
        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # The root has no siblings.
        return None

    def __len__(self):
        # The root is modelled as a single node.
        return 1
class Doctype(object):
    """Synthetic doctype node exposed by the lxml tree walker.

    lxml has no first-class doctype node, so the walker fabricates one as
    the first child of :class:`Root`.  ``text``/``tail`` are kept ``None``
    for duck-compatibility with real lxml nodes.
    """

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node, self.name = root_node, name
        self.public_id, self.system_id = public_id, system_id
        self.text = self.tail = None

    def getnext(self):
        # The doctype is inserted before the document element, so the next
        # sibling is the root's second child.
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Root for a fragment (a list of top-level nodes) rather than a document.

    Deliberately does NOT call ``Root.__init__``: there is no ElementTree
    and no doctype, only the wrapped children.
    """
    def __init__(self, children):
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None

    def getnext(self):
        # The fragment root has no siblings.
        return None
class FragmentWrapper(object):
    """Wraps one top-level item of a fragment (element, text, or other).

    Attribute access is proxied to the wrapped object; ``text``/``tail``
    are converted to text eagerly so walking them is cheap.
    """
    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        if hasattr(self.obj, 'text'):
            self.text = ensure_str(self.obj.text)
        else:
            self.text = None
        if hasattr(self.obj, 'tail'):
            self.tail = ensure_str(self.obj.tail)
        else:
            self.tail = None

    def __getattr__(self, name):
        # Delegate anything not defined here (e.g. 'tag', 'attrib') to the
        # wrapped lxml object.
        return getattr(self.obj, name)

    def getnext(self):
        # Next sibling among the fragment's top-level children, or None.
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        else:
            return None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        # Fragment children are parentless by definition.
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        # NOTE(review): returns str() even on Python 2 - presumably only
        # exercised with text input there; confirm before relying on it.
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
class TreeWalker(base.NonRecursiveTreeWalker):
    """html5lib tree walker over lxml trees and fragments.

    lxml stores text as the ``.text``/``.tail`` attributes of elements
    rather than as nodes, so text positions are represented throughout as
    ``(element, "text" | "tail")`` tuples.
    """
    def __init__(self, tree):
        # pylint:disable=redefined-variable-type
        if isinstance(tree, list):
            # Fragment: remember the top-level children so getParentNode()
            # can report None for them.
            self.fragmentChildren = set(tree)
            tree = FragmentRoot(tree)
        else:
            self.fragmentChildren = set()
            tree = Root(tree)
        base.NonRecursiveTreeWalker.__init__(self, tree)
        # Converts mangled XML names back to their infoset form.
        self.filter = _ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        """Classify *node* and return the detail tuple html5lib expects."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            return base.TEXT, ensure_str(getattr(node, key))
        elif isinstance(node, Root):
            return (base.DOCUMENT,)
        elif isinstance(node, Doctype):
            return base.DOCTYPE, node.name, node.public_id, node.system_id
        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
            # A wrapped bare string in a fragment.
            return base.TEXT, ensure_str(node.obj)
        elif node.tag == etree.Comment:
            return base.COMMENT, ensure_str(node.text)
        elif node.tag == etree.Entity:
            return base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;
        else:
            # This is assumed to be an ordinary element
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = OrderedDict()
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                # Attribute names may be namespaced ("{ns}local").
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        assert not isinstance(node, tuple), "Text nodes have no children"
        assert len(node) or node.text, "Node has no children"
        if node.text:
            # Leading text comes before any element child.
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()
        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # The element owning the .text attribute is the parent.
                return node
            # else: fallback to "normal" processing
        elif node in self.fragmentChildren:
            # Top-level fragment children have no parent.
            return None
        return node.getparent()
| {
"pile_set_name": "Github"
} |
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE4.1 variant of methods for lossless encoder
//
// Author: Skal ([email protected])
#include "src/dsp/dsp.h"
#if defined(WEBP_USE_SSE41)
#include <assert.h>
#include <smmintrin.h>
#include "src/dsp/lossless.h"
// For sign-extended multiplying constants, pre-shifted by 5:
#define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5)
//------------------------------------------------------------------------------
// Subtract-Green Transform
/* Apply the subtract-green transform in place to 'num_pixels' ARGB pixels:
 * each pixel's green byte is broadcast into its blue and red byte lanes via
 * the shuffle (the -1 indices zero the other lanes), so the byte subtraction
 * changes only blue and red while alpha and green are left untouched. */
static void SubtractGreenFromBlueAndRed_SSE41(uint32_t* argb_data,
                                              int num_pixels) {
  int i;
  const __m128i kCstShuffle = _mm_set_epi8(-1, 13, -1, 13, -1, 9, -1, 9,
                                           -1, 5, -1, 5, -1, 1, -1, 1);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
    const __m128i in_0g0g = _mm_shuffle_epi8(in, kCstShuffle);
    const __m128i out = _mm_sub_epi8(in, in_0g0g);
    _mm_storeu_si128((__m128i*)&argb_data[i], out);
  }
  // fallthrough and finish off with plain-C
  if (i != num_pixels) {
    VP8LSubtractGreenFromBlueAndRed_C(argb_data + i, num_pixels - i);
  }
}
//------------------------------------------------------------------------------
// Color Transform
#define SPAN 8
/* Accumulate into 'histo' the histogram of transformed blue values for a
 * tile_width x tile_height tile of ARGB pixels, where blue is predicted from
 * green and red using the fixed-point multipliers 'green_to_blue' and
 * 'red_to_blue' (sign-extended and pre-shifted via CST_5b, applied through
 * _mm_mulhi_epi16).  Processes SPAN pixels per iteration; left-over columns
 * fall back to the plain-C implementation. */
static void CollectColorBlueTransforms_SSE41(const uint32_t* argb, int stride,
                                             int tile_width, int tile_height,
                                             int green_to_blue, int red_to_blue,
                                             int histo[]) {
  const __m128i mults_r = _mm_set1_epi16(CST_5b(red_to_blue));
  const __m128i mults_g = _mm_set1_epi16(CST_5b(green_to_blue));
  const __m128i mask_g = _mm_set1_epi16((short)0xff00);  // green mask
  const __m128i mask_gb = _mm_set1_epi32(0xffff);  // green/blue mask
  const __m128i mask_b = _mm_set1_epi16(0x00ff);  // blue mask
  const __m128i shuffler_lo = _mm_setr_epi8(-1, 2, -1, 6, -1, 10, -1, 14, -1,
                                            -1, -1, -1, -1, -1, -1, -1);
  const __m128i shuffler_hi = _mm_setr_epi8(-1, -1, -1, -1, -1, -1, -1, -1, -1,
                                            2, -1, 6, -1, 10, -1, 14);
  int y;
  for (y = 0; y < tile_height; ++y) {
    const uint32_t* const src = argb + y * stride;
    int i, x;
    for (x = 0; x + SPAN <= tile_width; x += SPAN) {
      uint16_t values[SPAN];
      const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]);
      const __m128i in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]);
      const __m128i r0 = _mm_shuffle_epi8(in0, shuffler_lo);
      const __m128i r1 = _mm_shuffle_epi8(in1, shuffler_hi);
      const __m128i r = _mm_or_si128(r0, r1);  // r 0
      const __m128i gb0 = _mm_and_si128(in0, mask_gb);
      const __m128i gb1 = _mm_and_si128(in1, mask_gb);
      const __m128i gb = _mm_packus_epi32(gb0, gb1);  // g b
      const __m128i g = _mm_and_si128(gb, mask_g);  // g 0
      const __m128i A = _mm_mulhi_epi16(r, mults_r);  // x dbr
      const __m128i B = _mm_mulhi_epi16(g, mults_g);  // x dbg
      const __m128i C = _mm_sub_epi8(gb, B);  // x b'
      const __m128i D = _mm_sub_epi8(C, A);  // x b''
      const __m128i E = _mm_and_si128(D, mask_b);  // 0 b''
      _mm_storeu_si128((__m128i*)values, E);
      for (i = 0; i < SPAN; ++i) ++histo[values[i]];
    }
  }
  {
    // Handle the remaining (tile_width % SPAN) columns with the C version.
    const int left_over = tile_width & (SPAN - 1);
    if (left_over > 0) {
      VP8LCollectColorBlueTransforms_C(argb + tile_width - left_over, stride,
                                       left_over, tile_height,
                                       green_to_blue, red_to_blue, histo);
    }
  }
}
/* Accumulate into 'histo' the histogram of transformed red values for a
 * tile_width x tile_height tile, where red is predicted from green using the
 * fixed-point multiplier 'green_to_red' (via CST_5b / _mm_mulhi_epi16).
 * Processes SPAN pixels per iteration; left-over columns use the C path. */
static void CollectColorRedTransforms_SSE41(const uint32_t* argb, int stride,
                                            int tile_width, int tile_height,
                                            int green_to_red, int histo[]) {
  const __m128i mults_g = _mm_set1_epi16(CST_5b(green_to_red));
  const __m128i mask_g = _mm_set1_epi32(0x00ff00);  // green mask
  const __m128i mask = _mm_set1_epi16(0xff);
  int y;
  for (y = 0; y < tile_height; ++y) {
    const uint32_t* const src = argb + y * stride;
    int i, x;
    for (x = 0; x + SPAN <= tile_width; x += SPAN) {
      uint16_t values[SPAN];
      const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]);
      const __m128i in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]);
      const __m128i g0 = _mm_and_si128(in0, mask_g);  // 0 0  | g 0
      const __m128i g1 = _mm_and_si128(in1, mask_g);
      const __m128i g = _mm_packus_epi32(g0, g1);  // g 0
      const __m128i A0 = _mm_srli_epi32(in0, 16);  // 0 0  | x r
      const __m128i A1 = _mm_srli_epi32(in1, 16);
      const __m128i A = _mm_packus_epi32(A0, A1);  // x r
      const __m128i B = _mm_mulhi_epi16(g, mults_g);  // x dr
      const __m128i C = _mm_sub_epi8(A, B);  // x r'
      const __m128i D = _mm_and_si128(C, mask);  // 0 r'
      _mm_storeu_si128((__m128i*)values, D);
      for (i = 0; i < SPAN; ++i) ++histo[values[i]];
    }
  }
  {
    // Handle the remaining (tile_width % SPAN) columns with the C version.
    const int left_over = tile_width & (SPAN - 1);
    if (left_over > 0) {
      VP8LCollectColorRedTransforms_C(argb + tile_width - left_over, stride,
                                      left_over, tile_height, green_to_red,
                                      histo);
    }
  }
}
//------------------------------------------------------------------------------
// Entry point
extern void VP8LEncDspInitSSE41(void);
/* Install the SSE4.1 implementations into the VP8L function pointers. */
WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitSSE41(void) {
  VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_SSE41;
  VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_SSE41;
  VP8LCollectColorRedTransforms = CollectColorRedTransforms_SSE41;
}
#else // !WEBP_USE_SSE41
WEBP_DSP_INIT_STUB(VP8LEncDspInitSSE41)
#endif // WEBP_USE_SSE41
| {
"pile_set_name": "Github"
} |
%% @author Marc Worrell <[email protected]>
%% @copyright 2010 Marc Worrell
%% Date: 2010-02-15
%% Copyright 2010 Marc Worrell
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(action_backup_backup_start).
-include_lib("zotonic_core/include/zotonic.hrl").
-export([
render_action/4,
event/2
]).
%% @doc Render the postback wire that starts a backup when the trigger fires.
%% The is_full_backup argument (default false) is carried in the postback
%% message and decides which kind of backup event/2 starts.
render_action(TriggerId, TargetId, Args, Context) ->
    IsFullBackup = z_convert:to_bool(proplists:get_value(is_full_backup, Args, false)),
    {PostbackMsgJS, _PickledPostback} = z_render:make_postback({backup_start, IsFullBackup}, undefined,
                                                               TriggerId, TargetId, ?MODULE, Context),
    {PostbackMsgJS, Context}.
%% @doc Start a backup (full or database-only, depending on IsFullBackup)
%% when the user is allowed to use mod_backup, and growl the outcome to the
%% client.  (The old comment said "Download a backup", which was wrong.)
%% @spec event(Event, Context1) -> Context2
event(#postback{message={backup_start, IsFullBackup}}, Context) ->
    case z_acl:is_allowed(use, mod_backup, Context) of
        true ->
            case mod_backup:start_backup(IsFullBackup, Context) of
                ok ->
                    z_render:growl("Started the backup. You can keep this page open or continue working.", Context);
                {error, in_progress} ->
                    z_render:growl_error("Could not start the backup because a backup is already in progress.", Context)
            end;
        false ->
            z_render:growl_error("Only administrators can start a backup.", Context)
    end.
| {
"pile_set_name": "Github"
} |
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`renders 1`] = `
<SimpleBubbleChart
displayOrganizations={false}
helpText="foobar"
projects={Array []}
sizeMetric={
Object {
"key": "uncovered_lines",
"type": "SHORT_INT",
}
}
title="projects.visualization.coverage"
xMetric={
Object {
"key": "complexity",
"type": "SHORT_INT",
}
}
yDomain={
Array [
100,
0,
]
}
yMetric={
Object {
"key": "coverage",
"type": "PERCENT",
}
}
/>
`;
| {
"pile_set_name": "Github"
} |
/*
* (C) Copyright 2015 Google, Inc
* Written by Simon Glass <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <dm.h>
#include <i2c.h>
#include <rtc.h>
#include <asm/rtc.h>
#define REG_COUNT 0x80
/* Read the current time from the emulated RTC, one I2C register per field.
 *
 * dm_i2c_reg_read() returns the (non-negative) register value or a negative
 * error code; any error is propagated immediately.  The year register holds
 * years since 1900, so 1900 is added back here.
 *
 * Return: 0 on success, -ve on read error.
 */
static int sandbox_rtc_get(struct udevice *dev, struct rtc_time *time)
{
	time->tm_sec = dm_i2c_reg_read(dev, REG_SEC);
	if (time->tm_sec < 0)
		return time->tm_sec;
	time->tm_min = dm_i2c_reg_read(dev, REG_MIN);
	if (time->tm_min < 0)
		return time->tm_min;
	time->tm_hour = dm_i2c_reg_read(dev, REG_HOUR);
	if (time->tm_hour < 0)
		return time->tm_hour;
	time->tm_mday = dm_i2c_reg_read(dev, REG_MDAY);
	if (time->tm_mday < 0)
		return time->tm_mday;
	time->tm_mon = dm_i2c_reg_read(dev, REG_MON);
	if (time->tm_mon < 0)
		return time->tm_mon;
	time->tm_year = dm_i2c_reg_read(dev, REG_YEAR);
	if (time->tm_year < 0)
		return time->tm_year;
	time->tm_year += 1900;
	time->tm_wday = dm_i2c_reg_read(dev, REG_WDAY);
	if (time->tm_wday < 0)
		return time->tm_wday;

	return 0;
}
/* Write the given time to the emulated RTC, one I2C register per field.
 *
 * The year is stored as years since 1900 (mirroring sandbox_rtc_get()).
 *
 * Return: 0 on success, -ve on write error.
 */
static int sandbox_rtc_set(struct udevice *dev, const struct rtc_time *time)
{
	int ret;

	ret = dm_i2c_reg_write(dev, REG_SEC, time->tm_sec);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_MIN, time->tm_min);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_HOUR, time->tm_hour);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_MDAY, time->tm_mday);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_MON, time->tm_mon);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_YEAR, time->tm_year - 1900);
	if (ret < 0)
		return ret;
	ret = dm_i2c_reg_write(dev, REG_WDAY, time->tm_wday);
	if (ret < 0)
		return ret;

	return 0;
}
/* Reset the emulated RTC by writing 0 to its reset register; the actual
 * reset behaviour is implemented by the sandbox I2C RTC emulator. */
static int sandbox_rtc_reset(struct udevice *dev)
{
	return dm_i2c_reg_write(dev, REG_RESET, 0);
}
/* Raw 8-bit register read, passed straight through to the I2C device. */
static int sandbox_rtc_read8(struct udevice *dev, unsigned int reg)
{
	return dm_i2c_reg_read(dev, reg);
}
/* Raw register access: write one byte @val to register @reg of the emulated RTC */
static int sandbox_rtc_write8(struct udevice *dev, unsigned int reg, int val)
{
	return dm_i2c_reg_write(dev, reg, val);
}
/* Operations exposed to the RTC uclass */
static const struct rtc_ops sandbox_rtc_ops = {
	.get = sandbox_rtc_get,
	.set = sandbox_rtc_set,
	.reset = sandbox_rtc_reset,
	.read8 = sandbox_rtc_read8,
	.write8 = sandbox_rtc_write8,
};

/* Device-tree compatible strings this driver binds against */
static const struct udevice_id sandbox_rtc_ids[] = {
	{ .compatible = "sandbox-rtc" },
	{ }
};

U_BOOT_DRIVER(rtc_sandbox) = {
	.name = "rtc-sandbox",
	.id = UCLASS_RTC,
	.of_match = sandbox_rtc_ids,
	.ops = &sandbox_rtc_ops,
};
| {
"pile_set_name": "Github"
} |
package info.nightscout.android.model.store;
import java.util.Date;
import info.nightscout.android.utils.FormatKit;
import io.realm.RealmObject;
import io.realm.annotations.Ignore;
import io.realm.annotations.Index;
import io.realm.annotations.PrimaryKey;
/**
 * Realm-persisted statistics record for the Pushover notification service.
 * <p>
 * Tracks run and error counters, the number of messages sent, and the
 * message quota values (limit / remaining / reset time) stored by the
 * Pushover upload code.
 */
public class StatPushover extends RealmObject implements StatInterface {
    @Ignore
    // Fixed copy-paste bug: the tag previously referenced StatNightscout,
    // which would mislabel any log output from this class.
    private static final String TAG = StatPushover.class.getSimpleName();

    // Unique key identifying this statistics record.
    @PrimaryKey
    private String key;
    // Date this record covers.
    @Index
    private Date date;

    private int run;          // number of runs
    private int error;        // number of errors
    private int validError;   // number of errors counted as "valid"
    private int messagesSent; // messages successfully sent
    private int limit;        // message quota limit
    private int remaining;    // quota remaining
    private long resetTime;   // quota reset timestamp; 0 means not set

    /**
     * Human-readable summary of all counters. Used quota is derived as
     * {@code limit - remaining}; the reset time is shown as "-" when unset.
     */
    @Override
    public String toString() {
        return String.format("Run: %s Error: %s ValidError: %s Sent: %s Limit: %s/%s Reset: %s",
                run,
                error,
                validError,
                messagesSent,
                limit - remaining,
                limit,
                resetTime == 0 ? "-" : FormatKit.getInstance().formatAsYMD(resetTime)
        );
    }

    @Override
    public String getKey() {
        return key;
    }

    @Override
    public void setKey(String key) {
        this.key = key;
    }

    @Override
    public Date getDate() {
        return date;
    }

    @Override
    public void setDate(Date date) {
        this.date = date;
    }

    public int getRun() {
        return run;
    }

    public void incRun() {
        run++;
    }

    public int getError() {
        return error;
    }

    public void incError() {
        error++;
    }

    public int getValidError() {
        return validError;
    }

    public void incValidError() {
        validError++;
    }

    public int getMessagesSent() {
        return messagesSent;
    }

    public void setMessagesSent(int messagesSent) {
        this.messagesSent = messagesSent;
    }

    public void incMessagesSent() {
        messagesSent++;
    }

    public int getLimit() {
        return limit;
    }

    public void setLimit(int limit) {
        this.limit = limit;
    }

    public int getRemaining() {
        return remaining;
    }

    public void setRemaining(int remaining) {
        this.remaining = remaining;
    }

    public long getResetTime() {
        return resetTime;
    }

    public void setResetTime(long resetTime) {
        this.resetTime = resetTime;
    }
}
| {
"pile_set_name": "Github"
} |
# #coding=utf-8
# # coding=utf-8
# '''
# Created on 2014-1-5
#
# @author: zhangtiande
# '''
# from django.conf.urls import url
# from doraemon.administrate.views.admin_user_group_view import user_group,group_create,check_value_exists,group_delete,usergroup_list,usergroup_edit_get,group_permission_list,update_description,update_group_permission
#
# admin_usergroup_router=[
# url(r"usergroup/(all)$",user_group),
# url(r"usergroup/create$",group_create),
# url(r"usergroup/check_value_exists$",check_value_exists),
# url(r"usergroup/usergroup_list$",usergroup_list),
# url(r"usergroup/(\d{1,6})/delete$",group_delete),
# url(r"usergroup/(\d{1,6})/edit_get$",usergroup_edit_get),
# url(r"usergroup/(\d{1,6})/update_permission$",update_group_permission),
# url(r"usergroup/(\d{1,6})/group_permission_list$",group_permission_list),
# url(r"usergroup/(\d{1,6})/update_description",update_description),
# ] | {
"pile_set_name": "Github"
} |
package com.nilhcem.fakesmtp.core.server;
import static org.junit.Assert.*;
import org.junit.Test;
import com.nilhcem.fakesmtp.core.exception.BindPortException;
import com.nilhcem.fakesmtp.core.exception.OutOfRangePortException;
import com.nilhcem.fakesmtp.server.SMTPServerHandler;
/**
 * Unit tests for {@link SMTPServerHandler}: singleton identity, port range
 * validation, and idempotent shutdown.
 */
public class SMTPServerHandlerTest {

	@Test
	public void uniqueInstance() {
		// INSTANCE must always resolve to the same object.
		final SMTPServerHandler first = SMTPServerHandler.INSTANCE;
		final SMTPServerHandler second = SMTPServerHandler.INSTANCE;
		assertSame(first, second);
	}

	@Test(expected = OutOfRangePortException.class)
	public void testOutOfRangePort() throws BindPortException, OutOfRangePortException {
		// A port far above 65535 must be rejected before binding.
		SMTPServerHandler.INSTANCE.startServer(9999999, null);
	}

	@Test
	public void stopShouldDoNothingIfServerIsAlreadyStopped() {
		// Stopping repeatedly must be a harmless no-op.
		for (int i = 0; i < 3; i++) {
			SMTPServerHandler.INSTANCE.stopServer();
		}
	}
}
| {
"pile_set_name": "Github"
} |
{**************************************************************************************************}
{ }
{ Last modified: $Date:: $ }
{ Revision: $Rev:: $ }
{ Author: $Author:: $ }
{ }
{**************************************************************************************************}
(*****************************************************************************
This IDL-file has been converted by "the fIDLer".
[written by -=Assarbad=- <oliver at assarbad dot net> Sept-2004] under MPL
Visit the fIDLer homepage at: http://assarbad.net/en/stuff/
{The 3 above lines should be retained}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NOTE:
There's no guarantee for correct case of parameter or variable types.
If you have a type like BLA_YADDA in IDL then fIDLer will have converted it
to 'TBlaYadda' already. But if the type identifier was BLAYADDA and both
BLA and YADDA being distinct words the result will not be correctly
capitalized!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The original file was 'MSTask.Idl'
File converted: 2004-10-08@18:38:57
Cosmetics and review by:
2004-10-08 - Oliver Schneider <oliver at assarbad dot net>
Changes:
2004-11-15 - Scott Price <scottprice@users dot sourceforge dot net>
*****************************************************************************)
unit MSTask;
{$I jcl.inc}
{$I windowsonly.inc}
{$IFDEF SUPPORTS_WEAKPACKAGEUNIT}
{$IFDEF UNITVERSIONING}
{$WEAKPACKAGEUNIT OFF}
{$ELSE ~UNITVERSIONING}
{$WEAKPACKAGEUNIT ON}
{$ENDIF ~UNITVERSIONING}
{$ENDIF SUPPORTS_WEAKPACKAGEUNIT}
{$ALIGN ON}
{$MINENUMSIZE 4}
interface
uses
{$IFDEF UNITVERSIONING}
JclUnitVersioning,
{$ENDIF UNITVERSIONING}
{$IFDEF HAS_UNITSCOPE}
Winapi.ActiveX, Winapi.Windows;
{$ELSE ~HAS_UNITSCOPE}
ActiveX, Windows;
{$ENDIF ~HAS_UNITSCOPE}
//DOM-IGNORE-BEGIN
(*$HPPEMIT '#include <MSTask.h>' *)
//+----------------------------------------------------------------------------
//
// Task Scheduler
//
// Microsoft Windows
// Copyright (C) Microsoft Corporation, 1992 - 1999.
//
// File: mstask.idl
//
// Contents: ITaskTrigger, ITask, ITaskScheduler, IEnumWorkItems
// interfaces and related definitions
//
// History: 06-Sep-95 EricB created
//
//-----------------------------------------------------------------------------
// import "oaidl.idl";
// import "oleidl.idl";
// 148BD520-A2AB-11CE-B11F-00AA00530503 - Task object class ID
// 148BD52A-A2AB-11CE-B11F-00AA00530503 - Task Scheduler class ID
// A6B952F0-A4B1-11D0-997D-00AA006887EC - IScheduledWorkItem interface ID
// 148BD524-A2AB-11CE-B11F-00AA00530503 - ITask interface ID
// 148BD527-A2AB-11CE-B11F-00AA00530503 - ITaskScheduler interface ID
// 148BD528-A2AB-11CE-B11F-00AA00530503 - IEnumWorkItems interface ID
// 148BD52B-A2AB-11CE-B11F-00AA00530503 - ITaskTrigger interface ID
//+----------------------------------------------------------------------------
//
// Datatypes
//
//-----------------------------------------------------------------------------
const
{$EXTERNALSYM TASK_SUNDAY}
TASK_SUNDAY = $1;
const
{$EXTERNALSYM TASK_MONDAY}
TASK_MONDAY = $2;
const
{$EXTERNALSYM TASK_TUESDAY}
TASK_TUESDAY = $4;
const
{$EXTERNALSYM TASK_WEDNESDAY}
TASK_WEDNESDAY = $8;
const
{$EXTERNALSYM TASK_THURSDAY}
TASK_THURSDAY = $10;
const
{$EXTERNALSYM TASK_FRIDAY}
TASK_FRIDAY = $20;
const
{$EXTERNALSYM TASK_SATURDAY}
TASK_SATURDAY = $40;
const
{$EXTERNALSYM TASK_FIRST_WEEK}
TASK_FIRST_WEEK = 1;
const
{$EXTERNALSYM TASK_SECOND_WEEK}
TASK_SECOND_WEEK = 2;
const
{$EXTERNALSYM TASK_THIRD_WEEK}
TASK_THIRD_WEEK = 3;
const
{$EXTERNALSYM TASK_FOURTH_WEEK}
TASK_FOURTH_WEEK = 4;
const
{$EXTERNALSYM TASK_LAST_WEEK}
TASK_LAST_WEEK = 5;
const
{$EXTERNALSYM TASK_JANUARY}
TASK_JANUARY = $1;
const
{$EXTERNALSYM TASK_FEBRUARY}
TASK_FEBRUARY = $2;
const
{$EXTERNALSYM TASK_MARCH}
TASK_MARCH = $4;
const
{$EXTERNALSYM TASK_APRIL}
TASK_APRIL = $8;
const
{$EXTERNALSYM TASK_MAY}
TASK_MAY = $10;
const
{$EXTERNALSYM TASK_JUNE}
TASK_JUNE = $20;
const
{$EXTERNALSYM TASK_JULY}
TASK_JULY = $40;
const
{$EXTERNALSYM TASK_AUGUST}
TASK_AUGUST = $80;
const
{$EXTERNALSYM TASK_SEPTEMBER}
TASK_SEPTEMBER = $100;
const
{$EXTERNALSYM TASK_OCTOBER}
TASK_OCTOBER = $200;
const
{$EXTERNALSYM TASK_NOVEMBER}
TASK_NOVEMBER = $400;
const
{$EXTERNALSYM TASK_DECEMBER}
TASK_DECEMBER = $800;
const
{$EXTERNALSYM TASK_FLAG_INTERACTIVE}
TASK_FLAG_INTERACTIVE = $1;
const
{$EXTERNALSYM TASK_FLAG_DELETE_WHEN_DONE}
TASK_FLAG_DELETE_WHEN_DONE = $2;
const
{$EXTERNALSYM TASK_FLAG_DISABLED}
TASK_FLAG_DISABLED = $4;
const
{$EXTERNALSYM TASK_FLAG_START_ONLY_IF_IDLE}
TASK_FLAG_START_ONLY_IF_IDLE = $10;
const
{$EXTERNALSYM TASK_FLAG_KILL_ON_IDLE_END}
TASK_FLAG_KILL_ON_IDLE_END = $20;
const
{$EXTERNALSYM TASK_FLAG_DONT_START_IF_ON_BATTERIES}
TASK_FLAG_DONT_START_IF_ON_BATTERIES = $40;
const
{$EXTERNALSYM TASK_FLAG_KILL_IF_GOING_ON_BATTERIES}
TASK_FLAG_KILL_IF_GOING_ON_BATTERIES = $80;
const
{$EXTERNALSYM TASK_FLAG_RUN_ONLY_IF_DOCKED}
TASK_FLAG_RUN_ONLY_IF_DOCKED = $100;
const
{$EXTERNALSYM TASK_FLAG_HIDDEN}
TASK_FLAG_HIDDEN = $200;
const
{$EXTERNALSYM TASK_FLAG_RUN_IF_CONNECTED_TO_INTERNET}
TASK_FLAG_RUN_IF_CONNECTED_TO_INTERNET = $400;
const
{$EXTERNALSYM TASK_FLAG_RESTART_ON_IDLE_RESUME}
TASK_FLAG_RESTART_ON_IDLE_RESUME = $800;
const
{$EXTERNALSYM TASK_FLAG_SYSTEM_REQUIRED}
TASK_FLAG_SYSTEM_REQUIRED = $1000;
const
{$EXTERNALSYM TASK_FLAG_RUN_ONLY_IF_LOGGED_ON}
TASK_FLAG_RUN_ONLY_IF_LOGGED_ON = $2000;
const
{$EXTERNALSYM TASK_TRIGGER_FLAG_HAS_END_DATE}
TASK_TRIGGER_FLAG_HAS_END_DATE = $1;
const
{$EXTERNALSYM TASK_TRIGGER_FLAG_KILL_AT_DURATION_END}
TASK_TRIGGER_FLAG_KILL_AT_DURATION_END = $2;
const
{$EXTERNALSYM TASK_TRIGGER_FLAG_DISABLED}
TASK_TRIGGER_FLAG_DISABLED = $4;
//
// 1440 = 60 mins/hour * 24 hrs/day since a trigger/TASK could run all day at
// one minute intervals.
//
const
{$EXTERNALSYM TASK_MAX_RUN_TIMES}
TASK_MAX_RUN_TIMES: Integer = 1440;
//
// The TASK_TRIGGER_TYPE field of the TASK_TRIGGER structure determines
// which member of the TRIGGER_TYPE_UNION field to use.
//
type
{$EXTERNALSYM _TASK_TRIGGER_TYPE}
_TASK_TRIGGER_TYPE = (
{$EXTERNALSYM TASK_TIME_TRIGGER_ONCE}
TASK_TIME_TRIGGER_ONCE, // 0 // Ignore the Type field.
{$EXTERNALSYM TASK_TIME_TRIGGER_DAILY}
TASK_TIME_TRIGGER_DAILY, // 1 // Use DAILY
{$EXTERNALSYM TASK_TIME_TRIGGER_WEEKLY}
TASK_TIME_TRIGGER_WEEKLY, // 2 // Use WEEKLY
{$EXTERNALSYM TASK_TIME_TRIGGER_MONTHLYDATE}
TASK_TIME_TRIGGER_MONTHLYDATE, // 3 // Use MONTHLYDATE
{$EXTERNALSYM TASK_TIME_TRIGGER_MONTHLYDOW}
TASK_TIME_TRIGGER_MONTHLYDOW, // 4 // Use MONTHLYDOW
{$EXTERNALSYM TASK_EVENT_TRIGGER_ON_IDLE}
TASK_EVENT_TRIGGER_ON_IDLE, // 5 // Ignore the Type field.
{$EXTERNALSYM TASK_EVENT_TRIGGER_AT_SYSTEMSTART}
TASK_EVENT_TRIGGER_AT_SYSTEMSTART, // 6 // Ignore the Type field.
{$EXTERNALSYM TASK_EVENT_TRIGGER_AT_LOGON}
TASK_EVENT_TRIGGER_AT_LOGON // 7 // Ignore the Type field.
);
{$EXTERNALSYM TASK_TRIGGER_TYPE}
TASK_TRIGGER_TYPE = _TASK_TRIGGER_TYPE;
TTaskTriggerType = _TASK_TRIGGER_TYPE;
{$EXTERNALSYM PTASK_TRIGGER_TYPE}
PTASK_TRIGGER_TYPE = ^_TASK_TRIGGER_TYPE;
PTaskTriggerType = ^_TASK_TRIGGER_TYPE;
// Per-trigger-type payload records. The TriggerType field of TASK_TRIGGER
// selects which of these is stored in TRIGGER_TYPE_UNION (see the
// "Use DAILY"/"Use WEEKLY"/... notes on _TASK_TRIGGER_TYPE above).
type
  {$EXTERNALSYM _DAILY}
  // Payload for TASK_TIME_TRIGGER_DAILY triggers.
  _DAILY = packed record
    DaysInterval: WORD;
  end;
  {$EXTERNALSYM DAILY}
  DAILY = _DAILY;
  TDaily = _DAILY;

type
  {$EXTERNALSYM _WEEKLY}
  // Payload for TASK_TIME_TRIGGER_WEEKLY triggers.
  _WEEKLY = packed record
    WeeksInterval: WORD;
    rgfDaysOfTheWeek: WORD;
  end;
  {$EXTERNALSYM WEEKLY}
  WEEKLY = _WEEKLY;
  TWeekly = _WEEKLY;

type
  {$EXTERNALSYM _MONTHLYDATE}
  // Payload for TASK_TIME_TRIGGER_MONTHLYDATE triggers.
  _MONTHLYDATE = packed record
    rgfDays: DWORD;
    rgfMonths: WORD;
  end;
  {$EXTERNALSYM MONTHLYDATE}
  MONTHLYDATE = _MONTHLYDATE;
  TMonthlyDate = _MONTHLYDATE; // OS: Changed capitalization

type
  {$EXTERNALSYM _MONTHLYDOW}
  // Payload for TASK_TIME_TRIGGER_MONTHLYDOW triggers.
  _MONTHLYDOW = packed record
    wWhichWeek: WORD;
    rgfDaysOfTheWeek: WORD;
    rgfMonths: WORD;
  end;
  {$EXTERNALSYM MONTHLYDOW}
  MONTHLYDOW = _MONTHLYDOW;
  TMonthlyDOW = _MONTHLYDOW; // OS: Changed capitalization

type
  {$EXTERNALSYM _TRIGGER_TYPE_UNION}
  // Variant record holding the payload of the active trigger type; only the
  // arm matching TASK_TRIGGER.TriggerType is meaningful.
  _TRIGGER_TYPE_UNION = packed record
    case Integer of
      0: (Daily: DAILY);
      1: (Weekly: WEEKLY);
      2: (MonthlyDate: MONTHLYDATE);
      3: (MonthlyDOW: MONTHLYDOW);
  end;
  {$EXTERNALSYM TRIGGER_TYPE_UNION}
  TRIGGER_TYPE_UNION = _TRIGGER_TYPE_UNION;
  TTriggerTypeUnion = _TRIGGER_TYPE_UNION;
type
{$EXTERNALSYM _TASK_TRIGGER}
_TASK_TRIGGER = record // SP: removed packed record statement as seemed to affect SetTrigger
cbTriggerSize: WORD; // Structure size.
Reserved1: WORD; // Reserved. Must be zero.
wBeginYear: WORD; // Trigger beginning date year.
wBeginMonth: WORD; // Trigger beginning date month.
wBeginDay: WORD; // Trigger beginning date day.
wEndYear: WORD; // Optional trigger ending date year.
wEndMonth: WORD; // Optional trigger ending date month.
wEndDay: WORD; // Optional trigger ending date day.
wStartHour: WORD; // Run bracket start time hour.
wStartMinute: WORD; // Run bracket start time minute.
MinutesDuration: DWORD; // Duration of run bracket.
MinutesInterval: DWORD; // Run bracket repetition interval.
rgFlags: DWORD; // Trigger flags.
TriggerType: TASK_TRIGGER_TYPE; // Trigger type.
Type_: TRIGGER_TYPE_UNION; // Trigger data.
Reserved2: WORD; // Reserved. Must be zero.
wRandomMinutesInterval: WORD; // Maximum number of random minutes
// after start time.
end;
{$EXTERNALSYM TASK_TRIGGER}
TASK_TRIGGER = _TASK_TRIGGER;
TTaskTrigger = _TASK_TRIGGER;
{$EXTERNALSYM PTASK_TRIGGER}
PTASK_TRIGGER = ^_TASK_TRIGGER;
PTaskTrigger = ^_TASK_TRIGGER;
//+----------------------------------------------------------------------------
//
// Interfaces
//
//-----------------------------------------------------------------------------
//+----------------------------------------------------------------------------
//
// Interface: ITaskTrigger
//
// Synopsis: Trigger object interface. A Task object may contain several
// of these.
//
//-----------------------------------------------------------------------------
// {148BD52B-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM IID_ITaskTrigger}
IID_ITaskTrigger: TIID = (D1: $148BD52B; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
// interface ITaskTrigger;
type
{$EXTERNALSYM ITaskTrigger}
ITaskTrigger = interface(IUnknown)
['{148BD52B-A2AB-11CE-B11F-00AA00530503}']
// Methods:
function SetTrigger(const pTrigger: TTaskTrigger): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} const PTASK_TRIGGER pTrigger |*)
function GetTrigger(out pTrigger: TTaskTrigger): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} PTASK_TRIGGER pTrigger |*)
function GetTriggerString(out ppwszTrigger: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszTrigger |*)
end;
//+----------------------------------------------------------------------------
//
// Interface: IScheduledWorkItem
//
// Synopsis: Abstract base class for any runnable work item that can be
// scheduled by the task scheduler.
//
//-----------------------------------------------------------------------------
// {a6b952f0-a4b1-11d0-997d-00aa006887ec}
const
{$EXTERNALSYM IID_IScheduledWorkItem}
IID_IScheduledWorkItem: TIID = (D1: $A6B952F0; D2: $A4B1; D3: $11D0; D4: ($99, $7D, $00, $AA, $00, $68, $87, $EC));
// interface IScheduledWorkItem;
type
{$EXTERNALSYM IScheduledWorkItem}
IScheduledWorkItem = interface(IUnknown)
['{A6B952F0-A4B1-11D0-997D-00AA006887EC}']
// Methods concerning scheduling:
function CreateTrigger(out piNewTrigger: WORD; out ppTrigger: ITaskTrigger): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * piNewTrigger, {out} ITaskTrigger ** ppTrigger |*)
function DeleteTrigger(iTrigger: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD iTrigger |*)
function GetTriggerCount(out pwCount: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * pwCount |*)
function GetTrigger(iTrigger: WORD; out ppTrigger: ITaskTrigger): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD iTrigger, {out} ITaskTrigger ** ppTrigger |*)
function GetTriggerString(iTrigger: WORD; out ppwszTrigger: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD iTrigger, {out} LPWSTR * ppwszTrigger |*)
function GetRunTimes(pstBegin: PSystemTime; pstEnd: PSystemTime; var pCount: WORD; out rgstTaskTimes: PSystemTime): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} const LPSYSTEMTIME pstBegin, {in} const LPSYSTEMTIME pstEnd, {in; out} WORD * pCount, {out} LPSYSTEMTIME * rgstTaskTimes |*)
function GetNextRunTime(var pstNextRun: SYSTEMTIME): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in; out} SYSTEMTIME * pstNextRun |*)
function SetIdleWait(wIdleMinutes: WORD; wDeadlineMinutes: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD wIdleMinutes, {in} WORD wDeadlineMinutes |*)
function GetIdleWait(out pwIdleMinutes: WORD; out pwDeadlineMinutes: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * pwIdleMinutes, {out} WORD * pwDeadlineMinutes |*)
// Other methods:
function Run(): HRESULT; stdcall;
function Terminate(): HRESULT; stdcall;
function EditWorkItem(hParent: HWND; dwReserved: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} HWND hParent, {in} DWORD dwReserved |*)
function GetMostRecentRunTime(out pstLastRun: SYSTEMTIME): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} SYSTEMTIME * pstLastRun |*)
function GetStatus(out phrStatus: HRESULT): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} HRESULT * phrStatus |*)
function GetExitCode(out pdwExitCode: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} DWORD * pdwExitCode |*)
// Properties:
function SetComment(pwszComment: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszComment |*)
function GetComment(out ppwszComment: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszComment |*)
function SetCreator(pwszCreator: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszCreator |*)
function GetCreator(out ppwszCreator: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszCreator |*)
function SetWorkItemData(cbData: WORD; rgbData: PByte): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD cbData, {in} BYTE rgbData[] |*)
function GetWorkItemData(out pcbData: WORD; out prgbData: PByte): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * pcbData, {out} BYTE ** prgbData |*)
function SetErrorRetryCount(wRetryCount: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD wRetryCount |*)
function GetErrorRetryCount(out pwRetryCount: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * pwRetryCount |*)
function SetErrorRetryInterval(wRetryInterval: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} WORD wRetryInterval |*)
function GetErrorRetryInterval(out pwRetryInterval: WORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} WORD * pwRetryInterval |*)
function SetFlags(dwFlags: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} DWORD dwFlags |*)
function GetFlags(out pdwFlags: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} DWORD * pdwFlags |*)
function SetAccountInformation(pwszAccountName: LPCWSTR; pwszPassword: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszAccountName, {in} LPCWSTR pwszPassword |*)
function GetAccountInformation(out ppwszAccountName: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszAccountName |*)
end;
//+----------------------------------------------------------------------------
//
// Interface: ITask
//
// Synopsis: Task object interface. The primary means of task object
// manipulation.
//
//-----------------------------------------------------------------------------
// {148BD524-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM IID_ITask}
IID_ITask: TIID = (D1: $148BD524; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
// interface ITask;
type
{$EXTERNALSYM ITask}
ITask = interface(IScheduledWorkItem)
['{148BD524-A2AB-11CE-B11F-00AA00530503}']
// Properties that correspond to parameters of CreateProcess:
function SetApplicationName(pwszApplicationName: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszApplicationName |*)
function GetApplicationName(out ppwszApplicationName: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszApplicationName |*)
function SetParameters(pwszParameters: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszParameters |*)
function GetParameters(out ppwszParameters: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszParameters |*)
function SetWorkingDirectory(pwszWorkingDirectory: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszWorkingDirectory |*)
function GetWorkingDirectory(out ppwszWorkingDirectory: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszWorkingDirectory |*)
function SetPriority(dwPriority: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} DWORD dwPriority |*)
function GetPriority(out pdwPriority: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} DWORD * pdwPriority |*)
// Other properties:
function SetTaskFlags(dwFlags: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} DWORD dwFlags |*)
function GetTaskFlags(out pdwFlags: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} DWORD * pdwFlags |*)
function SetMaxRunTime(dwMaxRunTimeMS: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} DWORD dwMaxRunTimeMS |*)
function GetMaxRunTime(out pdwMaxRunTimeMS: DWORD): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} DWORD * pdwMaxRunTimeMS |*)
end;
//+----------------------------------------------------------------------------
//
// Interface: IEnumWorkItems
//
// Synopsis: Work item object enumerator. Enumerates the work item objects
// within the Tasks folder.
//
//-----------------------------------------------------------------------------
// {148BD528-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM IID_IEnumWorkItems}
IID_IEnumWorkItems: TIID = (D1: $148BD528; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
// interface IEnumWorkItems;
type
{$EXTERNALSYM IEnumWorkItems}
IEnumWorkItems = interface(IUnknown)
['{148BD528-A2AB-11CE-B11F-00AA00530503}']
// Methods:
function Next(celt: ULONG; out rgpwszNames: PLPWSTR; out pceltFetched: ULONG): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} ULONG celt, {out} LPWSTR ** rgpwszNames, {out} ULONG * pceltFetched |*)
function Skip(celt: ULONG): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} ULONG celt |*)
function Reset(): HRESULT; stdcall;
function Clone(out ppEnumWorkItems: IEnumWorkItems): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} IEnumWorkItems ** ppEnumWorkItems |*)
end;
//+----------------------------------------------------------------------------
//
// Interface: ITaskScheduler
//
// Synopsis: Task Scheduler interface. Provides location transparent
// manipulation of task and/or queue objects within the Tasks
// folder.
//
//-----------------------------------------------------------------------------
// {148BD527-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM IID_ITaskScheduler}
IID_ITaskScheduler: TIID = (D1: $148BD527; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
// interface ITaskScheduler;
type
{$EXTERNALSYM ITaskScheduler}
ITaskScheduler = interface(IUnknown)
['{148BD527-A2AB-11CE-B11F-00AA00530503}']
// Methods:
function SetTargetComputer(pwszComputer: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszComputer |*)
function GetTargetComputer(out ppwszComputer: LPWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} LPWSTR * ppwszComputer |*)
function Enum(out ppEnumWorkItems: IEnumWorkItems): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {out} IEnumWorkItems ** ppEnumWorkItems |*)
function Activate(pwszName: LPCWSTR; const riid: TIID; out ppUnk: IUnknown): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszName, {in} REFIID riid, {out} IUnknown ** ppUnk |*)
function Delete(pwszName: LPCWSTR): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszName |*)
function NewWorkItem(pwszTaskName: LPCWSTR; const rclsid: TCLSID; const riid: TIID; out ppUnk: IUnknown): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszTaskName, {in} REFCLSID rclsid, {in} REFIID riid, {out} IUnknown ** ppUnk |*)
function AddWorkItem(pwszTaskName: LPCWSTR; const pWorkItem: IScheduledWorkItem): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszTaskName, {in} IScheduledWorkItem * pWorkItem |*)
function IsOfType(pwszName: LPCWSTR; const riid: TIID): HRESULT; stdcall;
(*| Parameter(s) was/were [CPP]: {in} LPCWSTR pwszName, {in} REFIID riid |*)
end;
// EXTERN_C const CLSID CLSID_CTask;
// EXTERN_C const CLSID CLSID_CTaskScheduler;
// {148BD520-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM CLSID_CTask}
CLSID_CTask: TCLSID = (D1: $148BD520; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
// {148BD52A-A2AB-11CE-B11F-00AA00530503}
const
{$EXTERNALSYM CLSID_CTaskScheduler}
CLSID_CTaskScheduler: TCLSID = (D1: $148BD52A; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
//
// NOTE: Definition of HPROPSHEETPAGE is from sdk\inc\prsht.h
// Including this header file causes numerous redefinition errors.
//
type
{$EXTERNALSYM _PSP}
_PSP = record end;
{$IFNDEF FPC}
type
{$EXTERNALSYM HPROPSHEETPAGE}
HPROPSHEETPAGE = ^_PSP;
{$ENDIF ~FPC}
type
  {$EXTERNALSYM _TASKPAGE}
  // Identifies the property sheet page requested through
  // IProvideTaskPage.GetPage.
  _TASKPAGE = (
    {$EXTERNALSYM TASKPAGE_TASK}
    TASKPAGE_TASK, // 0
    {$EXTERNALSYM TASKPAGE_SCHEDULE}
    TASKPAGE_SCHEDULE, // 1
    {$EXTERNALSYM TASKPAGE_SETTINGS}
    TASKPAGE_SETTINGS // 2
  );
  {$EXTERNALSYM TASKPAGE}
  TASKPAGE = _TASKPAGE;
  TTaskPage = _TASKPAGE; // OS: Changed capitalization
//+----------------------------------------------------------------------------
//
// Interface: IProvideTaskPage
//
// Synopsis: Task property page retrieval interface. With this interface,
// it is possible to retrieve one or more property pages
// associated with a task object. Task objects inherit this
// interface.
//
//-----------------------------------------------------------------------------
// {4086658a-cbbb-11cf-b604-00c04fd8d565}
const
{$EXTERNALSYM IID_IProvideTaskPage}
IID_IProvideTaskPage: TIID = (D1: $4086658A; D2: $CBBB; D3: $11CF; D4: ($B6, $04, $00, $C0, $4F, $D8, $D5, $65));
// interface IProvideTaskPage;
type
{$EXTERNALSYM IProvideTaskPage}
IProvideTaskPage = interface(IUnknown)
['{4086658A-CBBB-11CF-B604-00C04FD8D565}']
// Methods:
function GetPage(tpType: TTaskPage; fPersistChanges: BOOL; out phPage: HPROPSHEETPAGE): HRESULT; stdcall; // OS: Changed TASKPAGE to TTaskPage
(*| Parameter(s) was/were [CPP]: {in} TASKPAGE tpType, {in} BOOL fPersistChanges, {out} HPROPSHEETPAGE * phPage |*)
end;
type
{$EXTERNALSYM ISchedulingAgent}
ISchedulingAgent = ITaskScheduler;
type
{$EXTERNALSYM IEnumTasks}
IEnumTasks = IEnumWorkItems;
const
{$EXTERNALSYM IID_ISchedulingAgent}
IID_ISchedulingAgent: TIID = (D1: $148BD527; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
const
{$EXTERNALSYM CLSID_CSchedulingAgent}
CLSID_CSchedulingAgent: TCLSID = (D1: $148BD52A; D2: $A2AB; D3: $11CE; D4: ($B1, $1F, $00, $AA, $00, $53, $05, $03));
//DOM-IGNORE-END
{$IFDEF UNITVERSIONING}
const
UnitVersioning: TUnitVersionInfo = (
RCSfile: '$URL$';
Revision: '$Revision$';
Date: '$Date$';
LogPath: 'JCL\source\windows';
Extra: '';
Data: nil
);
{$ENDIF UNITVERSIONING}
implementation
{$IFDEF UNITVERSIONING}
initialization
RegisterUnitVersion(HInstance, UnitVersioning);
finalization
UnregisterUnitVersion(HInstance);
{$ENDIF UNITVERSIONING}
end.
| {
"pile_set_name": "Github"
} |
/**
* Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
* Build: `lodash modularize underscore exports="node" -o ./underscore/`
* Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
* Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
* Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
* Available under MIT license <http://lodash.com/license>
*/
var createCallback = require('../functions/createCallback'),
slice = require('../internals/slice');
/* Native method shortcuts for methods with the same name as other `lodash` methods */
var nativeMax = Math.max,
nativeMin = Math.min;
/**
 * Returns all but the last element of `array`, or all but the last `n`
 * elements when a number is supplied. If a callback (or "_.pluck"/"_.where"
 * style shorthand) is provided instead, trailing elements are excluded for
 * as long as the callback returns truthy. The callback is bound to `thisArg`
 * and invoked with three arguments; (value, index, array).
 *
 * @static
 * @memberOf _
 * @category Arrays
 * @param {Array} array The array to query.
 * @param {Function|Object|number|string} [callback=1] The number of elements
 *  to exclude, or the function/shorthand deciding which trailing elements
 *  to drop.
 * @param {*} [thisArg] The `this` binding of `callback`.
 * @returns {Array} Returns a slice of `array`.
 * @example
 *
 * _.initial([1, 2, 3]);
 * // => [1, 2]
 *
 * _.initial([1, 2, 3], 2);
 * // => [1]
 *
 * _.initial([1, 2, 3], function(num) {
 *   return num > 1;
 * });
 * // => [1]
 */
function initial(array, callback, thisArg) {
  var total = array ? array.length : 0;
  var dropped = 0;

  if (callback != null && typeof callback != 'number') {
    // Functional form: walk backwards while the predicate holds.
    var predicate = createCallback(callback, thisArg, 3);
    var pos = total;
    while (pos-- && predicate(array[pos], pos, array)) {
      dropped++;
    }
  } else if (callback == null || thisArg) {
    // No count supplied (or a truthy thisArg): drop exactly one element.
    dropped = 1;
  } else {
    // Numeric form: drop the requested count (0 for falsey values).
    dropped = callback || 0;
  }
  return slice(array, 0, nativeMin(nativeMax(0, total - dropped), total));
}
| {
"pile_set_name": "Github"
} |
{"type":"newline","line":2,"col":1}
{"type":"extends","line":2,"col":1}
{"type":"path","line":2,"col":9,"val":"auxiliary/dialog.pug"}
{"type":"newline","line":4,"col":1}
{"type":"block","line":4,"col":1,"val":"content","mode":"replace"}
{"type":"indent","line":5,"col":1,"val":2}
{"type":"tag","line":5,"col":3,"val":"h1"}
{"type":"text","line":5,"col":6,"val":"Alert!"}
{"type":"newline","line":6,"col":1}
{"type":"tag","line":6,"col":3,"val":"p"}
{"type":"text","line":6,"col":5,"val":"I'm an alert!"}
{"type":"outdent","line":7,"col":1}
{"type":"eos","line":7,"col":1} | {
"pile_set_name": "Github"
} |
# Getting Started with kops on OpenStack
OpenStack support on kops is currently **beta**, which means that OpenStack support is in good shape and could be used for production. However, it is not as rigorously tested as the stable cloud providers and there are some features not supported. In particular, kops tries to support a wide variety of OpenStack setups and not all of them are equally well tested.
## OpenStack requirements
In order to deploy a kops-managed cluster on OpenStack, you need the following OpenStack services:
* Nova (compute)
* Neutron (networking)
* Glance (image)
* Cinder (block storage)
In addition, kops can make use of the following services:
* Swift (object store)
* Designate (dns)
* Octavia (loadbalancer)
The OpenStack version should be Ocata or newer.
## Source your openstack RC
The Cloud Config used by the kubernetes API server and kubelet will be constructed from environment variables in the openstack RC file.
```bash
source openstack.rc
```
We recommend using [Application Credentials](https://docs.openstack.org/keystone/queens/user/application_credentials.html) when authenticating to OpenStack.
**Note** The authentication used locally will be exported to your cluster and used by the kubernetes controller components. You must avoid using personal credentials used for other systems.
## Environment Variables
kops stores its configuration in a state store. Before creating a cluster, we need to export the path to the state store:
```bash
export KOPS_STATE_STORE=swift://<bucket-name> # where <bucket-name> is the name of the Swift container to use for kops state
```
If your OpenStack does not have Swift you can use any other VFS store, such as S3. See the [state store documentation](../state.md) for alternatives.
## Creating a Cluster
```bash
# to see your etcd storage type
openstack volume type list
# coreos (the default) + flannel overlay cluster in Default
kops create cluster \
--cloud openstack \
--name my-cluster.k8s.local \
--state ${KOPS_STATE_STORE} \
--zones nova \
--network-cidr 10.0.0.0/24 \
--image <imagename> \
--master-count=3 \
--node-count=1 \
--node-size <flavorname> \
--master-size <flavorname> \
--etcd-storage-type <volumetype> \
--api-loadbalancer-type public \
--topology private \
--bastion \
--ssh-public-key ~/.ssh/id_rsa.pub \
--networking weave \
--os-ext-net <externalnetworkname>
# to update a cluster
kops update cluster my-cluster.k8s.local --state ${KOPS_STATE_STORE} --yes
# to delete a cluster
kops delete cluster my-cluster.k8s.local --yes
```
## Optional flags
* `--os-kubelet-ignore-az=true` Nova and Cinder have different availability zones, more information [Kubernetes docs](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#block-storage)
* `--os-octavia=true` If Octavia Loadbalancer api should be used instead of old lbaas v2 api.
* `--os-dns-servers=8.8.8.8,8.8.4.4` You can define dns servers to be used in your cluster if your openstack setup does not have working dnssetup by default
## Compute and volume zone names do not match
Some openstack users do not have compute zones named exactly the same as their volume zones. A good example is a setup with several compute zones, for instance `zone-1`, `zone-2` and `zone-3`, but only one volume zone, usually called `nova`. By default this is a problem for kops, because kops assumes that if you are deploying things to `zone-1` there should be both a compute and a volume zone called `zone-1`.
However, you can still get kops working in your openstack by doing following:
Create cluster using your compute zones:
```bash
kops create cluster \
--zones zone-1,zone-2,zone-3 \
...
```
After you have initialized the configuration you need to edit configuration
```bash
kops edit cluster my-cluster.k8s.local
```
Edit `ignore-volume-az` to `true` and `override-volume-az` according to your cinder az name.
Example (volume zone is called `nova`):
```yaml
spec:
cloudConfig:
openstack:
blockStorage:
ignore-volume-az: true
override-volume-az: nova
```
**Finally execute update cluster**
```bash
kops update cluster my-cluster.k8s.local --state ${KOPS_STATE_STORE} --yes
```
Kops should create instances to all three zones, but provision volumes from the same zone.
# Using external cloud controller manager
If you want use [External CCM](https://github.com/kubernetes/cloud-provider-openstack) in your installation, this section contains instructions what you should do to get it up and running.
Create cluster without `--yes` flag (or modify existing cluster):
```bash
kops edit cluster <cluster>
```
Add following to clusterspec:
```yaml
spec:
cloudControllerManager: {}
```
Finally
```bash
kops update cluster --name <cluster> --yes
```
# Using OpenStack without lbaas
Some OpenStack installations do not include the lbaas component. To launch a cluster without a loadbalancer, run:
```bash
kops create cluster \
--cloud openstack \
... (like usually)
--api-loadbalancer-type=""
```
In clusters without loadbalancer, the address of a single random master will be added to your kube config.
# Using existing OpenStack network
You can have kops reuse existing network components instead of provisioning one per cluster. As OpenStack support is still beta, we recommend you take extra care when deleting clusters and ensure that kops does not try to remove any resources not belonging to the cluster.
## Let kops provision new subnets within an existing network
Use an existing network by using `--network <network id>`.
If you are provisioning the cluster from a spec file, add the network ID as follows:
```yaml
spec:
networkID: <network id>
```
## Use existing networks
Instead of kops creating new subnets for the cluster, you can reuse an existing subnet.
When you create a new cluster, you can specify subnets using the `--subnets` and `--utility-subnets` flags.
## Example
```bash
kops create cluster \
--cloud openstack \
--name sharedsub2.k8s.local \
--state ${KOPS_STATE_STORE} \
--zones zone-1 \
--network-cidr 10.1.0.0/16 \
--image debian-10-160819-devops \
--master-count=3 \
--node-count=2 \
--node-size m1.small \
--master-size m1.small \
--etcd-storage-type default \
--topology private \
--bastion \
--networking calico \
--api-loadbalancer-type public \
--os-kubelet-ignore-az=true \
--os-ext-net ext-net \
--subnets c7d20c0f-df3a-4e5b-842f-f633c182961f \
--utility-subnets 90871d21-b546-4c4a-a7c9-2337ddf5375f \
--os-octavia=true --yes
```
# Using with self-signed certificates in OpenStack
Kops can be configured to use insecure mode towards OpenStack. However, this is not recommended as OpenStack cloudprovider in kubernetes does not support it.
If you use insecure flag in kops it might be that the cluster does not work correctly.
```yaml
spec:
cloudConfig:
openstack:
insecureSkipVerify: true
```
# Next steps
Now that you have a working _kops_ cluster, read through the [recommendations for production setups guide](production.md) to learn more about how to configure _kops_ for production workloads. | {
"pile_set_name": "Github"
} |
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
<GenerateRuntimeConfigurationFiles>true</GenerateRuntimeConfigurationFiles>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Amazon.Lambda.Core" Version="1.0.0" />
<PackageReference Include="Amazon.Lambda.APIGatewayEvents" Version="1.1.3" />
<PackageReference Include="Amazon.Lambda.Serialization.Json" Version="1.4.0" />
<PackageReference Include="Newtonsoft.Json" Version="11.0.2" />
</ItemGroup>
</Project>
| {
"pile_set_name": "Github"
} |
body: //div[@id="bloc_actu"]/parent::*
title: //div[@id="content"]/h1[1]
test_url: http://www.autoactu.com/thomas-owsianski-nomme-president-d-audi-chine.shtml
| {
"pile_set_name": "Github"
} |
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "client.authentication.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// SchemeBuilder collects the functions that register this package's
	// types with a runtime.Scheme.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder aliases SchemeBuilder so generated code can
	// register against the same builder.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all collected registration functions to a scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}

// addKnownTypes registers this package's API types (currently only
// ExecCredential) plus the meta types under SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ExecCredential{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
| {
"pile_set_name": "Github"
} |
更多代码请见:https://github.com/xubo245/SparkLearning
Spark生态之Alluxio学习 版本:alluxio(tachyon) 0.7.1,spark-1.5.2,hadoop-2.6.0
# 1.问题描述 #
window下打开hdfs,会出现问题:hdfs常配置了hosts,访问或者下载hdfs的文件时(通过浏览器),回出现跳转的是hostname,而不是ip,而window下没有配置hosts,所以访问不了,需要手动设置ip,麻烦
# 2.解决办法: #
设置window的hosts
修改C:\Windows\System32\drivers\etc下的hosts文件
在
# 127.0.0.1 localhost
# ::1 localhost
后加入自己的host关联
# 3.运行记录: #
可以访问集群并且下载文件:
http://Master:50070/webhdfs/v1/xubo/project/SparkSW/output/time/20161106204334687SparkSW_queryFile_0P18691.file_dbFile_D1Line.fasta_splitNum_32_taskNum_1_file/part-00000?op=OPEN
参考
【1】http://spark.apache.org/docs/1.5.2/programming-guide.html
【2】https://github.com/xubo245/SparkLearning
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# CI helper: installs the system-level toolchains and fixtures needed to run
# the unit tests of the individual codegen packages (Java, C#, PHP, Swift,
# mono, httpie).
set -ev; # stop on error

echo "Installing dependencies required for tests in codegens/java-okhttp"
pushd ./codegens/java-okhttp &>/dev/null;
# OpenJDK 8 comes from the openjdk-r PPA on this image.
sudo add-apt-repository ppa:openjdk-r/ppa -y
sudo rm -rf /var/lib/apt/lists/*
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk
# Pre-bundled jars used by the unit tests.
unzip test/unit/fixtures/dependencies.zip
popd &>/dev/null;

echo "Installing dependencies required for tests in codegens/java-unirest"
pushd ./codegens/java-unirest &>/dev/null;
unzip test/unit/fixtures/dependencies.zip
popd &>/dev/null;

echo "Installing dependencies required for tests in codegens/csharp-restsharp"
pushd ./codegens/csharp-restsharp &>/dev/null;
# Microsoft package feed for the .NET Core SDK (Ubuntu 16.04).
wget -q https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
sudo dpkg -i packages-microsoft-prod.deb
sudo apt-get install apt-transport-https
sudo apt-get update
sudo apt-get install dotnet-sdk-2.2
# Scratch console project used to pull the RestSharp NuGet package.
dotnet new console -o testProject
pushd ./testProject &>/dev/null;
dotnet add package RestSharp
popd &>/dev/null;
popd &>/dev/null;

echo "Installing dependencies required for tests in codegens/php-httprequest2"
pear install HTTP_Request2-2.3.0

echo "Installing dependencies required for tests in codegens/swift"
pushd ./codegens/swift &>/dev/null;
# Swift 5.0.1 release toolchain plus its clang/ICU/libcurl runtime deps.
sudo apt-get update
sudo apt-get install clang-3.6 libicu-dev libpython2.7 -y
sudo apt-get install libcurl3 libpython2.7-dev -y
sudo wget https://swift.org/builds/swift-5.0.1-release/ubuntu1604/swift-5.0.1-RELEASE/swift-5.0.1-RELEASE-ubuntu16.04.tar.gz
sudo tar xzf swift-5.0.1-RELEASE-ubuntu16.04.tar.gz
# The module map must be writable for the test harness.
sudo chmod 777 swift-5.0.1-RELEASE-ubuntu16.04/usr/lib/swift/CoreFoundation/module.map
popd &>/dev/null;

echo "Installing dependencies required for tests in codegens/csharp-restsharp"
sudo apt-get install -y mono-complete

echo "Installing dependencies required for tests in codegens/shell-httpie"
sudo apt-get install httpie
"pile_set_name": "Github"
} |
//
// NSArray+zh_SafeAccess.m
// zhPopupController
//
// Created by zhanghao on 2017/9/15.
// Copyright © 2017年 snail-z. All rights reserved.
//
#import "NSArray+zh_SafeAccess.h"
@implementation NSArray (zh_SafeAccess)

// Returns the index of the first occurrence of `anObject`, or 0 when the
// object is not present. NOTE: 0 is also a valid index, so callers cannot
// distinguish "first element" from "not found"; this quirk is kept for
// backward compatibility. Asserts (debug builds) that the array is non-empty.
- (NSUInteger)zh_indexOfObject:(id)anObject {
    NSParameterAssert(self.count);
    // Single pass: the original called containsObject: and then
    // indexOfObject:, scanning the array twice for the same answer.
    NSUInteger index = [self indexOfObject:anObject];
    return index == NSNotFound ? 0 : index;
}

@end
| {
"pile_set_name": "Github"
} |
#include "Gui/Menu.h"
#include "Gui/LineEdit.h"
#include "Gui/SpinBox.h"
#include "Gui/Button.h"
#include "Gui/ticks.h"
#include "Gui/CurveEditor.h"
#include "Gui/TextRenderer.h"
#include "Gui/CurveEditorUndoRedo.h"
#include "Gui/DopeSheetEditor.h"
#include "Gui/KnobGui.h"
#include "Gui/SequenceFileDialog.h"
#include "Gui/ZoomContext.h"
#include "Gui/TabWidget.h"
#include "Gui/Gui.h"
#include "Gui/GuiMacros.h"
#include "Gui/ActionShortcuts.h"
#include "Gui/GuiApplicationManager.h"
#include "Gui/ViewerGL.h"
#include "Gui/ViewerTab.h"
#include "Gui/NodeGraph.h"
#include "Gui/Histogram.h"
#include "Gui/GuiAppInstance.h"
#include "Gui/CurveSelection.h"
#include "Gui/Label.h"
#include "Gui/PythonPanels.h"
| {
"pile_set_name": "Github"
} |
Feature: Feature executed in parallel
Background:
Given bg_1_parallel
When bg_2_parallel
Then bg_3_parallel
Scenario: Scenario_1
Given step_1_parallel
When step_2_parallel
Then step_3_parallel
Then cliché_parallel
Scenario Outline: ScenarioOutline_1_parallel
Given so_1 <a>_parallel
When so_2 <c> cucumbers_parallel
Then <b> so_3_parallel
Examples:
| a | b | c |
| 12 | 5 | 7 |
| 20 | 5 | 15 |
Scenario: Scenario_2
Given a_parallel
Then b_parallel
When c_parallel
| {
"pile_set_name": "Github"
} |
/*
* (C) 2007-2012 Alibaba Group Holding Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.taobao.gecko.core.core.impl;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import com.taobao.gecko.core.buffer.IoBuffer;
import com.taobao.gecko.core.core.CodecFactory;
import com.taobao.gecko.core.core.Session;
import com.taobao.gecko.core.util.ByteBufferMatcher;
import com.taobao.gecko.core.util.ShiftAndByteBufferMatcher;
/**
 * A {@link CodecFactory} implementation for a line-oriented text protocol:
 * each message is a string terminated by a CRLF ("\r\n") delimiter, encoded
 * with a configurable character set (UTF-8 by default).
 *
 * @author boyan
 *
 * @since 1.0, 2009-12-16 06:05:26 PM
 */
public class TextLineCodecFactory implements CodecFactory {

    // The CRLF delimiter that terminates every encoded line.
    public static final IoBuffer SPLIT = IoBuffer.wrap("\r\n".getBytes());

    // Matcher used to locate the delimiter inside incoming buffers
    // (Shift-And byte matching).
    private static final ByteBufferMatcher SPLIT_PATTERN = new ShiftAndByteBufferMatcher(SPLIT);

    public static final String DEFAULT_CHARSET_NAME = "utf-8";

    // Charset shared by the encoder and decoder; fixed at construction.
    private final Charset charset;

    /** Creates a factory using the default UTF-8 charset. */
    public TextLineCodecFactory() {
        this.charset = Charset.forName(DEFAULT_CHARSET_NAME);
    }

    /** Creates a factory using the named charset. */
    public TextLineCodecFactory(final String charsetName) {
        this.charset = Charset.forName(charsetName);
    }

    /**
     * Decoder that extracts at most one delimiter-terminated line per call.
     * Returns null when no complete line is available in the buffer yet.
     */
    class StringDecoder implements CodecFactory.Decoder {
        public Object decode(final IoBuffer buffer, final Session session) {
            String result = null;
            // Position of the first CRLF in the readable region, or negative
            // if no complete line has arrived.
            final int index = SPLIT_PATTERN.matchFirst(buffer);
            if (index >= 0) {
                // Temporarily narrow the limit to the delimiter so only the
                // line's bytes are decoded, then restore the old limit and
                // advance the position past the delimiter.
                final int limit = buffer.limit();
                buffer.limit(index);
                final CharBuffer charBuffer = TextLineCodecFactory.this.charset.decode(buffer.buf());
                result = charBuffer.toString();
                buffer.limit(limit);
                buffer.position(index + SPLIT.remaining());
            }
            return result;
        }
    }

    private final CodecFactory.Decoder decoder = new StringDecoder();

    public Decoder getDecoder() {
        return this.decoder;
    }

    /** Encoder that appends the CRLF delimiter to every outgoing string. */
    class StringEncoder implements Encoder {
        public IoBuffer encode(final Object msg, final Session session) {
            if (msg == null) {
                return null;
            }
            final String message = (String) msg;
            final ByteBuffer buff = TextLineCodecFactory.this.charset.encode(message);
            // Size the output for the payload plus the trailing delimiter.
            final IoBuffer resultBuffer = IoBuffer.allocate(buff.remaining() + SPLIT.remaining());
            resultBuffer.put(buff);
            // slice() so the shared SPLIT buffer's position is not consumed.
            resultBuffer.put(SPLIT.slice());
            resultBuffer.flip();
            return resultBuffer;
        }
    }

    private final Encoder encoder = new StringEncoder();

    public Encoder getEncoder() {
        return this.encoder;
    }
}
"pile_set_name": "Github"
} |
package com.nepxion.thunder.stock.constant;
/**
* <p>Title: Nepxion Stock</p>
* <p>Description: Nepxion Stock For Distribution</p>
* <p>Copyright: Copyright (c) 2017-2050</p>
* <p>Company: Nepxion</p>
* @author Haojun Ren
* @version 1.0
*/
public final class StockConstants {

    // Market index ticker symbols ("sh" = Shanghai exchange,
    // "sz" = Shenzhen exchange).
    // NOTE(review): presumably sh000001 = SSE Composite Index,
    // sz399001 = SZSE Component Index, sz399006 = ChiNext Index —
    // confirm against the quote provider's symbol table.
    public static final String SH = "sh000001";
    public static final String SZ = "sz399001";
    public static final String CY = "sz399006";
}
"pile_set_name": "Github"
} |
// Copyright 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// The Google C++ Testing and Mocking Framework (Google Test)
#include "gtest/gtest-test-part.h"
#include "src/gtest-internal-inl.h"
namespace testing {
using internal::GetUnitTestImpl;
// Returns the failure message with the trailing stack trace (everything from
// the stack-trace marker onward) stripped; returns the message unchanged when
// no marker is present.
std::string TestPartResult::ExtractSummary(const char* message) {
  const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
  if (stack_trace == nullptr) {
    return message;
  }
  return std::string(message, stack_trace);
}
// Streams a TestPartResult as "file:line: <severity>:\n<message>".
std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
  // Map the result type to its display label; anything that is not a
  // success, skip, or fatal failure is reported as a non-fatal failure.
  const char* severity = "Non-fatal failure";
  if (result.type() == TestPartResult::kSuccess) {
    severity = "Success";
  } else if (result.type() == TestPartResult::kSkip) {
    severity = "Skipped";
  } else if (result.type() == TestPartResult::kFatalFailure) {
    severity = "Fatal failure";
  }
  return os << result.file_name() << ":" << result.line_number() << ": "
            << severity << ":\n"
            << result.message() << std::endl;
}
// Adds a copy of `result` to the end of the array.
void TestPartResultArray::Append(const TestPartResult& result) {
  array_.emplace_back(result);
}
// Returns the TestPartResult at `index` (0-based); aborts the process with a
// diagnostic when the index is out of range.
const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
  const bool in_range = index >= 0 && index < size();
  if (!in_range) {
    printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
    internal::posix::Abort();
  }
  return array_[static_cast<size_t>(index)];
}
// Returns the number of TestPartResult objects in the array.
int TestPartResultArray::size() const {
  // Narrowed to int to match gtest's historical public API.
  return static_cast<int>(array_.size());
}
namespace internal {

// Installs this helper as the current thread's test-part-result reporter so
// that failures occurring while it is alive can be observed; the previously
// active reporter is saved for restoration in the destructor.
HasNewFatalFailureHelper::HasNewFatalFailureHelper()
    : has_new_fatal_failure_(false),
      original_reporter_(GetUnitTestImpl()->
                         GetTestPartResultReporterForCurrentThread()) {
  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
}

// Restores the reporter that was active before this helper was constructed.
HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
      original_reporter_);
}

// Records whether a fatal failure was seen, then forwards the result to the
// original reporter so normal reporting still takes place.
void HasNewFatalFailureHelper::ReportTestPartResult(
    const TestPartResult& result) {
  if (result.fatally_failed())
    has_new_fatal_failure_ = true;
  original_reporter_->ReportTestPartResult(result);
}

}  // namespace internal
} // namespace testing
| {
"pile_set_name": "Github"
} |
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.1.4, March 11th, 2002
Copyright (C) 1995-2002 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Jean-loup Gailly Mark Adler
[email protected] [email protected]
The data format used by the zlib library is described by RFCs (Request for
Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
(zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
*/
#ifndef _ZLIB_H
#define _ZLIB_H
#include "zconf.h"
#ifdef __cplusplus
extern "C" {
#endif
#define ZLIB_VERSION "1.1.4"
/*
The 'zlib' compression library provides in-memory compression and
decompression functions, including integrity checks of the uncompressed
data. This version of the library supports only one compression method
(deflation) but other algorithms will be added later and will have the same
stream interface.
Compression can be done in a single step if the buffers are large
enough (for example if an input file is mmap'ed), or can be done by
repeated calls of the compression function. In the latter case, the
application must provide more input and/or consume the output
(providing more output space) before each call.
The library also supports reading and writing files in gzip (.gz) format
with an interface similar to that of stdio.
The library does not install any signal handler. The decoder checks
the consistency of the compressed data, so the library should never
crash even in case of corrupted input.
*/
typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
typedef void (*free_func) OF((voidpf opaque, voidpf address));
struct internal_state;
typedef struct z_stream_s {
Bytef *next_in; /* next input byte */
uInt avail_in; /* number of bytes available at next_in */
uLong total_in; /* total nb of input bytes read so far */
Bytef *next_out; /* next output byte should be put there */
uInt avail_out; /* remaining free space at next_out */
uLong total_out; /* total nb of bytes output so far */
char *msg; /* last error message, NULL if no error */
struct internal_state FAR *state; /* not visible by applications */
alloc_func zalloc; /* used to allocate the internal state */
free_func zfree; /* used to free the internal state */
voidpf opaque; /* private data object passed to zalloc and zfree */
int data_type; /* best guess about the data type: ascii or binary */
uLong adler; /* adler32 value of the uncompressed data */
uLong reserved; /* reserved for future use */
} z_stream;
typedef z_stream FAR *z_streamp;
/*
The application must update next_in and avail_in when avail_in has
dropped to zero. It must update next_out and avail_out when avail_out
has dropped to zero. The application must initialize zalloc, zfree and
opaque before calling the init function. All other fields are set by the
compression library and must not be updated by the application.
The opaque value provided by the application will be passed as the first
parameter for calls of zalloc and zfree. This can be useful for custom
memory management. The compression library attaches no meaning to the
opaque value.
zalloc must return Z_NULL if there is not enough memory for the object.
If zlib is used in a multi-threaded application, zalloc and zfree must be
thread safe.
On 16-bit systems, the functions zalloc and zfree must be able to allocate
exactly 65536 bytes, but will not be required to allocate more than this
if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
pointers returned by zalloc for objects of exactly 65536 bytes *must*
have their offset normalized to zero. The default allocation function
provided by this library ensures this (see zutil.c). To reduce memory
requirements and avoid any allocation of 64K objects, at the expense of
compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
The fields total_in and total_out can be used for statistics or
progress reports. After compression, total_in holds the total size of
the uncompressed data and may be saved for use in the decompressor
(particularly if the decompressor wants to decompress everything in
a single step).
*/
/* constants */
#define Z_NO_FLUSH 0
#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
#define Z_SYNC_FLUSH 2
#define Z_FULL_FLUSH 3
#define Z_FINISH 4
/* Allowed flush values; see deflate() below for details */
#define Z_OK 0
#define Z_STREAM_END 1
#define Z_NEED_DICT 2
#define Z_ERRNO (-1)
#define Z_STREAM_ERROR (-2)
#define Z_DATA_ERROR (-3)
#define Z_MEM_ERROR (-4)
#define Z_BUF_ERROR (-5)
#define Z_VERSION_ERROR (-6)
/* Return codes for the compression/decompression functions. Negative
* values are errors, positive values are used for special but normal events.
*/
#define Z_NO_COMPRESSION 0
#define Z_BEST_SPEED 1
#define Z_BEST_COMPRESSION 9
#define Z_DEFAULT_COMPRESSION (-1)
/* compression levels */
#define Z_FILTERED 1
#define Z_HUFFMAN_ONLY 2
#define Z_DEFAULT_STRATEGY 0
/* compression strategy; see deflateInit2() below for details */
#define Z_BINARY 0
#define Z_ASCII 1
#define Z_UNKNOWN 2
/* Possible values of the data_type field */
#define Z_DEFLATED 8
/* The deflate compression method (the only one supported in this version) */
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
/* basic functions */
/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
If the first character differs, the library code actually used is
not compatible with the zlib.h header file used by the application.
This check is automatically made by deflateInit and inflateInit.
*/
/*
ZEXTERN(int) deflateInit OF((z_streamp strm, int level));
Initializes the internal stream state for compression. The fields
zalloc, zfree and opaque must be initialized before by the caller.
If zalloc and zfree are set to Z_NULL, deflateInit updates them to
use default allocation functions.
The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
1 gives best speed, 9 gives best compression, 0 gives no compression at
all (the input data is simply copied a block at a time).
Z_DEFAULT_COMPRESSION requests a default compromise between speed and
compression (currently equivalent to level 6).
deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_STREAM_ERROR if level is not a valid compression level,
Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
with the version assumed by the caller (ZLIB_VERSION).
msg is set to null if there is no error message. deflateInit does not
perform any compression: this will be done by deflate().
*/
/*
deflate compresses as much data as possible, and stops when the input
buffer becomes empty or the output buffer becomes full. It may introduce some
output latency (reading input without producing any output) except when
forced to flush.
The detailed semantics are as follows. deflate performs one or both of the
following actions:
- Compress more input starting at next_in and update next_in and avail_in
accordingly. If not all input can be processed (because there is not
enough room in the output buffer), next_in and avail_in are updated and
processing will resume at this point for the next call of deflate().
- Provide more output starting at next_out and update next_out and avail_out
accordingly. This action is forced if the parameter flush is non zero.
Forcing flush frequently degrades the compression ratio, so this parameter
should be set only when necessary (in interactive applications).
Some output may be provided even if flush is not set.
Before the call of deflate(), the application should ensure that at least
one of the actions is possible, by providing more input and/or consuming
more output, and updating avail_in or avail_out accordingly; avail_out
should never be zero before the call. The application can consume the
compressed output when it wants, for example when the output buffer is full
(avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
and with zero avail_out, it must be called again after making room in the
output buffer because there might be more output pending.
If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
flushed to the output buffer and the output is aligned on a byte boundary, so
that the decompressor can get all input data available so far. (In particular
avail_in is zero after the call if enough output space has been provided
before the call.) Flushing may degrade compression for some compression
algorithms and so it should be used only when necessary.
If flush is set to Z_FULL_FLUSH, all output is flushed as with
Z_SYNC_FLUSH, and the compression state is reset so that decompression can
restart from this point if previous compressed data has been damaged or if
random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
the compression.
If deflate returns with avail_out == 0, this function must be called again
with the same value of the flush parameter and more output space (updated
avail_out), until the flush is complete (deflate returns with non-zero
avail_out).
If the parameter flush is set to Z_FINISH, pending input is processed,
pending output is flushed and deflate returns with Z_STREAM_END if there
was enough output space; if deflate returns with Z_OK, this function must be
called again with Z_FINISH and more output space (updated avail_out) but no
more input data, until it returns with Z_STREAM_END or an error. After
deflate has returned Z_STREAM_END, the only possible operations on the
stream are deflateReset or deflateEnd.
Z_FINISH can be used immediately after deflateInit if all the compression
is to be done in a single step. In this case, avail_out must be at least
0.1% larger than avail_in plus 12 bytes. If deflate does not return
Z_STREAM_END, then it must be called again as described above.
deflate() sets strm->adler to the adler32 checksum of all input read
so far (that is, total_in bytes).
deflate() may update data_type if it can make a good guess about
the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
binary. This field is only for information purposes and does not affect
the compression algorithm in any manner.
deflate() returns Z_OK if some progress has been made (more input
processed or more output produced), Z_STREAM_END if all input has been
consumed and all output has been produced (only when flush is set to
Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
(for example avail_in or avail_out was zero).
*/
/*
All dynamically allocated data structures for this stream are freed.
This function discards any unprocessed input and does not flush any
pending output.
deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
stream state was inconsistent, Z_DATA_ERROR if the stream was freed
prematurely (some input or output was discarded). In the error case,
msg may be set but then points to a static string (which must not be
deallocated).
*/
/*
ZEXTERN(int) inflateInit OF((z_streamp strm));
Initializes the internal stream state for decompression. The fields
next_in, avail_in, zalloc, zfree and opaque must be initialized before by
the caller. If next_in is not Z_NULL and avail_in is large enough (the exact
value depends on the compression method), inflateInit determines the
compression method from the zlib header and allocates all data structures
accordingly; otherwise the allocation will be deferred to the first call of
inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
use default allocation functions.
inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
version assumed by the caller. msg is set to null if there is no error
message. inflateInit does not perform any decompression apart from reading
the zlib header if present: this will be done by inflate(). (So next_in and
avail_in may be modified, but next_out and avail_out are unchanged.)
*/
ZEXTERN(int) inflate OF((z_streamp strm, int flush));
/*
inflate decompresses as much data as possible, and stops when the input
buffer becomes empty or the output buffer becomes full. It may
introduce some output latency (reading input without producing any output)
except when forced to flush.
The detailed semantics are as follows. inflate performs one or both of the
following actions:
- Decompress more input starting at next_in and update next_in and avail_in
accordingly. If not all input can be processed (because there is not
enough room in the output buffer), next_in is updated and processing
will resume at this point for the next call of inflate().
- Provide more output starting at next_out and update next_out and avail_out
accordingly. inflate() provides as much output as possible, until there
is no more input data or no more space in the output buffer (see below
about the flush parameter).
Before the call of inflate(), the application should ensure that at least
one of the actions is possible, by providing more input and/or consuming
more output, and updating the next_* and avail_* values accordingly.
The application can consume the uncompressed output when it wants, for
example when the output buffer is full (avail_out == 0), or after each
call of inflate(). If inflate returns Z_OK and with zero avail_out, it
must be called again after making room in the output buffer because there
might be more output pending.
If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
output as possible to the output buffer. The flushing behavior of inflate is
not specified for values of the flush parameter other than Z_SYNC_FLUSH
and Z_FINISH, but the current implementation actually flushes as much output
as possible anyway.
inflate() should normally be called until it returns Z_STREAM_END or an
error. However if all decompression is to be performed in a single step
(a single call of inflate), the parameter flush should be set to
Z_FINISH. In this case all pending input is processed and all pending
output is flushed; avail_out must be large enough to hold all the
uncompressed data. (The size of the uncompressed data may have been saved
by the compressor for this purpose.) The next operation on this stream must
be inflateEnd to deallocate the decompression state. The use of Z_FINISH
is never required, but can be used to inform inflate that a faster routine
may be used for the single inflate() call.
If a preset dictionary is needed at this point (see inflateSetDictionary
below), inflate sets strm->adler to the adler32 checksum of the
dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
it sets strm->adler to the adler32 checksum of all output produced
so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
an error code as described below. At the end of the stream, inflate()
checks that its computed adler32 checksum is equal to that saved by the
compressor and returns Z_STREAM_END only if the checksum is correct.
inflate() returns Z_OK if some progress has been made (more input processed
or more output produced), Z_STREAM_END if the end of the compressed data has
been reached and all uncompressed output has been produced, Z_NEED_DICT if a
preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
corrupted (input stream not conforming to the zlib format or incorrect
adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
(for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not
enough memory, Z_BUF_ERROR if no progress is possible or if there was not
enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
case, the application may then call inflateSync to look for a good
compression block.
*/
ZEXTERN(int) inflateEnd OF((z_streamp strm));
/*
All dynamically allocated data structures for this stream are freed.
This function discards any unprocessed input and does not flush any
pending output.
inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
was inconsistent. In the error case, msg may be set but then points to a
static string (which must not be deallocated).
*/
/* Advanced functions */
/*
The following functions are needed only in some special applications.
*/
/*
ZEXTERN(int) deflateInit2 OF((z_streamp strm,
int level,
int method,
int windowBits,
int memLevel,
int strategy));
This is another version of deflateInit with more compression options. The
fields next_in, zalloc, zfree and opaque must be initialized before by
the caller.
The method parameter is the compression method. It must be Z_DEFLATED in
this version of the library.
The windowBits parameter is the base two logarithm of the window size
(the size of the history buffer). It should be in the range 8..15 for this
version of the library. Larger values of this parameter result in better
compression at the expense of memory usage. The default value is 15 if
deflateInit is used instead.
The memLevel parameter specifies how much memory should be allocated
for the internal compression state. memLevel=1 uses minimum memory but
is slow and reduces compression ratio; memLevel=9 uses maximum memory
for optimal speed. The default value is 8. See zconf.h for total memory
usage as a function of windowBits and memLevel.
The strategy parameter is used to tune the compression algorithm. Use the
value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
string match). Filtered data consists mostly of small values with a
somewhat random distribution. In this case, the compression algorithm is
tuned to compress them better. The effect of Z_FILTERED is to force more
Huffman coding and less string matching; it is somewhat intermediate
between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
the compression ratio but not the correctness of the compressed output even
if it is not set appropriately.
deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
method). msg is set to null if there is no error message. deflateInit2 does
not perform any compression: this will be done by deflate().
*/
/*
Initializes the compression dictionary from the given byte sequence
without producing any compressed output. This function must be called
immediately after deflateInit, deflateInit2 or deflateReset, before any
call of deflate. The compressor and decompressor must use exactly the same
dictionary (see inflateSetDictionary).
The dictionary should consist of strings (byte sequences) that are likely
to be encountered later in the data to be compressed, with the most commonly
used strings preferably put towards the end of the dictionary. Using a
dictionary is most useful when the data to be compressed is short and can be
predicted with good accuracy; the data can then be compressed better than
with the default empty dictionary.
Depending on the size of the compression data structures selected by
deflateInit or deflateInit2, a part of the dictionary may in effect be
discarded, for example if the dictionary is larger than the window size in
deflate or deflate2. Thus the strings most likely to be useful should be
put at the end of the dictionary, not at the front.
Upon return of this function, strm->adler is set to the Adler32 value
of the dictionary; the decompressor may later use this value to determine
which dictionary has been used by the compressor. (The Adler32 value
applies to the whole dictionary even if only a subset of the dictionary is
actually used by the compressor.)
deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
parameter is invalid (such as NULL dictionary) or the stream state is
inconsistent (for example if deflate has already been called for this stream
or if the compression method is bsort). deflateSetDictionary does not
perform any compression: this will be done by deflate().
*/
/*
Sets the destination stream as a complete copy of the source stream.
This function can be useful when several compression strategies will be
tried, for example when there are several ways of pre-processing the input
data with a filter. The streams that will be discarded should then be freed
by calling deflateEnd. Note that deflateCopy duplicates the internal
compression state which can be quite large, so this strategy is slow and
can consume lots of memory.
deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
(such as zalloc being NULL). msg is left unchanged in both source and
destination.
*/
/*
This function is equivalent to deflateEnd followed by deflateInit,
but does not free and reallocate all the internal compression state.
The stream will keep the same compression level and any other attributes
that may have been set by deflateInit2.
deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent (such as zalloc or state being NULL).
*/
/*
Dynamically update the compression level and compression strategy. The
interpretation of level and strategy is as in deflateInit2. This can be
used to switch between compression and straight copy of the input data, or
to switch to a different kind of input data requiring a different
strategy. If the compression level is changed, the input available so far
is compressed with the old level (and may be flushed); the new level will
take effect only at the next call of deflate().
Before the call of deflateParams, the stream state must be set as for
a call of deflate(), since the currently available input may have to
be compressed and flushed. In particular, strm->avail_out must be non-zero.
deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
if strm->avail_out was zero.
*/
/*
ZEXTERN(int) inflateInit2 OF((z_streamp strm,
int windowBits));
This is another version of inflateInit with an extra parameter. The
fields next_in, avail_in, zalloc, zfree and opaque must be initialized
before by the caller.
The windowBits parameter is the base two logarithm of the maximum window
size (the size of the history buffer). It should be in the range 8..15 for
this version of the library. The default value is 15 if inflateInit is used
instead. If a compressed stream with a larger window size is given as
input, inflate() will return with the error code Z_DATA_ERROR instead of
trying to allocate a larger window.
inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
memLevel). msg is set to null if there is no error message. inflateInit2
does not perform any decompression apart from reading the zlib header if
present: this will be done by inflate(). (So next_in and avail_in may be
modified, but next_out and avail_out are unchanged.)
*/
/*
Initializes the decompression dictionary from the given uncompressed byte
sequence. This function must be called immediately after a call of inflate
if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
can be determined from the Adler32 value returned by this call of
inflate. The compressor and decompressor must use exactly the same
dictionary (see deflateSetDictionary).
inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
parameter is invalid (such as NULL dictionary) or the stream state is
inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
expected one (incorrect Adler32 value). inflateSetDictionary does not
perform any decompression: this will be done by subsequent calls of
inflate().
*/
/*
Skips invalid compressed data until a full flush point (see above the
description of deflate with Z_FULL_FLUSH) can be found, or until all
available input is skipped. No output is provided.
inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
if no more input was provided, Z_DATA_ERROR if no flush point has been found,
or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
case, the application may save the current value of total_in which
indicates where valid compressed data was found. In the error case, the
application may repeatedly call inflateSync, providing more input each time,
until success or end of the input data.
*/
ZEXTERN(int) inflateReset OF((z_streamp strm));
/*
This function is equivalent to inflateEnd followed by inflateInit,
but does not free and reallocate all the internal decompression state.
The stream will keep attributes that may have been set by inflateInit2.
inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent (such as zalloc or state being NULL).
*/
/* utility functions */
/*
The following utility functions are implemented on top of the
basic stream-oriented functions. To simplify the interface, some
default options are assumed (compression level and memory usage,
standard memory allocation functions). The source code of these
utility functions can easily be modified if you need special options.
*/
/*
Compresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total
size of the destination buffer, which must be at least 0.1% larger than
sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
compressed buffer.
This function can be used to compress a whole file at once if the
input file is mmap'ed.
compress returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_BUF_ERROR if there was not enough room in the output
buffer.
*/
/*
Compresses the source buffer into the destination buffer. The level
parameter has the same meaning as in deflateInit. sourceLen is the byte
length of the source buffer. Upon entry, destLen is the total size of the
destination buffer, which must be at least 0.1% larger than sourceLen plus
12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_BUF_ERROR if there was not enough room in the output buffer,
Z_STREAM_ERROR if the level parameter is invalid.
*/
/*
Decompresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total
size of the destination buffer, which must be large enough to hold the
entire uncompressed data. (The size of the uncompressed data must have
been saved previously by the compressor and transmitted to the decompressor
by some mechanism outside the scope of this compression library.)
Upon exit, destLen is the actual size of the uncompressed buffer.
This function can be used to decompress a whole file at once if the
input file is mmap'ed.
uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_BUF_ERROR if there was not enough room in the output
buffer, or Z_DATA_ERROR if the input data was corrupted.
*/
/*
Opens a gzip (.gz) file for reading or writing. The mode parameter
is as in fopen ("rb" or "wb") but can also include a compression level
("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for
Huffman only compression as in "wb1h". (See the description
of deflateInit2 for more information about the strategy parameter.)
gzopen can be used to read a file which is not in gzip format; in this
case gzread will directly read from the file without decompression.
gzopen returns NULL if the file could not be opened or if there was
insufficient memory to allocate the (de)compression state; errno
can be checked to distinguish the two cases (if errno is zero, the
zlib error is Z_MEM_ERROR). */
/*
gzdopen() associates a gzFile with the file descriptor fd. File
descriptors are obtained from calls like open, dup, creat, pipe or
fileno (if the file has been previously opened with fopen).
The mode parameter is as in gzopen.
The next call of gzclose on the returned gzFile will also close the
file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
gzdopen returns NULL if there was insufficient memory to allocate
the (de)compression state.
*/
/*
Dynamically update the compression level or strategy. See the description
of deflateInit2 for the meaning of these parameters.
gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not
opened for writing.
*/
/*
Reads the given number of uncompressed bytes from the compressed file.
If the input file was not in gzip format, gzread copies the given number
of bytes into the buffer.
gzread returns the number of uncompressed bytes actually read (0 for
end of file, -1 for error). */
/*
Writes the given number of uncompressed bytes into the compressed file.
gzwrite returns the number of uncompressed bytes actually written
(0 in case of error).
*/
/*
Converts, formats, and writes the args to the compressed file under
control of the format string, as in fprintf. gzprintf returns the number of
uncompressed bytes actually written (0 in case of error).
*/
/*
Writes the given null-terminated string to the compressed file, excluding
the terminating null character.
gzputs returns the number of characters written, or -1 in case of error.
*/
/*
Reads bytes from the compressed file until len-1 characters are read, or
a newline character is read and transferred to buf, or an end-of-file
condition is encountered. The string is then terminated with a null
character.
gzgets returns buf, or Z_NULL in case of error.
*/
/*
Writes c, converted to an unsigned char, into the compressed file.
gzputc returns the value that was written, or -1 in case of error.
*/
/*
Reads one byte from the compressed file. gzgetc returns this byte
or -1 in case of end of file or error.
*/
/*
Flushes all pending output into the compressed file. The parameter
flush is as in the deflate() function. The return value is the zlib
error number (see function gzerror below). gzflush returns Z_OK if
the flush parameter is Z_FINISH and all output could be flushed.
gzflush should be called only when strictly necessary because it can
degrade compression.
*/
/*
Sets the starting position for the next gzread or gzwrite on the
given compressed file. The offset represents a number of bytes in the
uncompressed data stream. The whence parameter is defined as in lseek(2);
the value SEEK_END is not supported.
If the file is opened for reading, this function is emulated but can be
extremely slow. If the file is opened for writing, only forward seeks are
supported; gzseek then compresses a sequence of zeroes up to the new
starting position.
gzseek returns the resulting offset location as measured in bytes from
the beginning of the uncompressed stream, or -1 in case of error, in
particular if the file is opened for writing and the new starting position
would be before the current position.
*/
/*
Rewinds the given file. This function is supported only for reading.
gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET)
*/
/*
Returns the starting position for the next gzread or gzwrite on the
given compressed file. This position represents a number of bytes in the
uncompressed data stream.
gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR)
*/
/*
Returns 1 when EOF has previously been detected reading the given
input stream, otherwise zero.
*/
/*
Flushes all pending output if necessary, closes the compressed file
and deallocates all the (de)compression state. The return value is the zlib
error number (see function gzerror below).
*/
/*
Returns the error message for the last error which occurred on the
given compressed file. errnum is set to zlib error number. If an
error occurred in the file system and not in the compression library,
errnum is set to Z_ERRNO and the application may consult errno
to get the exact error code.
*/
/* checksum functions */
/*
These functions are not related to compression but are exported
anyway because they might be useful in applications using the
compression library.
*/
ZEXTERN(uLong) adler32 OF((uLong adler, const Bytef *buf, uInt len));
/*
Update a running Adler-32 checksum with the bytes buf[0..len-1] and
return the updated checksum. If buf is NULL, this function returns
the required initial value for the checksum.
An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
much faster. Usage example:
uLong adler = adler32(0L, Z_NULL, 0);
while (read_buffer(buffer, length) != EOF) {
adler = adler32(adler, buffer, length);
}
if (adler != original_adler) error();
*/
/*
Update a running crc with the bytes buf[0..len-1] and return the updated
crc. If buf is NULL, this function returns the required initial value
for the crc. Pre- and post-conditioning (one's complement) is performed
within this function so it shouldn't be done by the application.
Usage example:
uLong crc = crc32(0L, Z_NULL, 0);
while (read_buffer(buffer, length) != EOF) {
crc = crc32(crc, buffer, length);
}
if (crc != original_crc) error();
*/
/* various hacks, don't look :) */
/* deflateInit and inflateInit are macros to allow checking the zlib version
 * and the compiler's view of z_stream:
 */
ZEXTERN(int) inflateInit2_ OF((z_streamp strm, int windowBits,
const char *version, int stream_size));
/* Each wrapper macro forwards the header's ZLIB_VERSION string and the
 * compile-time sizeof(z_stream), so the library can reject a caller built
 * against an incompatible zlib header at run time (Z_VERSION_ERROR). */
#define deflateInit(strm, level) \
deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
#define inflateInit(strm) \
inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
(strategy), ZLIB_VERSION, sizeof(z_stream))
#define inflateInit2(strm, windowBits) \
inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
#ifdef __cplusplus
}
#endif
#endif /* _ZLIB_H */
| {
"pile_set_name": "Github"
} |
<?php
namespace Illuminate\Contracts\Debug;
use Exception;
/**
 * Contract for the application's central exception handler.
 *
 * Implementations decide how an exception is reported (logged), whether it
 * should be reported at all, and how it is rendered for HTTP and console
 * contexts.
 */
interface ExceptionHandler
{
/**
 * Report or log an exception.
 *
 * @param \Exception $e
 * @return void
 */
public function report(Exception $e);
/**
 * Determine if the exception should be reported.
 *
 * @param \Exception $e
 * @return bool
 */
public function shouldReport(Exception $e);
/**
 * Render an exception into an HTTP response.
 *
 * @param \Illuminate\Http\Request $request
 * @param \Exception $e
 * @return \Symfony\Component\HttpFoundation\Response
 */
public function render($request, Exception $e);
/**
 * Render an exception to the console.
 *
 * @param \Symfony\Component\Console\Output\OutputInterface $output
 * @param \Exception $e
 * @return void
 */
public function renderForConsole($output, Exception $e);
}
| {
"pile_set_name": "Github"
} |
//#############################################################################
//
// This file is part of ImagePlay.
//
// ImagePlay is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ImagePlay is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ImagePlay. If not, see <http://www.gnu.org/licenses/>.
//
//#############################################################################
#ifndef IPLImagePlane_H
#define IPLImagePlane_H
#include "IPL_global.h"
/**
 * @brief The IPLImagePlane class
 *
 * Stores one plane (channel) of image data as a contiguous row-major array
 * of _width * _height ipl_basetype samples. Provides four pixel accessors
 * that differ only in how out-of-range coordinates are treated:
 *   p()  - no bounds check (fastest; caller guarantees validity)
 *   cp() - out-of-range reads return a shared zero value
 *   bp() - coordinates are clamped to the nearest border pixel
 *   wp() - coordinates are wrapped around by one image period
 */
class IPLSHARED_EXPORT IPLImagePlane
{
public:
IPLImagePlane();
IPLImagePlane( int width, int height );
IPLImagePlane( const IPLImagePlane &other );
IPLImagePlane( IPLImagePlane &&other );
IPLImagePlane &operator=(const IPLImagePlane &other);
IPLImagePlane &operator=(IPLImagePlane &&other);
~IPLImagePlane();
void swap(IPLImagePlane &other);
//IPLImagePlane& IPLImagePlane::operator=( IPLImagePlane& i );
//!
//! \brief pixel access without checks
//! \param x
//! \param y
//! \return reference to the pixel; undefined behavior for out-of-range x/y
//!
ipl_basetype& p( int x, int y )
{
return _plane[y * _width + x];
}
//!
//! \brief pixel access with check, zero for invalid coordinates
//! \param x
//! \param y
//! \return reference to the pixel, or to the shared static _zero for
//!         out-of-range coordinates. NOTE(review): writing through the
//!         returned reference for an invalid coordinate mutates _zero,
//!         which is shared by ALL instances — callers should treat the
//!         result of an out-of-range cp() as read-only.
//!
ipl_basetype& cp( int x, int y )
{
if( x>=0 && x<_width && y>=0 && y<_height )
return _plane[y * _width + x];
else
return _zero;
}
//!
//! \brief pixel access with check, extend border mode
//! \param x
//! \param y
//! \return reference to the nearest border pixel when x/y fall outside
//!
ipl_basetype& bp( int x, int y )
{
if( x<0 ) x = 0;
if( x >= _width ) x = _width-1;
if( y<0 ) y = 0;
if( y >= _height ) y = _height-1;
return _plane[y * _width + x];
}
//!
//! \brief pixel access with check, wrap border mode
//! \param x
//! \param y
//! \return reference to the wrapped pixel. Each coordinate is adjusted by
//!         exactly one width/height, so values more than one period outside
//!         the image (e.g. x < -_width) are NOT fully wrapped.
//!
ipl_basetype& wp( int x, int y )
{
if( x<0 ) x = x+_width;
if( x >= _width ) x = x-_width;
if( y<0 ) y = y+_height;
if( y >= _height ) y = y-_height;
return _plane[y * _width + x];
}
int width( void ) { return _width; }
int height( void ) { return _height; }
private:
void newPlane( void );
void deletePlane( void );
int _height;
int _width;
ipl_basetype* _plane;   // row-major pixel storage, _width * _height samples
static ipl_basetype _zero;      // shared fallback target for cp() misses
static int _instanceCount;
};
#endif // IPLImagePlane_H
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of the Assetic package, an OpenSky project.
*
* (c) 2010-2014 OpenSky Project Inc
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Assetic\Filter\Yui;
use Assetic\Asset\AssetInterface;
/**
* CSS YUI compressor filter.
*
* @link http://developer.yahoo.com/yui/compressor/
* @author Kris Wallsmith <[email protected]>
*/
class CssCompressorFilter extends BaseCompressorFilter
{
/**
 * Replaces the asset's content with its YUI-compressed form.
 *
 * @param AssetInterface $asset The asset whose dumped content is compressed
 */
public function filterDump(AssetInterface $asset)
{
// 'css' selects the CSS engine of the YUI compressor (vs. 'js').
$asset->setContent($this->compress($asset->getContent(), 'css'));
}
}
| {
"pile_set_name": "Github"
} |
/*
** $Id: ltable.c,v 2.72.1.1 2013/04/12 18:48:47 roberto Exp $
** Lua tables (hash)
** See Copyright Notice in lua.h
*/
/*
** Implementation of tables (aka arrays, objects, or hash tables).
** Tables keep its elements in two parts: an array part and a hash part.
** Non-negative integer keys are all candidates to be kept in the array
** part. The actual size of the array is the largest `n' such that at
** least half the slots between 0 and n are in use.
** Hash uses a mix of chained scatter table with Brent's variation.
** A main invariant of these tables is that, if an element is not
** in its main position (i.e. the `original' position that its hash gives
** to it), then the colliding element is in its own main position.
** Hence even when the load factor reaches 100%, performance remains good.
*/
#include <string.h>
#define ltable_c
#define LUA_CORE
#include "lua.h"
#include "ldebug.h"
#include "ldo.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lvm.h"
/*
** max size of array part is 2^MAXBITS
*/
#if LUAI_BITSINT >= 32
#define MAXBITS 30
#else
#define MAXBITS (LUAI_BITSINT-2)
#endif
#define MAXASIZE (1 << MAXBITS)
#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
#define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
#define hashboolean(t,p) hashpow2(t, p)
/*
** for some types, it is better to avoid modulus by power of 2, as
** they tend to have many 2 factors.
*/
#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
#define hashpointer(t,p) hashmod(t, IntPoint(p))
#define dummynode (&dummynode_)
#define isdummy(n) ((n) == dummynode)
static const Node dummynode_ = {
{NILCONSTANT}, /* value */
{{NILCONSTANT, NULL}} /* key */
};
/*
** hash for lua_Numbers
** Folds the double into an int via luai_hashnum, then maps it into the
** hash part with hashmod. The sign handling is careful: negating INT_MIN
** would overflow, so that single value is replaced by 0 first.
*/
static Node *hashnum (const Table *t, lua_Number n) {
int i;  /* integer image of the number, produced by luai_hashnum */
luai_hashnum(i, n);
if (i < 0) {  /* hashmod needs a non-negative operand */
if (cast(unsigned int, i) == 0u - i)  /* use unsigned to avoid overflows */
i = 0;  /* handle INT_MIN */
i = -i;  /* must be a positive value */
}
return hashmod(t, i);
}
/*
** returns the `main' position of an element in a table (that is, the index
** of its hash value). Every key type maps through a type-specific hash:
** numbers through hashnum, strings through their stored hash, booleans
** directly, and everything else by pointer identity.
*/
static Node *mainposition (const Table *t, const TValue *key) {
switch (ttype(key)) {
case LUA_TNUMBER:
return hashnum(t, nvalue(key));
case LUA_TLNGSTR: {  /* long strings compute their hash lazily, here */
TString *s = rawtsvalue(key);
if (s->tsv.extra == 0) {  /* no hash? */
s->tsv.hash = luaS_hash(getstr(s), s->tsv.len, s->tsv.hash);
s->tsv.extra = 1;  /* now it has its hash */
}
return hashstr(t, rawtsvalue(key));
}
case LUA_TSHRSTR:  /* short strings: hash is already stored in the TString */
return hashstr(t, rawtsvalue(key));
case LUA_TBOOLEAN:
return hashboolean(t, bvalue(key));
case LUA_TLIGHTUSERDATA:
return hashpointer(t, pvalue(key));
case LUA_TLCF:  /* light C function: hash its function pointer */
return hashpointer(t, fvalue(key));
default:  /* collectable objects hash by their GC pointer */
return hashpointer(t, gcvalue(key));
}
}
/*
** returns the index for `key' if `key' is an appropriate key to live in
** the array part of the table, -1 otherwise. A key qualifies when it is a
** number whose value survives a round-trip through int unchanged (i.e. an
** integral value representable as an int).
*/
static int arrayindex (const TValue *key) {
if (ttisnumber(key)) {
lua_Number n = nvalue(key);
int k;
lua_number2int(k, n);
if (luai_numeq(cast_num(k), n))  /* integral and fits in an int? */
return k;
}
return -1;  /* `key' did not match some condition */
}
/*
** returns the index of a `key' for table traversals. First goes all
** elements in the array part, then elements in the hash part. The
** beginning of a traversal is signaled by -1. Hash entries are found by
** walking the collision chain from the key's main position; a key that is
** not found anywhere raises "invalid key to 'next'".
*/
static int findindex (lua_State *L, Table *t, StkId key) {
int i;
if (ttisnil(key)) return -1;  /* first iteration */
i = arrayindex(key);
if (0 < i && i <= t->sizearray)  /* is `key' inside array part? */
return i-1;  /* yes; that's the index (corrected to C) */
else {
Node *n = mainposition(t, key);
for (;;) {  /* check whether `key' is somewhere in the chain */
/* key may be dead already, but it is ok to use it in `next' */
if (luaV_rawequalobj(gkey(n), key) ||
(ttisdeadkey(gkey(n)) && iscollectable(key) &&
deadvalue(gkey(n)) == gcvalue(key))) {
i = cast_int(n - gnode(t, 0));  /* key index in hash table */
/* hash elements are numbered after array ones */
return i + t->sizearray;
}
else n = gnext(n);
if (n == NULL)
luaG_runerror(L, "invalid key to " LUA_QL("next"));  /* key not found */
}
}
}
/*
** Traversal step for `next': locates `key' via findindex, then scans
** forward for the following non-nil entry. On success, writes the next
** key at `key' and its value at `key'+1 and returns 1; returns 0 when the
** traversal is complete. Array entries (with their integer keys) are
** visited before hash entries.
*/
int luaH_next (lua_State *L, Table *t, StkId key) {
int i = findindex(L, t, key);  /* find original element */
for (i++; i < t->sizearray; i++) {  /* try first array part */
if (!ttisnil(&t->array[i])) {  /* a non-nil value? */
setnvalue(key, cast_num(i+1));
setobj2s(L, key+1, &t->array[i]);
return 1;
}
}
for (i -= t->sizearray; i < sizenode(t); i++) {  /* then hash part */
if (!ttisnil(gval(gnode(t, i)))) {  /* a non-nil value? */
setobj2s(L, key, gkey(gnode(t, i)));
setobj2s(L, key+1, gval(gnode(t, i)));
return 1;
}
}
return 0;  /* no more elements */
}
/*
** {=============================================================
** Rehash
** ==============================================================
*/
/*
** Chooses the optimal size for the array part of a table.
** counts[lg] holds how many candidate integer keys fall in the range
** (2^(lg-1), 2^lg]; *pnarray holds the total number of candidates.
** The chosen size is the largest power of two such that more than half of
** its slots would be occupied. On return, *pnarray is set to that size and
** the number of keys that will live in the array part is returned.
*/
static int computesizes (int counts[], int *pnarray) {
int lg;              /* current power-of-two exponent */
int pow2;            /* 2^lg */
int seen = 0;        /* candidate keys counted so far (all < pow2) */
int best_count = 0;  /* keys that fit the best array size found */
int best_size = 0;   /* best (optimal) array size found so far */
for (lg = 0, pow2 = 1; pow2/2 < *pnarray; lg++, pow2 *= 2) {
if (counts[lg] > 0) {
seen += counts[lg];
if (seen > pow2/2) {  /* would more than half the slots be used? */
best_size = pow2;     /* this size is the best candidate so far */
best_count = seen;    /* all keys <= pow2 go to the array part */
}
}
if (seen == *pnarray)
break;  /* every candidate key has already been counted */
}
*pnarray = best_size;
lua_assert(*pnarray/2 <= best_count && best_count <= *pnarray);
return best_count;
}
/*
** If `key' is a candidate for the array part (a positive integral index
** not exceeding MAXASIZE), counts it into the nums[] histogram — bucket
** luaO_ceillog2(k) covers the range (2^(i-1), 2^i] — and returns 1;
** otherwise returns 0.
*/
static int countint (const TValue *key, int *nums) {
int k = arrayindex(key);
if (0 < k && k <= MAXASIZE) {  /* is `key' an appropriate array index? */
nums[luaO_ceillog2(k)]++;  /* count as such */
return 1;
}
else
return 0;
}
/*
** Count the non-nil entries in the array part of 't', classifying them
** into 'nums' by power-of-2 slice: nums[lg] is incremented once per
** used key in the range (2^(lg-1), 2^lg].  Returns the total number of
** used array slots.
*/
static int numusearray (const Table *t, int *nums) {
  int lg;
  int ttlg;  /* 2^lg */
  int ause = 0;  /* summation of `nums' */
  int i = 1;  /* count to traverse all array keys */
  for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) {  /* for each slice */
    int lc = 0;  /* counter of used slots in this slice */
    int lim = ttlg;
    if (lim > t->sizearray) {
      lim = t->sizearray;  /* adjust upper limit */
      if (i > lim)
        break;  /* no more elements to count */
    }
    /* count elements in range (2^(lg-1), 2^lg] */
    for (; i <= lim; i++) {
      if (!ttisnil(&t->array[i-1]))
        lc++;
    }
    nums[lg] += lc;
    ause += lc;
  }
  return ause;
}
/*
** Count the used entries in the hash part of 't'.  Integer keys that
** could go to the array part are also classified into 'nums' and their
** count added to '*pnasize'.  Returns the total number of used hash
** entries.
*/
static int numusehash (const Table *t, int *nums, int *pnasize) {
  int totaluse = 0;  /* total number of elements */
  int ause = 0;  /* elements that qualify for the array part */
  int i = sizenode(t);
  while (i--) {
    Node *n = &t->node[i];
    if (!ttisnil(gval(n))) {
      ause += countint(gkey(n), nums);
      totaluse++;
    }
  }
  *pnasize += ause;
  return totaluse;
}
/*
** Resize the array part of 't' to 'size' slots, initializing any newly
** created slots to nil.
*/
static void setarrayvector (lua_State *L, Table *t, int size) {
  int i;
  luaM_reallocvector(L, t->array, t->sizearray, size, TValue);
  for (i=t->sizearray; i<size; i++)
     setnilvalue(&t->array[i]);
  t->sizearray = size;
}
/*
** (Re)create the hash part of 't' with room for 'size' entries.  A size
** of zero installs the shared 'dummynode' instead of allocating;
** otherwise the size is rounded up to the next power of 2 and every
** node is cleared.  Raises "table overflow" past 2^MAXBITS entries.
*/
static void setnodevector (lua_State *L, Table *t, int size) {
  int lsize;  /* log2 of the actual allocated size */
  if (size == 0) {  /* no elements to hash part? */
    t->node = cast(Node *, dummynode);  /* use common `dummynode' */
    lsize = 0;
  }
  else {
    int i;
    lsize = luaO_ceillog2(size);
    if (lsize > MAXBITS)
      luaG_runerror(L, "table overflow");
    size = twoto(lsize);
    t->node = luaM_newvector(L, size, Node);
    for (i=0; i<size; i++) {  /* clear every node: no key, no value, no chain */
      Node *n = gnode(t, i);
      gnext(n) = NULL;
      setnilvalue(gkey(n));
      setnilvalue(gval(n));
    }
  }
  t->lsizenode = cast_byte(lsize);
  t->lastfree = gnode(t, size);  /* all positions are free */
}
/*
** Resize table 't' so that its array part has 'nasize' slots and its
** hash part has room for 'nhsize' entries.  Elements that fall out of a
** shrinking array slice, as well as all old hash entries, are
** re-inserted into the resized table.
*/
void luaH_resize (lua_State *L, Table *t, int nasize, int nhsize) {
  int i;
  int oldasize = t->sizearray;
  int oldhsize = t->lsizenode;
  Node *nold = t->node;  /* save old hash: re-inserted below, freed last */
  if (nasize > oldasize)  /* array part must grow? */
    setarrayvector(L, t, nasize);
  /* create new hash part with appropriate size */
  setnodevector(L, t, nhsize);
  if (nasize < oldasize) {  /* array part must shrink? */
    t->sizearray = nasize;
    /* re-insert elements from vanishing slice */
    for (i=nasize; i<oldasize; i++) {
      if (!ttisnil(&t->array[i]))
        luaH_setint(L, t, i + 1, &t->array[i]);
    }
    /* shrink array */
    luaM_reallocvector(L, t->array, oldasize, nasize, TValue);
  }
  /* re-insert elements from hash part */
  for (i = twoto(oldhsize) - 1; i >= 0; i--) {
    Node *old = nold+i;
    if (!ttisnil(gval(old))) {
      /* doesn't need barrier/invalidate cache, as entry was
         already present in the table */
      setobjt2t(L, luaH_set(L, t, gkey(old)), gval(old));
    }
  }
  if (!isdummy(nold))
    luaM_freearray(L, nold, cast(size_t, twoto(oldhsize)));  /* free old array */
}
/*
** Resize only the array part of 't', keeping the hash part at its
** current size (zero when it is just the dummy node).
*/
void luaH_resizearray (lua_State *L, Table *t, int nasize) {
  int nsize = isdummy(t->node) ? 0 : sizenode(t);
  luaH_resize(L, t, nasize, nsize);
}
/*
** Recompute the array/hash split of table 't' to accommodate one extra
** key 'ek': classify all existing integer keys (plus 'ek') by
** power-of-2 range, choose the optimal array size, and resize.
*/
static void rehash (lua_State *L, Table *t, const TValue *ek) {
  int nasize, na;
  int nums[MAXBITS+1];  /* nums[i] = number of keys with 2^(i-1) < k <= 2^i */
  int i;
  int totaluse;
  for (i=0; i<=MAXBITS; i++) nums[i] = 0;  /* reset counts */
  nasize = numusearray(t, nums);  /* count keys in array part */
  totaluse = nasize;  /* all those keys are integer keys */
  totaluse += numusehash(t, nums, &nasize);  /* count keys in hash part */
  /* count extra key */
  nasize += countint(ek, nums);
  totaluse++;
  /* compute new size for array part */
  na = computesizes(nums, &nasize);
  /* resize the table to new computed sizes */
  luaH_resize(L, t, nasize, totaluse - na);
}
/*
** }=============================================================
*/
/*
** Create a new, completely empty table: no metatable, empty array part,
** and the shared dummy node as hash part.
*/
Table *luaH_new (lua_State *L) {
  Table *t = &luaC_newobj(L, LUA_TTABLE, sizeof(Table), NULL, 0)->h;
  t->metatable = NULL;
  t->flags = cast_byte(~0);  /* NOTE(review): presumably initializes the tag-method cache -- confirm against ltm.h */
  t->array = NULL;  /* array part starts empty */
  t->sizearray = 0;
  setnodevector(L, t, 0);  /* hash part starts as the shared dummy node */
  return t;
}
/*
** Release all memory owned by table 't': the hash part (unless it is
** the shared dummy node), the array part, and the Table header itself.
*/
void luaH_free (lua_State *L, Table *t) {
  if (!isdummy(t->node))
    luaM_freearray(L, t->node, cast(size_t, sizenode(t)));
  luaM_freearray(L, t->array, t->sizearray);
  luaM_free(L, t);
}
/*
** Scan backwards from 'lastfree' looking for a node whose key is still
** nil; returns it, or NULL when the hash part has no free slot left.
*/
static Node *getfreepos (Table *t) {
  for (;;) {
    if (t->lastfree <= t->node)
      return NULL;  /* reached the first node: no free place available */
    t->lastfree--;
    if (ttisnil(gkey(t->lastfree)))
      return t->lastfree;
  }
}
/*
** inserts a new key into a hash table; first, check whether key's main
** position is free. If not, check whether colliding node is in its main
** position or not: if it is not, move colliding node to an empty place and
** put new key in its main position; otherwise (colliding node is in its main
** position), new key goes to an empty position.
** Returns the value cell for the new key; the caller stores the value.
*/
TValue *luaH_newkey (lua_State *L, Table *t, const TValue *key) {
  Node *mp;  /* main position for `key' */
  if (ttisnil(key)) luaG_runerror(L, "table index is nil");
  else if (ttisnumber(key) && luai_numisnan(L, nvalue(key)))
    luaG_runerror(L, "table index is NaN");
  mp = mainposition(t, key);
  if (!ttisnil(gval(mp)) || isdummy(mp)) {  /* main position is taken? */
    Node *othern;
    Node *n = getfreepos(t);  /* get a free place */
    if (n == NULL) {  /* cannot find a free place? */
      rehash(L, t, key);  /* grow table */
      /* whoever called 'newkey' takes care of TM cache and GC barrier */
      return luaH_set(L, t, key);  /* insert key into grown table */
    }
    lua_assert(!isdummy(n));
    othern = mainposition(t, gkey(mp));
    if (othern != mp) {  /* is colliding node out of its main position? */
      /* yes; move colliding node into free position */
      while (gnext(othern) != mp) othern = gnext(othern);  /* find previous */
      gnext(othern) = n;  /* redo the chain with `n' in place of `mp' */
      *n = *mp;  /* copy colliding node into free pos. (mp->next also goes) */
      gnext(mp) = NULL;  /* now `mp' is free */
      setnilvalue(gval(mp));
    }
    else {  /* colliding node is in its own main position */
      /* new node will go into free position */
      gnext(n) = gnext(mp);  /* chain new position */
      gnext(mp) = n;
      mp = n;
    }
  }
  setobj2t(L, gkey(mp), key);  /* install the new key... */
  luaC_barrierback(L, obj2gco(t), key);  /* ...and keep the GC invariant */
  lua_assert(ttisnil(gval(mp)));
  return gval(mp);
}
/*
** search function for integer keys: try the array part first, then the
** hash chain at the number's main position.
*/
const TValue *luaH_getint (Table *t, int key) {
  /* single unsigned comparison checks (1 <= key && key <= t->sizearray) */
  if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray))
    return &t->array[key-1];
  else {
    lua_Number nk = cast_num(key);
    Node *n = hashnum(t, nk);
    do {  /* check whether `key' is somewhere in the chain */
      if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
        return gval(n);  /* that's it */
      else n = gnext(n);
    } while (n);
    return luaO_nilobject;  /* absent keys read as nil */
  }
}
/*
** search function for short strings
*/
const TValue *luaH_getstr (Table *t, TString *key) {
  Node *n = hashstr(t, key);
  lua_assert(key->tsv.tt == LUA_TSHRSTR);  /* other key types use the generic path */
  do {  /* check whether `key' is somewhere in the chain */
    if (ttisshrstring(gkey(n)) && eqshrstr(rawtsvalue(gkey(n)), key))
      return gval(n);  /* that's it */
    else n = gnext(n);
  } while (n);
  return luaO_nilobject;  /* absent keys read as nil */
}
/*
** main search function: returns the value stored at 'key', or
** luaO_nilobject when absent (never NULL).
*/
const TValue *luaH_get (Table *t, const TValue *key) {
  switch (ttype(key)) {
    case LUA_TSHRSTR: return luaH_getstr(t, rawtsvalue(key));
    case LUA_TNIL: return luaO_nilobject;  /* nil is never a key */
    case LUA_TNUMBER: {
      int k;
      lua_Number n = nvalue(key);
      lua_number2int(k, n);
      if (luai_numeq(cast_num(k), n)) /* index is int? */
        return luaH_getint(t, k);  /* use specialized version */
      /* else *fall through* to the generic search below */
    }
    default: {
      Node *n = mainposition(t, key);
      do {  /* check whether `key' is somewhere in the chain */
        if (luaV_rawequalobj(gkey(n), key))
          return gval(n);  /* that's it */
        else n = gnext(n);
      } while (n);
      return luaO_nilobject;
    }
  }
}
/*
** Find the value cell for 'key', creating the entry when it is absent.
** beware: when using this function you probably need to check a GC
** barrier and invalidate the TM cache.
*/
TValue *luaH_set (lua_State *L, Table *t, const TValue *key) {
  const TValue *p = luaH_get(t, key);  /* is the key already present? */
  if (p == luaO_nilobject)
    return luaH_newkey(L, t, key);  /* no: create a fresh entry for it */
  return cast(TValue *, p);  /* yes: reuse its value cell */
}
/*
** Set t[key] = value for an integer key, creating the entry when it is
** absent.  (Same caveats as luaH_set regarding GC barrier and TM cache.)
*/
void luaH_setint (lua_State *L, Table *t, int key, TValue *value) {
  const TValue *p = luaH_getint(t, key);
  TValue *cell;
  if (p != luaO_nilobject)
    cell = cast(TValue *, p);
  else {
    TValue k;  /* boxed copy of the integer key */
    setnvalue(&k, cast_num(key));
    cell = luaH_newkey(L, t, &k);
  }
  setobj2t(L, cell, value);
}
/*
** Search for a boundary when the array part gives no hint: starting
** from 'j' (zero or a present index), keep doubling 'j' until t[j] is
** nil, then binary-search (i, j] for an i with t[i] non-nil and t[i+1]
** nil.  Falls back to a linear scan when the doubling would overflow
** MAX_INT (pathological tables).
*/
static int unbound_search (Table *t, unsigned int j) {
  unsigned int i = j;  /* i is zero or a present index */
  j++;
  /* find `i' and `j' such that i is present and j is not */
  while (!ttisnil(luaH_getint(t, j))) {
    i = j;
    j *= 2;
    if (j > cast(unsigned int, MAX_INT)) {  /* overflow? */
      /* table was built with bad purposes: resort to linear search */
      i = 1;
      while (!ttisnil(luaH_getint(t, i))) i++;
      return i - 1;
    }
  }
  /* now do a binary search between them */
  while (j - i > 1) {
    unsigned int m = (i+j)/2;
    if (ttisnil(luaH_getint(t, m))) j = m;
    else i = m;
  }
  return i;
}
/*
** Try to find a boundary in table `t'. A `boundary' is an integer index
** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
*/
int luaH_getn (Table *t) {
  unsigned int j = t->sizearray;
  if (j > 0 && ttisnil(&t->array[j - 1])) {
    /* there is a boundary in the array part: (binary) search for it */
    unsigned int i = 0;  /* invariant: t[i] non-nil (or i == 0); t[j] nil */
    while (j - i > 1) {
      unsigned int m = (i+j)/2;
      if (ttisnil(&t->array[m - 1])) j = m;
      else i = m;
    }
    return i;
  }
  /* else must find a boundary in hash part */
  else if (isdummy(t->node))  /* hash part is empty? */
    return j;  /* that is easy... */
  else return unbound_search(t, j);
}
#if defined(LUA_DEBUG)

/* Debug-only wrappers exposing internal static helpers outside this file. */

Node *luaH_mainposition (const Table *t, const TValue *key) {
  return mainposition(t, key);
}

int luaH_isdummy (Node *n) { return isdummy(n); }

#endif
| {
"pile_set_name": "Github"
} |
<view class="page">
<view class="page__hd">
<text class="page__title">modal</text>
<text class="page__desc">模式对话框</text>
</view>
<view class="page__bd">
<modal title="标题" confirm-text="确定" cancel-text="取消" hidden="{{modalHidden}}" mask bindconfirm="modalChange" bindcancel="modalChange">
这是对话框的内容。
</modal>
<modal class="modal" hidden="{{modalHidden2}}" no-cancel bindconfirm="modalChange2" bindcancel="modalChange2">
<view> 没有标题没有取消的对话框 </view>
<view> 内容可以插入节点 </view>
</modal>
<view class="btn-area">
<button type="default" bindtap="modalTap">点击弹出modal</button>
<button type="default" bindtap="modalTap2">点击弹出modal2</button>
</view>
</view>
</view>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!--
Copyright 2009-2012 The MyBatis Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE configuration
PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
<typeAliases>
<typeAlias alias="Person" type="org.apache.ibatis.submitted.empty_namespace.Person"/>
</typeAliases>
<environments default="test">
<environment id="test">
<transactionManager type="JDBC"></transactionManager>
<dataSource type="UNPOOLED">
<property name="driver" value="org.hsqldb.jdbcDriver"/>
<property name="url" value="jdbc:hsqldb:mem:empty_namespace"/>
<property name="username" value="sa"/>
</dataSource>
</environment>
</environments>
<mappers>
<mapper resource="org/apache/ibatis/submitted/empty_namespace/Person.xml"/>
</mappers>
</configuration>
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"encoding/binary"
"fmt"
"io"
"log"
"sync"
"sync/atomic"
)
// debugMux, if set, causes messages in the connection protocol to be
// logged.
const debugMux = false
// chanList is a thread safe channel list: a registry of open channels
// keyed by their local ID.
type chanList struct {
	// protects concurrent access to chans
	sync.Mutex

	// chans are indexed by the local id of the channel, which the
	// other side should send in the PeersId field.  Freed slots are
	// nil and may be reused.
	chans []*channel

	// This is a debugging aid: it offsets all IDs by this
	// amount. This helps distinguish otherwise identical
	// server/client muxes
	offset uint32
}
// add assigns a channel ID to the given channel: it reuses the first
// free slot, or appends a new one, and returns the resulting local ID
// (adjusted by the debugging offset).
func (c *chanList) add(ch *channel) uint32 {
	c.Lock()
	defer c.Unlock()
	for i, existing := range c.chans {
		if existing == nil {
			c.chans[i] = ch
			return uint32(i) + c.offset
		}
	}
	c.chans = append(c.chans, ch)
	return uint32(len(c.chans)-1) + c.offset
}
// getChan returns the channel registered under the given ID, or nil if
// the ID does not map to a known slot.
func (c *chanList) getChan(id uint32) *channel {
	idx := id - c.offset
	c.Lock()
	defer c.Unlock()
	var ch *channel
	if idx < uint32(len(c.chans)) {
		ch = c.chans[idx]
	}
	return ch
}
// remove forgets the channel registered under the given ID, freeing its
// slot for reuse.  Unknown IDs are ignored.
func (c *chanList) remove(id uint32) {
	idx := id - c.offset
	c.Lock()
	defer c.Unlock()
	if idx < uint32(len(c.chans)) {
		c.chans[idx] = nil
	}
}
// dropAll forgets all channels it knows, returning them in a slice.
func (c *chanList) dropAll() []*channel {
	c.Lock()
	defer c.Unlock()

	var alive []*channel
	for _, ch := range c.chans {
		if ch != nil {
			alive = append(alive, ch)
		}
	}
	c.chans = nil
	return alive
}
// mux represents the state for the SSH connection protocol, which
// multiplexes many channels onto a single packet transport.
type mux struct {
	conn     packetConn // underlying packet transport
	chanList chanList   // open channels, indexed by local ID

	incomingChannels chan NewChannel // channel-open requests from the peer

	globalSentMu     sync.Mutex       // serializes global requests that want a reply
	globalResponses  chan interface{} // replies to our own global requests
	incomingRequests chan *Request    // global requests from the peer

	errCond *sync.Cond // signals loop exit; its lock guards err
	err     error      // reason the read loop stopped
}
// When debugging, each new chanList instantiation has a different
// offset.
var globalOff uint32

// Wait blocks until the connection's read loop has exited and returns
// the error that caused it to stop.
func (m *mux) Wait() error {
	m.errCond.L.Lock()
	defer m.errCond.L.Unlock()
	for m.err == nil {
		m.errCond.Wait()
	}
	return m.err
}
// newMux returns a mux that runs over the given connection.  It starts
// the read loop in a background goroutine; use Wait to observe its exit.
func newMux(p packetConn) *mux {
	m := &mux{
		conn:             p,
		incomingChannels: make(chan NewChannel, chanSize),
		globalResponses:  make(chan interface{}, 1),
		incomingRequests: make(chan *Request, chanSize),
		errCond:          newCond(),
	}
	if debugMux {
		m.chanList.offset = atomic.AddUint32(&globalOff, 1)
	}
	go m.loop()
	return m
}
// sendMessage marshals msg and writes it to the underlying transport.
func (m *mux) sendMessage(msg interface{}) error {
	p := Marshal(msg)
	if debugMux {
		log.Printf("send global(%d): %#v", m.chanList.offset, msg)
	}
	return m.conn.writePacket(p)
}
// SendRequest sends a global request to the peer.  If wantReply is
// true, it blocks until the peer answers and returns the success flag
// and any reply payload; globalSentMu serializes such requests so that
// each response is matched with its request.
func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
	if wantReply {
		m.globalSentMu.Lock()
		defer m.globalSentMu.Unlock()
	}

	if err := m.sendMessage(globalRequestMsg{
		Type:      name,
		WantReply: wantReply,
		Data:      payload,
	}); err != nil {
		return false, nil, err
	}

	if !wantReply {
		return false, nil, nil
	}

	msg, ok := <-m.globalResponses
	if !ok {
		// connection shut down before a reply arrived
		return false, nil, io.EOF
	}
	switch msg := msg.(type) {
	case *globalRequestFailureMsg:
		return false, msg.Data, nil
	case *globalRequestSuccessMsg:
		return true, msg.Data, nil
	default:
		return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
	}
}
// ackRequest must be called after processing a global request that
// has WantReply set.
func (m *mux) ackRequest(ok bool, data []byte) error {
	var reply interface{}
	if ok {
		reply = globalRequestSuccessMsg{Data: data}
	} else {
		reply = globalRequestFailureMsg{Data: data}
	}
	return m.sendMessage(reply)
}
// Close closes the underlying packet transport.
func (m *mux) Close() error {
	return m.conn.Close()
}
// loop runs the connection machine. It will process packets until an
// error is encountered. To synchronize on loop exit, use mux.Wait.
func (m *mux) loop() {
	var err error
	for err == nil {
		err = m.onePacket()
	}

	// Tear down: close every open channel and notify consumers.
	for _, ch := range m.chanList.dropAll() {
		ch.close()
	}

	close(m.incomingChannels)
	close(m.incomingRequests)
	close(m.globalResponses)

	m.conn.Close()

	// Publish the exit error so Wait can return.
	m.errCond.L.Lock()
	m.err = err
	m.errCond.Broadcast()
	m.errCond.L.Unlock()

	if debugMux {
		log.Println("loop exit", err)
	}
}
// onePacket reads and processes one packet.  Channel-open and global
// messages are dispatched specially; anything else is assumed to be a
// channel packet and routed by the channel ID in bytes 1-4.
func (m *mux) onePacket() error {
	packet, err := m.conn.readPacket()
	if err != nil {
		return err
	}

	if debugMux {
		if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
			log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
		} else {
			p, _ := decode(packet)
			log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
		}
	}

	switch packet[0] {
	case msgChannelOpen:
		return m.handleChannelOpen(packet)
	case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
		return m.handleGlobalPacket(packet)
	}

	// assume a channel packet.
	if len(packet) < 5 {
		// too short to carry a channel ID
		return parseError(packet[0])
	}
	id := binary.BigEndian.Uint32(packet[1:])
	ch := m.chanList.getChan(id)
	if ch == nil {
		return fmt.Errorf("ssh: invalid channel %d", id)
	}

	return ch.handlePacket(packet)
}
// handleGlobalPacket decodes a global (non-channel) message: requests
// from the peer are forwarded to incomingRequests, while responses to
// our own requests go to globalResponses.
func (m *mux) handleGlobalPacket(packet []byte) error {
	msg, err := decode(packet)
	if err != nil {
		return err
	}
	switch msg := msg.(type) {
	case *globalRequestMsg:
		m.incomingRequests <- &Request{
			Type:      msg.Type,
			WantReply: msg.WantReply,
			Payload:   msg.Data,
			mux:       m,
		}
	case *globalRequestSuccessMsg, *globalRequestFailureMsg:
		m.globalResponses <- msg
	default:
		// decode only produces the message types handled above for
		// these packet numbers
		panic(fmt.Sprintf("not a global message %#v", msg))
	}

	return nil
}
// handleChannelOpen schedules a channel to be Accept()ed.  Requests
// with an out-of-range maximum packet size are rejected immediately
// with a channel-open failure.
func (m *mux) handleChannelOpen(packet []byte) error {
	var msg channelOpenMsg
	if err := Unmarshal(packet, &msg); err != nil {
		return err
	}

	if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
		failMsg := channelOpenFailureMsg{
			PeersId:  msg.PeersId,
			Reason:   ConnectionFailed,
			Message:  "invalid request",
			Language: "en_US.UTF-8",
		}
		return m.sendMessage(failMsg)
	}

	c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
	c.remoteId = msg.PeersId
	c.maxRemotePayload = msg.MaxPacketSize
	c.remoteWin.add(msg.PeersWindow)
	m.incomingChannels <- c
	return nil
}
// OpenChannel asks the peer to open a channel of the given type and
// returns the channel together with its stream of out-of-band requests.
func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
	ch, err := m.openChannel(chanType, extra)
	if err != nil {
		return nil, nil, err
	}

	return ch, ch.incomingRequests, nil
}
// openChannel sends a channel-open request for chanType and blocks
// until the peer either confirms or rejects it.
func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
	ch := m.newChannel(chanType, channelOutbound, extra)

	ch.maxIncomingPayload = channelMaxPacket

	open := channelOpenMsg{
		ChanType:         chanType,
		PeersWindow:      ch.myWindow,
		MaxPacketSize:    ch.maxIncomingPayload,
		TypeSpecificData: extra,
		PeersId:          ch.localId,
	}
	if err := m.sendMessage(open); err != nil {
		return nil, err
	}

	// The first message on ch.msg is the peer's answer to the open.
	switch msg := (<-ch.msg).(type) {
	case *channelOpenConfirmMsg:
		return ch, nil
	case *channelOpenFailureMsg:
		return nil, &OpenChannelError{msg.Reason, msg.Message}
	default:
		return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
	}
}
| {
"pile_set_name": "Github"
} |
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Shouldly;
using System;
namespace Mapster.Tests
{
/// <summary>
/// Verifies that <c>IgnoreNonMapped(true)</c> makes Mapster copy only the
/// explicitly mapped members and leave every other destination member untouched.
/// </summary>
[TestClass]
public class WhenIgnoringNonMapped
{
    [TestMethod]
    public void Should_Ignore_Non_Mapped()
    {
        // Only Id is mapped explicitly; all other members must be ignored.
        TypeAdapterConfig<SimplePoco, SimpleDto>.NewConfig()
            .Map(dest => dest.Id, src => src.Id)
            .IgnoreNonMapped(true)
            .Compile();

        var poco = new SimplePoco
        {
            Id = Guid.NewGuid(),
            Name1 = "Name1",
            Name2 = "Name2",
            Name3 = "Name3"
        };

        var dto = poco.Adapt<SimplePoco, SimpleDto>();

        // Id was mapped; the Name properties must stay at their defaults (null).
        dto.Id.ShouldBe(poco.Id);
        dto.Name1.ShouldBeNull();
        dto.Name2.ShouldBeNull();
        dto.Name3.ShouldBeNull();
    }

    #region test classes

    // Source type: one mapped member (Id) and three unmapped ones.
    public class SimplePoco
    {
        public Guid Id { get; set; }

        public string Name1 { get; set; }
        public string Name2 { get; set; }
        public string Name3 { get; set; }
    }

    // Destination type mirroring SimplePoco.
    public class SimpleDto
    {
        public Guid Id { get; set; }

        public string Name1 { get; set; }
        public string Name2 { get; set; }
        public string Name3 { get; set; }
    }

    #endregion
}
}
| {
"pile_set_name": "Github"
} |
using System;
using UnityEngine;
using UnityStandardAssets.CrossPlatformInput.PlatformSpecific;
namespace UnityStandardAssets.CrossPlatformInput
{
/// <summary>
/// Static facade for the cross-platform input system.  Every query is routed to
/// the currently active <c>VirtualInput</c> implementation: touch (mobile) or
/// hardware (standalone), chosen at compile time via MOBILE_INPUT and switchable
/// at runtime with <see cref="SwitchActiveInputMethod"/>.
/// </summary>
public static class CrossPlatformInputManager
{
    public enum ActiveInputMethod
    {
        Hardware,
        Touch
    }

    // Implementation all static members delegate to.
    private static VirtualInput activeInput;

    private static VirtualInput s_TouchInput;
    private static VirtualInput s_HardwareInput;

    static CrossPlatformInputManager()
    {
        s_TouchInput = new MobileInput();
        s_HardwareInput = new StandaloneInput();
#if MOBILE_INPUT
        activeInput = s_TouchInput;
#else
        activeInput = s_HardwareInput;
#endif
    }

    /// <summary>Switches between hardware and touch input at runtime.</summary>
    public static void SwitchActiveInputMethod(ActiveInputMethod activeInputMethod)
    {
        switch (activeInputMethod)
        {
            case ActiveInputMethod.Hardware:
                activeInput = s_HardwareInput;
                break;

            case ActiveInputMethod.Touch:
                activeInput = s_TouchInput;
                break;
        }
    }

    public static bool AxisExists(string name)
    {
        return activeInput.AxisExists(name);
    }

    public static bool ButtonExists(string name)
    {
        return activeInput.ButtonExists(name);
    }

    public static void RegisterVirtualAxis(VirtualAxis axis)
    {
        activeInput.RegisterVirtualAxis(axis);
    }

    public static void RegisterVirtualButton(VirtualButton button)
    {
        activeInput.RegisterVirtualButton(button);
    }

    public static void UnRegisterVirtualAxis(string name)
    {
        if (name == null)
        {
            throw new ArgumentNullException("name");
        }
        activeInput.UnRegisterVirtualAxis(name);
    }

    public static void UnRegisterVirtualButton(string name)
    {
        activeInput.UnRegisterVirtualButton(name);
    }

    // returns a reference to a named virtual axis if it exists otherwise null
    public static VirtualAxis VirtualAxisReference(string name)
    {
        return activeInput.VirtualAxisReference(name);
    }

    // returns the platform appropriate axis for the given name
    public static float GetAxis(string name)
    {
        return GetAxis(name, false);
    }

    public static float GetAxisRaw(string name)
    {
        return GetAxis(name, true);
    }

    // private function handles both types of axis (raw and not raw)
    private static float GetAxis(string name, bool raw)
    {
        return activeInput.GetAxis(name, raw);
    }

    // -- Button handling --

    public static bool GetButton(string name)
    {
        return activeInput.GetButton(name);
    }

    public static bool GetButtonDown(string name)
    {
        return activeInput.GetButtonDown(name);
    }

    public static bool GetButtonUp(string name)
    {
        return activeInput.GetButtonUp(name);
    }

    public static void SetButtonDown(string name)
    {
        activeInput.SetButtonDown(name);
    }

    public static void SetButtonUp(string name)
    {
        activeInput.SetButtonUp(name);
    }

    public static void SetAxisPositive(string name)
    {
        activeInput.SetAxisPositive(name);
    }

    public static void SetAxisNegative(string name)
    {
        activeInput.SetAxisNegative(name);
    }

    public static void SetAxisZero(string name)
    {
        activeInput.SetAxisZero(name);
    }

    public static void SetAxis(string name, float value)
    {
        activeInput.SetAxis(name, value);
    }

    public static Vector3 mousePosition
    {
        get { return activeInput.MousePosition(); }
    }

    public static void SetVirtualMousePositionX(float f)
    {
        activeInput.SetVirtualMousePositionX(f);
    }

    public static void SetVirtualMousePositionY(float f)
    {
        activeInput.SetVirtualMousePositionY(f);
    }

    public static void SetVirtualMousePositionZ(float f)
    {
        activeInput.SetVirtualMousePositionZ(f);
    }

    // virtual axis and button classes - applies to mobile input
    // Can be mapped to touch joysticks, tilt, gyro, etc, depending on desired implementation.
    // Could also be implemented by other input devices - kinect, electronic sensors, etc
    public class VirtualAxis
    {
        public string name { get; private set; }
        private float m_Value;  // latest value written by the controlling object
        public bool matchWithInputManager { get; private set; }

        public VirtualAxis(string name)
            : this(name, true)
        {
        }

        public VirtualAxis(string name, bool matchToInputSettings)
        {
            this.name = name;
            matchWithInputManager = matchToInputSettings;
        }

        // removes an axes from the cross platform input system
        public void Remove()
        {
            UnRegisterVirtualAxis(name);
        }

        // a controller gameobject (eg. a virtual thumbstick) should update this class
        public void Update(float value)
        {
            m_Value = value;
        }

        public float GetValue
        {
            get { return m_Value; }
        }

        // NOTE(review): raw value is identical to the smoothed one for virtual axes
        public float GetValueRaw
        {
            get { return m_Value; }
        }
    }

    // a controller gameobject (eg. a virtual GUI button) should call the
    // 'pressed' function of this class. Other objects can then read the
    // Get/Down/Up state of this button.
    public class VirtualButton
    {
        public string name { get; private set; }
        public bool matchWithInputManager { get; private set; }

        // frame counters; -5 means "never", so Down/Up read false on startup
        private int m_LastPressedFrame = -5;
        private int m_ReleasedFrame = -5;
        private bool m_Pressed;

        public VirtualButton(string name)
            : this(name, true)
        {
        }

        public VirtualButton(string name, bool matchToInputSettings)
        {
            this.name = name;
            matchWithInputManager = matchToInputSettings;
        }

        // A controller gameobject should call this function when the button is pressed down
        public void Pressed()
        {
            if (m_Pressed)
            {
                return;  // ignore repeated press events while held
            }
            m_Pressed = true;
            m_LastPressedFrame = Time.frameCount;
        }

        // A controller gameobject should call this function when the button is released
        public void Released()
        {
            m_Pressed = false;
            m_ReleasedFrame = Time.frameCount;
        }

        // the controller gameobject should call Remove when the button is destroyed or disabled
        public void Remove()
        {
            UnRegisterVirtualButton(name);
        }

        // these are the states of the button which can be read via the cross platform input system
        public bool GetButton
        {
            get { return m_Pressed; }
        }

        // true only during the frame after the press was registered
        public bool GetButtonDown
        {
            get
            {
                return m_LastPressedFrame - Time.frameCount == -1;
            }
        }

        // true only during the frame after the release was registered
        public bool GetButtonUp
        {
            get
            {
                return (m_ReleasedFrame == Time.frameCount - 1);
            }
        }
    }
}
}
| {
"pile_set_name": "Github"
} |
{
"dependencies": [
{
"importpath": "github.com/sdboyer/deptest",
"revision": "3f4c3bea144e112a69bbe5d8d01c1b09a544253f",
"branch": "HEAD"
},
{
"importpath": "github.com/sdboyer/deptestdos",
"revision": "5c607206be5decd28e6263ffffdcee067266015e",
"branch": "master"
},
{
"importpath": "github.com/carolynvs/deptest-importers",
"revision": "b79bc9482da8bb7402cdc3e3fd984db250718dd7",
"branch": "v2"
}
]
}
| {
"pile_set_name": "Github"
} |
// lodash/fp wrapper for `isSafeInteger`.
// `convert` adapts the base implementation to the lodash/fp calling
// convention; `_falseOptions` presumably opts predicates out of the usual
// rearg/curry behavior -- see ./convert for the exact option semantics.
var convert = require('./convert'),
    func = convert('isSafeInteger', require('../isSafeInteger'), require('./_falseOptions'));

// Placeholder value usable in partial application of this function.
func.placeholder = require('./placeholder');
module.exports = func;
| {
"pile_set_name": "Github"
} |
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.components
import scala.xml.NodeSeq
import bootstrap.liftweb.RudderConfig
import net.liftweb.common.EmptyBox
import net.liftweb.common.Loggable
import net.liftweb.common.Full
import net.liftweb.util.Helpers._
import com.normation.rudder.domain.workflows.ChangeRequest
import net.liftweb.common.Box
import com.normation.rudder.domain.policies.RuleId
import com.normation.rudder.domain.nodes.NodeGroupId
import com.normation.rudder.domain.policies.DirectiveId
import com.normation.rudder.web.model.CurrentUser
import com.normation.rudder.AuthorizationType
import com.normation.box._
/*
 * This object is just a service that checks whether a given rule/directive/etc.
 * already has a pending change request on it, so that relevant information can
 * be displayed when that is the case.
 */
object PendingChangeRequestDisplayer extends Loggable{

  // Services resolved once from the application context.
  private[this] val workflowLevel = RudderConfig.workflowLevelService
  private[this] val linkUtil = RudderConfig.linkUtil

  /*
   * Render the given pending change requests into the '#changeRequestList'
   * element of the template. Lookup errors are logged and shown inline;
   * an empty result renders nothing.
   */
  private[this] def displayPendingChangeRequest(xml:NodeSeq, crs:Box[Seq[ChangeRequest]] ) : NodeSeq = {
    crs match {
      case eb: EmptyBox =>
        val e = eb ?~! "Error when trying to lookup pending change request"
        logger.error(e.messageChain)
        e.rootExceptionCause.foreach { ex =>
          logger.error("Exception was:", ex)
        }
        <span class="error">{e.messageChain}</span>
      case Full(crs) if(crs.size == 0) =>
        NodeSeq.Empty
      case Full(crs) =>
        // Need to fold the Element into one parent, or it behaves strangely (repeat the parent ...)
        val pendingChangeRequestLink =crs.foldLeft(NodeSeq.Empty) {
          (res,cr) => res ++
          {
            // Only render the full link when the user is allowed to read the CR.
            if (CurrentUser.checkRights(AuthorizationType.Validator.Read)||CurrentUser.checkRights(AuthorizationType.Deployer.Read)||cr.owner == CurrentUser.actor.name) {
              <li><a href={linkUtil.baseChangeRequestLink(cr.id)}>CR #{cr.id}: {cr.info.name}</a></li>
            } else {
              <li>CR #{cr.id}</li>
            } }
        }
        ("#changeRequestList *+" #> pendingChangeRequestLink ).apply(xml)
    }
  }

  // Signature of the workflow-level lookup functions used below.
  private type checkFunction[T] = (T,Boolean) => Box[Seq[ChangeRequest]]

  /*
   * Common implementation: when the workflow feature is enabled, look up
   * the pending change requests for 'id' with 'check' and render them;
   * otherwise render nothing.
   */
  private[this] def checkChangeRequest[T] (
      xml   : NodeSeq
    , id    : T
    , check : checkFunction[T]
  ): NodeSeq = {
    if (RudderConfig.configService.rudder_workflow_enabled().toBox.getOrElse(false)) {
      val crs = check(id,true)
      displayPendingChangeRequest(xml,crs)
    } else {
      // Workflow disabled, nothing to display
      NodeSeq.Empty
    }
  }

  def checkByRule(xml:NodeSeq,ruleId:RuleId): NodeSeq = {
    checkChangeRequest(xml,ruleId,workflowLevel.getByRule)
  }

  def checkByGroup(xml:NodeSeq,groupId:NodeGroupId): NodeSeq = {
    checkChangeRequest(xml,groupId,workflowLevel.getByNodeGroup)
  }

  def checkByDirective(xml:NodeSeq,directiveId:DirectiveId): NodeSeq = {
    checkChangeRequest(xml,directiveId,workflowLevel.getByDirective)
  }
}
| {
"pile_set_name": "Github"
} |
// Type declaration re-export: expose `intersection` from the package root as
// this module's CommonJS export (`export =` mirrors `module.exports`).
import { intersection } from "../index";
export = intersection;
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<sparql
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xs="http://www.w3.org/2001/XMLSchema#"
xmlns="http://www.w3.org/2005/sparql-results#" >
<head>
<variable name="x"/>
</head>
<results>
</results>
</sparql>
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2014,2019 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
*/
package org.eclipse.smarthome.core.auth.client.oauth2;
import java.io.IOException;
import org.eclipse.jdt.annotation.NonNullByDefault;
import org.eclipse.jdt.annotation.Nullable;
/**
 * This is the service factory to produce an OAuth2 service client that authenticates using OAUTH2.
 * This is a service factory pattern; the OAuth2 service client is not shared between bundles.
 *
 * <p>
 * The basic uses of this OAuthClient are as follows:
 *
 * <p>
 * Use case 1 - For the full authorization code grant flow, as described in RFC 6749 section 4.1
 * https://tools.ietf.org/html/rfc6749#section-4.1
 *
 * <ul>
 * <li>Method {@code #getAuthorizationUrl(String, String, String)} to get an authorization code url
 * <li>Redirect the user-agent/ real user (outside scope of this client)
 * <li>Method {@code #extractAuthCodeFromAuthResponse(String)} to verify and extract the authorization
 * code from the response
 * <li>Method {@code #getAccessTokenResponseByAuthorizationCode(String, String)} to get an access token (may contain
 * optional refresh token) by authorization code extracted in above step.
 * <li>Use the {@code AccessTokenResponse} in code
 * <li>When access token is expired, see Use case 3 - refresh token.
 * </ul>
 *
 * Use case 2 - For Resource Owner Password Credentials Grant, as described in RFC 6749 section 4.3
 * https://tools.ietf.org/html/rfc6749#section-4.3
 *
 * <ul>
 * <li>Method {@code #getAccessTokenByResourceOwnerPasswordCredentials(String, String, String)} to get
 * {@code AccessTokenResponse} (may contain optional refresh token) by username and password
 * <li>Use the {@code AccessTokenResponse} in code
 * <li>When access token is expired, Use {@code #refreshToken()} to get another access token
 * </ul>
 *
 * Use case 3 - Refresh token
 * <ul>
 * <li>Method {@code #refreshToken}
 * </ul>
 *
 * Use case 4 - Client Credentials. This is used to get the AccessToken by purely the client credential (ESH).
 * <ul>
 * <li>Method {@code #getAccessTokenByClientCredentials(String)}
 * </ul>
 *
 * Use case 5 - Implicit Grant (RFC 6749 section 4.2). The implicit grant usually involves browser/javascript
 * redirection flows.
 * <ul>
 * <li>Method {@code #getAccessTokenByImplicit(String, String, String)}
 * </ul>
 * Use case 6 - Import OAuth access token for data migration. Existing implementations may choose to migrate
 * existing OAuth access tokens to be managed by this client.
 * <ul>
 * <li>Method {@code #importAccessTokenResponse(AccessTokenResponse)}
 * </ul>
 *
 * Use case 7 - Get tokens - continue from Use case 1/2/4/5.
 * <ul>
 * <li>Method {@code #getAccessTokenResponse()}
 * </ul>
 *
 * @author Gary Tse - Initial contribution
 * @author Hilbrand Bouwkamp - Added AccessTokenRefreshListener, fixed javadoc warnings
 *
 */
@NonNullByDefault
public interface OAuthClientService extends AutoCloseable {
/**
 * Use case 1 Part (A)
 *
 * This call produces a URL which can be used during the Authorization Code Grant part (A).
 * The OAuthClientService generate an authorization URL, which contains the HTTP query parameters needed for
 * authorization code grant.
 *
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.1">Authorization Code Grant illustration - rfc6749
 *      section-4.1</a>
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.1.1">Concerning which parameters must be set and
 *      which ones are optional - rfc6749 section-4.1.1</a>
 * @param redirectURI is the http request parameter which tells the oauth provider the URI to redirect the
 *            user-agent. This may/ may not be present as per agreement with the oauth provider.
 *            e.g. after the human user authenticate with the oauth provider by the browser, the oauth provider
 *            will redirect the browser to this redirectURL.
 * @param scope Specific scopes, if null the service specified scopes will be used
 * @param state If the state is not null, it will be added as the HTTP query parameter state=xxxxxxxx .
 *            If the state is null, a random UUID will be generated and added state=&lt;random UUID&gt;,
 *            the state will be assigned to the requestParams in this case.
 * @return An authorization URL during the Authorization Code Grant with http request parameters filled in.
 *         e.g Produces an URL string like this:
 *         https://oauth.provider?response_type=code&client_id=myClientId&redirect_uri=redirectURI&scope=myScope&state=mySecureRandomState
 * @throws OAuthException if authorizationUrl or clientId were not previously provided (null)
 */
String getAuthorizationUrl(@Nullable String redirectURI, @Nullable String scope, @Nullable String state)
        throws OAuthException;

/**
 * Use case 1 Part (C). Part (B) is not in this client. Part (B) is about
 * redirecting the user and user-agent and is not in scope. This is a continuation of the flow of Authorization Code
 * Grant, part (C).
 *
 * @param redirectURLwithParams This is the full redirectURI from Part (A),
 *            {@link #getAuthorizationUrl(String, String, String)}, but added with authorizationCode and state
 *            parameters
 *            returned by the oauth provider. It is encoded in application/x-www-form-urlencoded format
 *            as stated in RFC 6749 section 4.1.2.
 *            To quote from the RFC:
 *            HTTP/1.1 302 Found
 *            Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz
 * @return AuthorizationCode This authorizationCode can be used in the call
 *         {@code #getAccessTokenResponseByAuthorizationCode(String, String)}
 * @throws OAuthException If the state from redirectURLwithParams does not exactly match the expectedState, or
 *             exceptions arise while parsing redirectURLwithParams.
 * @see #getAuthorizationUrl(String, String, String) for part (A)
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.1">Authorization Code Grant illustration - rfc6749
 *      section-4.1</a>
 */
String extractAuthCodeFromAuthResponse(String redirectURLwithParams) throws OAuthException;

/**
 * Use case 1 Part (D)
 * This is a continuation of the flow of Authorization Code Grant, part (D).
 *
 * Get the cached access token by authorizationCode. This is exactly
 * RFC 4.1.3 Access Token Request
 *
 * @param authorizationCode authorization code given by part (C) of the Authorization Code Grant
 *            {@link #extractAuthCodeFromAuthResponse(String)}
 * @param redirectURI is the http request parameter which tells the oauth provider the URI to redirect the
 *            user-agent. This may/ may not be present as per agreement with the oauth provider.
 *            e.g. after the human user authenticate with the oauth provider by the browser, the oauth provider
 *            will redirect the browser to this redirectURL.
 * @return AccessTokenResponse
 * @throws IOException IO/ network exceptions
 * @throws OAuthException Other exceptions
 * @throws OAuthResponseException Error codes given by authorization provider, as in RFC 6749 section 5.2 Error
 *             Response
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.1.3">Access Token Request - rfc6749 section-4.1.3</a>
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-5.2">Error Response - rfc6749 section-5.2</a>
 */
AccessTokenResponse getAccessTokenResponseByAuthorizationCode(String authorizationCode, String redirectURI)
        throws OAuthException, IOException, OAuthResponseException;

/**
 * Use case 2 - Resource Owner Password Credentials
 * This is for when the username and password of the actual resource owner (user) is known to the client (ESH).
 *
 * @param username of the user
 * @param password of the user
 * @param scope of the access, a space delimited separated list
 * @return AccessTokenResponse
 * @throws IOException IO/ network exceptions
 * @throws OAuthException Other exceptions
 * @throws OAuthResponseException Error codes given by authorization provider, as in RFC 6749 section 5.2 Error
 *             Response
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.3.2">rfc6749 section-4.3.2</a>
 */
AccessTokenResponse getAccessTokenByResourceOwnerPasswordCredentials(String username, String password,
        @Nullable String scope) throws OAuthException, IOException, OAuthResponseException;

/**
 * Use case 3 - refreshToken. Usually, it is only necessary to call {@code #getAccessTokenResponse()} directly.
 * It automatically takes care of refreshing the token if the token has become expired.
 *
 * If the authorization server has invalidated the access token before the expiry,
 * then this call can be used to get a new access token by using the refresh token.
 *
 * @return new AccessTokenResponse from authorization server
 * @throws IOException Web/ network issues etc.
 * @throws OAuthResponseException For OAUTH error responses.
 * @throws OAuthException For other exceptions.
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-5.2">rfc6749 section-5.2</a>
 */
AccessTokenResponse refreshToken() throws OAuthException, IOException, OAuthResponseException;

/**
 * Use case 4 - Client Credentials
 * This is used to get the AccessToken by purely the client credential. The client
 * in this context is the program making the call to OAuthClientService. The actual
 * resource owner (human user) is not involved.
 *
 * @param scope of the access, a space delimited separated list
 * @return AccessTokenResponse
 * @throws IOException Web/ network issues etc.
 * @throws OAuthResponseException For OAUTH error responses.
 * @throws OAuthException For other exceptions.
 */
AccessTokenResponse getAccessTokenByClientCredentials(@Nullable String scope)
        throws OAuthException, IOException, OAuthResponseException;

/**
 * Use case 5 - Implicit Grant
 * The implicit grant usually involves browser/javascript redirection flows.
 *
 * @param redirectURI is the http request parameter which tells the oauth provider the URI to redirect the
 *            user-agent. This may/ may not be present as per agreement with the oauth provider.
 *            e.g. after the human user authenticate with the oauth provider by the browser, the oauth provider
 *            will redirect the browser to this redirectURL.
 * @param scope of the access, a space delimited separated list
 * @param state An opaque value used by the client to maintain state between the request and callback. Recommended
 *            to prevent cross-site forgery.
 * @return AccessTokenResponse
 * @throws IOException Web/ network issues etc.
 * @throws OAuthResponseException For OAUTH error responses.
 * @throws OAuthException For other exceptions.
 * @see <a href="https://tools.ietf.org/html/rfc6749#section-4.2">Implicit Grant - rfc6749 section-4.2</a>
 */
AccessTokenResponse getAccessTokenByImplicit(@Nullable String redirectURI, @Nullable String scope,
        @Nullable String state) throws OAuthException, IOException, OAuthResponseException;

/**
 * Use case 6 - Import This method is used for importing/ migrating existing Access Token Response to be stored by
 * this service.
 *
 * @param accessTokenResponse the existing access token response to import and manage under this service
 * @throws OAuthException if client is closed
 */
void importAccessTokenResponse(AccessTokenResponse accessTokenResponse) throws OAuthException;

/**
 * Use case 7 - get access token response. The tokens may have been retrieved previously through Use cases: 1d, 2,
 * 3,
 * 4, 6.
 *
 * The implementation uses following ways to get the AccesstokenResponse,
 * in following order :--
 *
 * 1. no token in store ==> return null
 * 2. get from the store, token is still valid ==> return it.
 * 3. get from the store, but token is expired, no refresh token ==> return null
 * 4. get from the store, but token is expired, refresh token available ==> use refresh token to get new access
 * token.
 *
 * @return AccessTokenResponse or null, depending on situations listed above.
 * @throws IOException Web/ network issues etc.
 * @throws OAuthResponseException For OAUTH error responses.
 * @throws OAuthException For other exceptions.
 */
@Nullable
AccessTokenResponse getAccessTokenResponse() throws OAuthException, IOException, OAuthResponseException;

/**
 * Remove all access token issued under this OAuthClientService.
 * Use this to remove existing token or if the access and refresh token has become invalid/ invalidated.
 *
 * @throws OAuthException if client is closed
 */
void remove() throws OAuthException;

/**
 * Stop the service and free underlying resources. This will not remove access tokens stored under the service.
 */
@Override
void close();

/**
 * The client cannot be used anymore if close has been previously called.
 *
 * @return true if client is closed. i.e. {@link #close()} has been called.
 */
boolean isClosed();

/**
 * Adds a {@link AccessTokenRefreshListener}.
 *
 * @param listener the listener to add
 */
void addAccessTokenRefreshListener(AccessTokenRefreshListener listener);

/**
 * Removes the {@link AccessTokenRefreshListener}.
 *
 * @param listener the listener to remove
 * @return {@code true} if the listener was removed as a result of this call
 */
boolean removeAccessTokenRefreshListener(AccessTokenRefreshListener listener);
}
| {
"pile_set_name": "Github"
} |
#-- copyright
# OpenProject is an open source project management software.
# Copyright (C) 2012-2020 the OpenProject GmbH
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2017 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See docs/COPYRIGHT.rdoc for more details.
#++
# make sure to require Widget::Filters::Base first because otherwise
# ruby might find Base within Widget and Rails will not load it
require_dependency 'widget/filters/base'
# Renders a multi-value filter widget for the cost-reports style filter UI:
# a labelled <select> (optionally lazily loaded via AJAX from the
# `available_values` action) plus a "+" toggle that switches the select into
# multi-select mode on the client side.
class Widget::Filters::MultiValues < Widget::Filters::Base
  def render
    # Wrap everything in the standard filter-value container; the id/name
    # conventions (<filter>_arg_1, values[<filter>][]) are what the
    # accompanying JavaScript expects.
    write(content_tag(:div, id: "#{filter_class.underscore_name}_arg_1", class: 'advanced-filters--filter-value') do
      # data-* attributes drive the client-side behavior: remote loading URL,
      # the values to pre-select, and whether options are loaded lazily.
      select_options = { :"data-remote-url" => url_for(action: 'available_values'),
                         :"data-initially-selected" => JSON::dump(Array(filter.values).flatten),
                         style: 'vertical-align: top;', # FIXME: Do CSS
                         name: "values[#{filter_class.underscore_name}][]",
                         :"data-loading" => @options[:lazy] ? 'ajax' : '',
                         id: "#{filter_class.underscore_name}_arg_1_val",
                         class: 'form--select filter-value',
                         :"data-filter-name" => filter_class.underscore_name }
      box_content = ''.html_safe
      # Screen-reader-only label for the select.
      label = label_tag "#{filter_class.underscore_name}_arg_1_val",
                        h(filter_class.label) + ' ' + I18n.t(:label_filter_value),
                        class: 'hidden-for-sighted'
      # The select itself; options are rendered inline unless lazy loading is on.
      # NOTE(review): the Option widget renders into box_content, which is never
      # appended to the output — presumably the options arrive via the block's
      # return value or AJAX; confirm before changing.
      box = content_tag :select, select_options, id: "#{filter_class.underscore_name}_select_1" do
        render_widget Widget::Filters::Option, filter, to: box_content unless @options[:lazy]
      end
      # The "+" link toggling multi-select mode (handled by JS via data-filter-name).
      plus = content_tag :a,
                         href: '#',
                         class: 'form-label filter_multi-select -transparent',
                         :"data-filter-name" => filter_class.underscore_name,
                         title: I18n.t(:description_multi_select) do
        content_tag :span,
                    '',
                    class: 'icon-context icon-button icon-add icon4',
                    title: I18n.t(:label_enable_multi_select) do
          content_tag :span, I18n.t(:label_enable_multi_select), class: 'hidden-for-sighted'
        end
      end
      content_tag(:span, class: 'inline-label') do
        label + box + plus
      end
    end)
  end
end
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.