text
stringlengths 2
100k
| meta
dict |
---|---|
/*----------------------------------------------------------------------------*/
/* Copyright (c) 2015-2020 FIRST. All Rights Reserved. */
/* Open Source Software - may be modified and shared by FRC teams. The code */
/* must be accompanied by the FIRST BSD license file in the root directory of */
/* the project. */
/*----------------------------------------------------------------------------*/
#pragma once
#include <cassert>
#include <cmath>
#include <initializer_list>
#include <vector>
#include <wpi/ArrayRef.h>
#include <wpi/circular_buffer.h>
#include "units/time.h"
#include "wpimath/MathShared.h"
namespace frc {
/**
* This class implements a linear, digital filter. All types of FIR and IIR
* filters are supported. Static factory methods are provided to create commonly
* used types of filters.
*
* Filters are of the form:<br>
* y[n] = (b0 * x[n] + b1 * x[n-1] + … + bP * x[n-P]) -
* (a0 * y[n-1] + a1 * y[n-2] + … + aQ * y[n-Q])
*
* Where:<br>
* y[n] is the output at time "n"<br>
* x[n] is the input at time "n"<br>
* y[n-1] is the output from the LAST time step ("n-1")<br>
* x[n-1] is the input from the LAST time step ("n-1")<br>
* b0 … bP are the "feedforward" (FIR) gains<br>
* a0 … aQ are the "feedback" (IIR) gains<br>
* IMPORTANT! Note the "-" sign in front of the feedback term! This is a common
* convention in signal processing.
*
* What can linear filters do? Basically, they can filter, or diminish, the
* effects of undesirable input frequencies. High frequencies, or rapid changes,
* can be indicative of sensor noise or be otherwise undesirable. A "low pass"
* filter smooths out the signal, reducing the impact of these high frequency
* components. Likewise, a "high pass" filter gets rid of slow-moving signal
* components, letting you detect large changes more easily.
*
* Example FRC applications of filters:
* - Getting rid of noise from an analog sensor input (note: the roboRIO's FPGA
* can do this faster in hardware)
* - Smoothing out joystick input to prevent the wheels from slipping or the
* robot from tipping
* - Smoothing motor commands so that unnecessary strain isn't put on
* electrical or mechanical components
* - If you use clever gains, you can make a PID controller out of this class!
*
* For more on filters, we highly recommend the following articles:<br>
* https://en.wikipedia.org/wiki/Linear_filter<br>
* https://en.wikipedia.org/wiki/Iir_filter<br>
* https://en.wikipedia.org/wiki/Fir_filter<br>
*
* Note 1: Calculate() should be called by the user on a known, regular period.
* You can use a Notifier for this or do it "inline" with code in a
* periodic function.
*
* Note 2: For ALL filters, gains are necessarily a function of frequency. If
* you make a filter that works well for you at, say, 100Hz, you will most
* definitely need to adjust the gains if you then want to run it at 200Hz!
* Combining this with Note 1 - the impetus is on YOU as a developer to make
* sure Calculate() gets called at the desired, constant frequency!
*/
template <class T>
class LinearFilter {
 public:
  /**
   * Create a linear FIR or IIR filter.
   *
   * @param ffGains The "feed forward" or FIR gains.
   * @param fbGains The "feed back" or IIR gains.
   */
  LinearFilter(wpi::ArrayRef<double> ffGains, wpi::ArrayRef<double> fbGains)
      : m_inputs(ffGains.size()),
        m_outputs(fbGains.size()),
        m_inputGains(ffGains),
        m_outputGains(fbGains) {
    // NOTE: a dead `static int instances` counter (incremented, never read)
    // was removed here; usage reporting goes through MathSharedStore.
    wpi::math::MathSharedStore::ReportUsage(
        wpi::math::MathUsageId::kFilter_Linear, 1);
  }

  /**
   * Create a linear FIR or IIR filter.
   *
   * @param ffGains The "feed forward" or FIR gains.
   * @param fbGains The "feed back" or IIR gains.
   */
  LinearFilter(std::initializer_list<double> ffGains,
               std::initializer_list<double> fbGains)
      : LinearFilter(wpi::makeArrayRef(ffGains.begin(), ffGains.end()),
                     wpi::makeArrayRef(fbGains.begin(), fbGains.end())) {}

  // Static methods to create commonly used filters

  /**
   * Creates a one-pole IIR low-pass filter of the form:<br>
   *   y[n] = (1 - gain) * x[n] + gain * y[n-1]<br>
   * where gain = e<sup>-dt / T</sup>, T is the time constant in seconds
   *
   * This filter is stable for time constants greater than zero.
   *
   * @param timeConstant The discrete-time time constant in seconds.
   * @param period       The period in seconds between samples taken by the
   *                     user.
   */
  static LinearFilter<T> SinglePoleIIR(double timeConstant,
                                       units::second_t period) {
    double gain = std::exp(-period.to<double>() / timeConstant);
    // Single feedforward tap (1 - gain), single feedback tap (-gain).
    return LinearFilter(1.0 - gain, -gain);
  }

  /**
   * Creates a first-order high-pass filter of the form:<br>
   *   y[n] = gain * x[n] + (-gain) * x[n-1] + gain * y[n-1]<br>
   * where gain = e<sup>-dt / T</sup>, T is the time constant in seconds
   *
   * This filter is stable for time constants greater than zero.
   *
   * @param timeConstant The discrete-time time constant in seconds.
   * @param period       The period in seconds between samples taken by the
   *                     user.
   */
  static LinearFilter<T> HighPass(double timeConstant, units::second_t period) {
    double gain = std::exp(-period.to<double>() / timeConstant);
    return LinearFilter({gain, -gain}, {-gain});
  }

  /**
   * Creates a K-tap FIR moving average filter of the form:<br>
   *   y[n] = 1/k * (x[n] + x[n-1] + … + x[n-k+1])
   *
   * This filter is always stable.
   *
   * @param taps The number of samples to average over. Higher = smoother but
   *             slower. Must be positive.
   */
  static LinearFilter<T> MovingAverage(int taps) {
    assert(taps > 0);
    // All feedforward gains equal 1/taps; no feedback taps (pure FIR).
    std::vector<double> gains(taps, 1.0 / taps);
    return LinearFilter(gains, {});
  }

  /**
   * Reset the filter state (clears all remembered inputs and outputs).
   */
  void Reset() {
    m_inputs.reset();
    m_outputs.reset();
  }

  /**
   * Calculates the next value of the filter.
   *
   * @param input Current input value.
   *
   * @return The filtered value at this step
   */
  T Calculate(T input) {
    T retVal = T(0.0);

    // Rotate the inputs
    m_inputs.push_front(input);

    // y[n] = sum(b_i * x[n-i]) - sum(a_i * y[n-1-i]); note the feedback
    // terms are subtracted per the sign convention documented above.
    for (size_t i = 0; i < m_inputGains.size(); i++) {
      retVal += m_inputs[i] * m_inputGains[i];
    }
    for (size_t i = 0; i < m_outputGains.size(); i++) {
      retVal -= m_outputs[i] * m_outputGains[i];
    }

    // Rotate the outputs
    m_outputs.push_front(retVal);

    return retVal;
  }

 private:
  wpi::circular_buffer<T> m_inputs;   // x[n], x[n-1], ... (newest first)
  wpi::circular_buffer<T> m_outputs;  // y[n-1], y[n-2], ... (newest first)
  std::vector<double> m_inputGains;   // b0 ... bP (FIR gains)
  std::vector<double> m_outputGains;  // a0 ... aQ (IIR gains)
};
} // namespace frc
| {
"pile_set_name": "Github"
} |
{
"private": true,
"scripts": {
"start": "npm run lint && npm run test",
"lint": "eslint --format json --output-file /usr/lint-output.json --config /usr/linked/.eslintrc.yml /usr/linked/**/*.ts || true",
"test": "jest ./test.js"
},
"devDependencies": {
"eslint": "4.19.1",
"jest": "23.1.0",
"typescript": "~3.2.1"
}
}
| {
"pile_set_name": "Github"
} |
// Backend configuration file.
module.exports = {
  // Database connection settings (MySQL).
  mysqlDB: {
    host: 'localhost',
    user: 'root',
    password: 'root',
    database: 'scscmsdb'
  },
  upPath: 'dist/upFile/', // file-upload destination directory
  // JSON Web Token settings.
  JWTs: {
    secret: 'scscms', // signing key
    expiresIn: '2h' // expiry; suffixes: m = minutes, h = hours, d = days
  },
  // Outgoing (SMTP) mail server settings.
  emailServer: {
    host: 'smtp.126.com',
    port: 465,
    secure: true,
    auth: {
      user: '[email protected]',
      pass: '您的邮箱授权码'
    }
  },
  // Shared helper: best-effort client IPv4 address from a Koa-style context.
  // Tries ctx.ip, then the X-Forwarded-For header, then the raw request /
  // socket addresses, and returns the first dotted-quad IPv4 found, or ''.
  // NOTE(review): IPv6 addresses never match the regex and yield '' —
  // confirm that is intended.
  getClientIP: function (ctx) {
    let req = ctx.request
    let ip = ctx.ip ||
      req.headers['x-forwarded-for'] ||
      req.ip ||
      req.connection.remoteAddress ||
      req.socket.remoteAddress ||
      req.connection.socket.remoteAddress || ''
    let arr = ip.match(/(\d{1,3}\.){3}\d{1,3}/)
    return arr ? arr[0] : ''
  }
}
| {
"pile_set_name": "Github"
} |
# Regenerates the example Task objects shipped with the mlr package.
# Pattern for each block below: fix the RNG seed (so the stored .rda files
# are reproducible), load a public dataset, wrap it in the appropriate mlr
# Task, and save it into data/ via devtools::use_data().
library(mlr)
library(BBmisc)
library(devtools)
DATASEED = 7761 # nolint
COMPRESSION = "xz" # nolint -- xz gives the smallest packaged .rda files
# classification
set.seed(DATASEED)
data(iris, package = "datasets")
iris.task = makeClassifTask("iris-example", data = iris, target = "Species")
use_data(iris.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(Sonar, package = "mlbench")
sonar.task = makeClassifTask("Sonar-example", data = Sonar, target = "Class")
use_data(sonar.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(BreastCancer, package = "mlbench")
# drop the ID column and rows with missing values before building the task
BreastCancer$Id = NULL
BreastCancer = BreastCancer[complete.cases(BreastCancer), ] # nolint
bc.task = makeClassifTask("BreastCancer-example", data = BreastCancer, target = "Class")
use_data(bc.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(PimaIndiansDiabetes, package = "mlbench")
pid.task = makeClassifTask("PimaIndiansDiabetes-example", data = PimaIndiansDiabetes, target = "diabetes", positive = "pos")
use_data(pid.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(spam, package = "kernlab")
spam.task = makeClassifTask("spam-example", data = spam, target = "type")
use_data(spam.task, overwrite = TRUE, compress = COMPRESSION)
# regression
set.seed(DATASEED)
data(BostonHousing, package = "mlbench")
bh.task = makeRegrTask("BostonHousing-example", data = BostonHousing, target = "medv")
use_data(bh.task, overwrite = TRUE, compress = COMPRESSION)
# survival analysis
set.seed(DATASEED)
data(wpbc, package = "TH.data")
# recode status: "R" (recurrence) -> event = 1, otherwise censored = 0
wpbc$status = ifelse(wpbc$status == "R", 1L, 0L)
wpbc = wpbc[complete.cases(wpbc), ]
wpbc.task = makeSurvTask("wpbc-example", data = wpbc, target = c("time", "status"))
use_data(wpbc.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(lung, package = "survival")
# survival::lung codes status as 1/2; shift to the 0/1 convention
lung$status = lung$status - 1
lung = lung[complete.cases(lung), ]
lung.task = makeSurvTask("lung-example", data = lung, target = c("time", "status"))
use_data(lung.task, overwrite = TRUE, compress = COMPRESSION)
# cluster analysis
set.seed(DATASEED)
data(mtcars, package = "datasets")
mtcars.task = makeClusterTask("mtcars-example", data = mtcars)
use_data(mtcars.task, overwrite = TRUE, compress = COMPRESSION)
set.seed(DATASEED)
data(agriculture, package = "cluster")
agri.task = makeClusterTask("agriculture-example", data = agriculture)
use_data(agri.task, overwrite = TRUE, compress = COMPRESSION)
# cost-sensitive classification
set.seed(DATASEED)
data(iris, package = "datasets")
# random misclassification costs in [0, 2000]; zero cost on the diagonal
# (i.e. for predicting the correct class)
cost = matrix(runif(150 * 3, 0, 2000), 150) * (1 - diag(3))[iris$Species, ]
iris$Species = NULL
costiris.task = makeCostSensTask("cost-sensitive iris-example", data = iris, cost = cost)
use_data(costiris.task, overwrite = TRUE, compress = COMPRESSION)
# multilabel
set.seed(DATASEED)
d = load2("thirdparty/yeast.RData")
yeast.task = makeMultilabelTask("yeast-example", data = d, target = paste0("label", 1:14))
use_data(yeast.task, overwrite = TRUE, compress = COMPRESSION)
# FDA classification
set.seed(DATASEED)
gunpoint = load2("thirdparty/gunpoint.RData")
# columns 2:151 are the functional measurements of one curve per row
gp.fdf = makeFunctionalData(gunpoint, fd.features = list("fd" = 2:151))
gunpoint.task = makeClassifTask(data = gp.fdf, target = "X1", positive = "1")
use_data(gunpoint.task, overwrite = TRUE, compress = COMPRESSION)
# FDA regression
set.seed(DATASEED)
data(fuelSubset, package = "FDboost")
# Center / Scale Variables
fuelSubset$UVVIS = scale(fuelSubset$UVVIS, scale = FALSE)
fuelSubset$NIR = scale(fuelSubset$NIR, scale = FALSE)
# rescale both wavelength grids to [0, 1]
fuelSubset$uvvis.lambda = with(fuelSubset, (uvvis.lambda - min(uvvis.lambda)) / (max(uvvis.lambda) - min(uvvis.lambda)))
fuelSubset$nir.lambda = with(fuelSubset, (nir.lambda - min(nir.lambda)) / (max(nir.lambda) - min(nir.lambda)))
len1 = length(fuelSubset$uvvis.lambda)
len2 = length(fuelSubset$nir.lambda)
fdf = list(UVVIS = 1:len1, NIR = (len1 + 1):(len1 + len2))
fs = data.frame("UVVIS" = fuelSubset$UVVIS, "NIR" = fuelSubset$NIR,
"heatan" = fuelSubset$heatan, "h20" = fuelSubset$h2o)
fs.fdf = makeFunctionalData(fs, fd.features = fdf)
fuelsubset.task = makeRegrTask(data = fs.fdf, target = "heatan")
use_data(fuelsubset.task, overwrite = TRUE, compress = COMPRESSION)
# FDA Multiclass Classification
set.seed(DATASEED)
data(phoneme, package = "fda.usc")
ph = as.data.frame(phoneme[["learn"]]$data)
ph[, "classlearn"] = phoneme[["classlearn"]]
fdata = makeFunctionalData(ph, fd.features = list())
phoneme.task = makeClassifTask(data = fdata, target = "classlearn")
use_data(phoneme.task, overwrite = TRUE, compress = COMPRESSION)
# spatial
# NOTE(review): unlike the blocks above, no set.seed() precedes this task --
# confirm that is intentional.
data(ecuador, package = "sperrorest")
coords = ecuador[, c("x", "y")]
ecuador$x = NULL
ecuador$y = NULL
spatial.task = makeClassifTask(target = "slides", data = ecuador, coordinates = coords,
positive = "TRUE")
use_data(spatial.task, overwrite = TRUE, compress = COMPRESSION)
| {
"pile_set_name": "Github"
} |
<?php
declare(strict_types=1);
/*
* +----------------------------------------------------------------------+
* | ThinkSNS Plus |
* +----------------------------------------------------------------------+
* | Copyright (c) 2016-Present ZhiYiChuangXiang Technology Co., Ltd. |
* +----------------------------------------------------------------------+
* | This source file is subject to enterprise private license, that is |
* | bundled with this package in the file LICENSE, and is available |
* | through the world-wide-web at the following url: |
* | https://github.com/slimkit/plus/blob/master/LICENSE |
* +----------------------------------------------------------------------+
* | Author: Slim Kit Group <[email protected]> |
* | Homepage: www.thinksns.com |
* +----------------------------------------------------------------------+
*/
namespace Zhiyi\Plus\FileStorage;
use Illuminate\Support\Manager;
use OSS\OssClient;
use Zhiyi\Plus\AppInterface;
use function Zhiyi\Plus\setting;
class FilesystemManager extends Manager
{
    /**
     * Create the filesystem manager instance.
     *
     * @param \Zhiyi\Plus\AppInterface $app
     */
    public function __construct(AppInterface $app)
    {
        parent::__construct($app);
    }

    /**
     * Get the default driver name.
     *
     * Reads the "default-filesystem" setting from the "file-storage"
     * settings group, falling back to "local".
     *
     * @return string
     */
    public function getDefaultDriver()
    {
        return setting('file-storage', 'default-filesystem', 'local');
    }

    /**
     * Create local driver.
     *
     * Wraps the Laravel filesystem disk named by the
     * "filesystems.local" setting (default disk: "local").
     *
     * @return \Zhiyi\Plus\FileStorage\Filesystems\FilesystemInterface
     */
    public function createLocalDriver(): Filesystems\FilesystemInterface
    {
        $localConfigure = setting('file-storage', 'filesystems.local', [
            'disk' => 'local',
        ]);
        $filesystem = $this
            ->app
            ->make(\Illuminate\Contracts\Filesystem\Factory::class)
            ->disk($localConfigure['disk']);

        return new Filesystems\LocalFilesystem($filesystem);
    }

    /**
     * Create Aliyun OSS filesystem driver.
     *
     * Builds two OSS clients from the "filesystems.aliyun-oss" settings:
     * one using the public endpoint ("domain") and one using the internal
     * endpoint ("inside-domain"); both are handed to the filesystem along
     * with the merged configuration.
     *
     * @return \Zhiyi\Plus\FileStorage\Filesystems\FilesystemInterface
     */
    public function createAliyunOSSDriver(): Filesystems\FilesystemInterface
    {
        $aliyunOssConfigure = setting('file-storage', 'filesystems.aliyun-oss', []);
        // Fill in defaults for any keys the stored settings omit.
        $aliyunOssConfigure = array_merge([
            'bucket' => null,
            'access-key-id' => null,
            'access-key-secret' => null,
            'domain' => null,
            'inside-domain' => null,
            'timeout' => 3600,
        ], $aliyunOssConfigure);
        $oss = new OssClient(
            $aliyunOssConfigure['access-key-id'] ?? null,
            $aliyunOssConfigure['access-key-secret'] ?? null,
            $aliyunOssConfigure['domain'] ?? null,
            true
        );
        $insideOss = new OssClient(
            $aliyunOssConfigure['access-key-id'] ?? null,
            $aliyunOssConfigure['access-key-secret'] ?? null,
            $aliyunOssConfigure['inside-domain'] ?? null,
            true
        );

        return new Filesystems\AliyunOssFilesystem($oss, $insideOss, $aliyunOssConfigure);
    }
}
| {
"pile_set_name": "Github"
} |
{{- if .Values.sysdig.accessKey }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "sysdig.fullname" . }}
labels:
{{ include "sysdig.labels" . | indent 4 }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sysdig.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
name: {{ template "sysdig.fullname" .}}
labels:
{{ include "sysdig.labels" . | indent 8 }}
{{- if .Values.daemonset.annotations }}
annotations:
{{ toYaml .Values.daemonset.annotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "sysdig.serviceAccountName" .}}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
terminationGracePeriodSeconds: 5
{{- if .Values.daemonset.affinity }}
affinity:
{{ toYaml .Values.daemonset.affinity | indent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml .Values.image.pullSecrets | indent 8 }}
{{- end }}
{{- if .Values.slim.enabled }}
initContainers:
- name: sysdig-agent-kmodule
image: {{ template "sysdig.image.kmodule" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml .Values.image.pullSecrets | indent 12 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
privileged: true
runAsNonRoot: false
runAsUser: 0
readOnlyRootFilesystem: false
allowPrivilegeEscalation: true
resources:
{{ toYaml .Values.slim.resources | indent 12 }}
volumeMounts:
- mountPath: /etc/modprobe.d
name: modprobe-d
readOnly: true
- mountPath: /host/boot
name: boot-vol
readOnly: true
- mountPath: /host/lib/modules
name: modules-vol
readOnly: true
- mountPath: /host/usr
name: usr-vol
readOnly: true
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ template "sysdig.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{ toYaml .Values.resources | indent 12 }}
securityContext:
capabilities:
drop:
- ALL
privileged: true
runAsNonRoot: false
runAsUser: 0
readOnlyRootFilesystem: false
allowPrivilegeEscalation: true
env:
{{- if .Values.ebpf.enabled }}
- name: SYSDIG_BPF_PROBE
value:
{{- end }}
{{- if .Values.proxy.httpProxy }}
- name: http_proxy
value: {{ .Values.proxy.httpProxy }}
{{- end }}
{{- if .Values.proxy.httpsProxy }}
- name: https_proxy
value: {{ .Values.proxy.httpsProxy }}
{{- end }}
{{- if .Values.proxy.noProxy }}
- name: no_proxy
value: {{ .Values.proxy.noProxy }}
{{- end }}
{{- if .Values.timezone }}
- name: TZ
value: {{ .Values.timezone }}
{{- end }}
{{- range $key, $value := .Values.daemonset.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
readinessProbe:
exec:
command: [ "test", "-e", "/opt/draios/logs/running" ]
initialDelaySeconds: 10
livenessProbe:
exec:
command: [ "test", "-e", "/opt/draios/logs/running" ]
initialDelaySeconds: 10
volumeMounts:
{{- if not .Values.slim.enabled }}
- mountPath: /etc/modprobe.d
name: modprobe-d
readOnly: true
{{- end }}
- mountPath: /host/dev
name: dev-vol
readOnly: false
- mountPath: /host/proc
name: proc-vol
readOnly: true
{{- if not .Values.slim.enabled }}
- mountPath: /host/boot
name: boot-vol
readOnly: true
- mountPath: /host/lib/modules
name: modules-vol
readOnly: true
- mountPath: /host/usr
name: usr-vol
readOnly: true
{{- end }}
- mountPath: /host/run
name: run-vol
- mountPath: /host/var/run
name: varrun-vol
- mountPath: /dev/shm
name: dshm
- mountPath: /opt/draios/etc/kubernetes/config
name: sysdig-agent-config
- mountPath: /opt/draios/etc/kubernetes/secrets
name: sysdig-agent-secrets
{{- if (and .Values.ebpf.enabled .Values.ebpf.settings.mountEtcVolume) }}
- mountPath: /host/etc
name: etc-fs
readOnly: true
{{- end }}
{{- if .Values.customAppChecks }}
- mountPath: /opt/draios/lib/python/checks.custom.d
name: custom-app-checks-volume
{{- end }}
- mountPath: /host/etc/os-release
name: osrel
readOnly: true
volumes:
- name: modprobe-d
hostPath:
path: /etc/modprobe.d
- name: osrel
hostPath:
path: /etc/os-release
type: FileOrCreate
- name: dshm
emptyDir:
medium: Memory
- name: dev-vol
hostPath:
path: /dev
- name: proc-vol
hostPath:
path: /proc
- name: boot-vol
hostPath:
path: /boot
- name: modules-vol
hostPath:
path: /lib/modules
- name: usr-vol
hostPath:
path: /usr
- name: run-vol
hostPath:
path: /run
- name: varrun-vol
hostPath:
path: /var/run
{{- if (and .Values.ebpf.enabled .Values.ebpf.settings.mountEtcVolume) }}
- name: etc-fs
hostPath:
path: /etc
{{- end }}
- name: sysdig-agent-config
configMap:
name: {{ template "sysdig.fullname" . }}
optional: true
- name: sysdig-agent-secrets
secret:
secretName: {{ template "sysdig.fullname" . }}
{{- if .Values.customAppChecks }}
- name: custom-app-checks-volume
configMap:
name: {{ template "sysdig.fullname" . }}-custom-app-checks
{{- end }}
updateStrategy:
{{ toYaml .Values.daemonset.updateStrategy | indent 4 }}
{{- end }}
| {
"pile_set_name": "Github"
} |
import numbers
import cython
from cython import Py_ssize_t
import numpy as np
cimport numpy as cnp
from numpy cimport float64_t, int64_t, ndarray, uint8_t
cnp.import_array()
from pandas._libs cimport util
from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
checknull_with_nat,
is_null_datetimelike,
)
from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas.compat import IS64
cdef:
    float64_t INF = <float64_t>np.inf  # positive infinity sentinel
    float64_t NEGINF = -INF  # negative infinity sentinel
    int64_t NPY_NAT = util.get_nat()  # numpy's NaT integer representation
    bint is_32bit = not IS64  # platform word size; affects NAType.__hash__
cpdef bint checknull(object val):
    """
    Return True when the input is NA-like, defined here as any of:

    - None
    - nan
    - NaT
    - np.datetime64 representation of NaT
    - np.timedelta64 representation of NaT
    - NA

    Parameters
    ----------
    val : object

    Returns
    -------
    bool

    Notes
    -----
    The difference between `checknull` and `checknull_old` is that `checknull`
    does *not* consider INF or NEGINF to be NA.
    """
    # pd.NA is identity-checked first; everything else is delegated to the
    # shared datetime-aware null check (iNaT integers are not nulls here).
    if val is C_NA:
        return True
    return is_null_datetimelike(val, inat_is_null=False)
cpdef bint checknull_old(object val):
    """
    Return True when the input is NA-like, additionally treating +/-inf
    as NA. Covers any of:

    - None
    - nan
    - INF
    - NEGINF
    - NaT
    - np.datetime64 representation of NaT
    - np.timedelta64 representation of NaT

    Parameters
    ----------
    val : object

    Returns
    -------
    result : bool

    Notes
    -----
    The difference between `checknull` and `checknull_old` is that `checknull`
    does *not* consider INF or NEGINF to be NA.
    """
    if checknull(val):
        return True
    # Only float/complex values can compare equal to +/-inf.
    if util.is_float_object(val) or util.is_complex_object(val):
        return val == INF or val == NEGINF
    return False
@cython.wraparound(False)
@cython.boundscheck(False)
cpdef ndarray[uint8_t] isnaobj(ndarray arr):
    """
    Return boolean mask denoting which elements of a 1-D array are na-like,
    according to the criteria defined in `checknull`:
    - None
    - nan
    - NaT
    - np.datetime64 representation of NaT
    - np.timedelta64 representation of NaT

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    result : ndarray (dtype=np.bool_)
    """
    cdef:
        Py_ssize_t i, n
        object val
        ndarray[uint8_t] result

    assert arr.ndim == 1, "'arr' must be 1-D."

    n = len(arr)
    # uint8 workspace; reinterpreted as bool_ (no copy) on return.
    result = np.empty(n, dtype=np.uint8)
    for i in range(n):
        val = arr[i]
        result[i] = checknull(val)
    return result.view(np.bool_)
@cython.wraparound(False)
@cython.boundscheck(False)
def isnaobj_old(arr: ndarray) -> ndarray:
    """
    Return boolean mask denoting which elements of a 1-D array are na-like,
    defined as being any of:
    - None
    - nan
    - INF
    - NEGINF
    - NaT
    - NA

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    result : ndarray (dtype=np.bool_)
    """
    cdef:
        Py_ssize_t i, n
        object val
        ndarray[uint8_t] result

    assert arr.ndim == 1, "'arr' must be 1-D."

    n = len(arr)
    result = np.zeros(n, dtype=np.uint8)
    for i in range(n):
        val = arr[i]
        # Same as checknull, but additionally flags float +/-inf as NA.
        result[i] = (
            checknull(val)
            or util.is_float_object(val) and (val == INF or val == NEGINF)
        )
    return result.view(np.bool_)
@cython.wraparound(False)
@cython.boundscheck(False)
def isnaobj2d(arr: ndarray) -> ndarray:
    """
    Return boolean mask denoting which elements of a 2-D array are na-like,
    according to the criteria defined in `checknull`:
    - None
    - nan
    - NaT
    - np.datetime64 representation of NaT
    - np.timedelta64 representation of NaT

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    result : ndarray (dtype=np.bool_)

    Notes
    -----
    The difference between `isnaobj2d` and `isnaobj2d_old` is that `isnaobj2d`
    does *not* consider INF or NEGINF to be NA.
    """
    cdef:
        Py_ssize_t i, j, n, m
        object val
        ndarray[uint8_t, ndim=2] result

    assert arr.ndim == 2, "'arr' must be 2-D."

    # Cast to object so `.shape` unpacks as a Python tuple.
    n, m = (<object>arr).shape
    result = np.zeros((n, m), dtype=np.uint8)
    for i in range(n):
        for j in range(m):
            val = arr[i, j]
            if checknull(val):
                result[i, j] = 1
    return result.view(np.bool_)
@cython.wraparound(False)
@cython.boundscheck(False)
def isnaobj2d_old(arr: ndarray) -> ndarray:
    """
    Return boolean mask denoting which elements of a 2-D array are na-like,
    according to the criteria defined in `checknull_old`:
    - None
    - nan
    - INF
    - NEGINF
    - NaT
    - np.datetime64 representation of NaT
    - np.timedelta64 representation of NaT

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    ndarray (dtype=np.bool_)

    Notes
    -----
    The difference between `isnaobj2d` and `isnaobj2d_old` is that `isnaobj2d`
    does *not* consider INF or NEGINF to be NA.
    """
    cdef:
        Py_ssize_t i, j, n, m
        object val
        ndarray[uint8_t, ndim=2] result

    assert arr.ndim == 2, "'arr' must be 2-D."

    # Cast to object so `.shape` unpacks as a Python tuple.
    n, m = (<object>arr).shape
    result = np.zeros((n, m), dtype=np.uint8)
    for i in range(n):
        for j in range(m):
            val = arr[i, j]
            if checknull_old(val):
                result[i, j] = 1
    return result.view(np.bool_)
def isposinf_scalar(val: object) -> bool:
    """Return True when ``val`` is a float equal to positive infinity."""
    if not util.is_float_object(val):
        return False
    return val == INF
def isneginf_scalar(val: object) -> bool:
    """Return True when ``val`` is a float equal to negative infinity."""
    if not util.is_float_object(val):
        return False
    return val == NEGINF
cdef inline bint is_null_datetime64(v):
    # Determine if we have a null for a datetime (or integer versions),
    # excluding np.timedelta64('nat'): True for None/NaN/NaT and for a
    # np.datetime64 whose integer payload equals iNaT.
    if checknull_with_nat(v):
        return True
    elif util.is_datetime64_object(v):
        return get_datetime64_value(v) == NPY_NAT
    return False
cdef inline bint is_null_timedelta64(v):
    # Determine if we have a null for a timedelta (or integer versions),
    # excluding np.datetime64('nat'): True for None/NaN/NaT and for a
    # np.timedelta64 whose integer payload equals iNaT.
    if checknull_with_nat(v):
        return True
    elif util.is_timedelta64_object(v):
        return get_timedelta64_value(v) == NPY_NAT
    return False
cdef bint checknull_with_nat_and_na(object obj):
    # Like checknull_with_nat (None/NaN/NaT) but also treats pd.NA as
    # null. See GH#32214.
    return checknull_with_nat(obj) or obj is C_NA
# -----------------------------------------------------------------------------
# Implementation of NA singleton
def _create_binary_propagating_op(name, is_divmod=False):
    """
    Build a binary dunder method for NAType that propagates NA.

    Scalar-like operands yield NA (or a pair of NAs for divmod); ndarray
    operands yield an object-dtype array filled with NA; anything else
    returns NotImplemented so Python can try the reflected operation.
    """

    def method(self, other):
        # 0-d ndarrays (empty shape) are handled like scalars: the array
        # branch below does a setitem on `out`, which does not work for
        # NumPy scalars.
        treat_as_scalar = (
            other is C_NA
            or isinstance(other, str)
            or isinstance(other, (numbers.Number, np.bool_))
            or (isinstance(other, np.ndarray) and not other.shape)
        )
        if treat_as_scalar:
            return (NA, NA) if is_divmod else NA

        if isinstance(other, np.ndarray):
            out = np.empty(other.shape, dtype=object)
            out[:] = NA
            return (out, out.copy()) if is_divmod else out

        return NotImplemented

    method.__name__ = name
    return method
def _create_unary_propagating_op(name):
    # Build a unary dunder method (e.g. __neg__, __abs__) for NAType that
    # always returns NA; the name is set so tracebacks/introspection show
    # the real operator name.
    def method(self):
        return NA

    method.__name__ = name
    return method
cdef class C_NAType:
    # Cython-level base class so the NA singleton can be referenced and
    # identity-checked (`val is C_NA`) from compiled code.
    pass
class NAType(C_NAType):
    """
    NA ("not available") missing value indicator.

    .. warning::

       Experimental: the behaviour of NA can still change without warning.

    .. versionadded:: 1.0.0

    The NA singleton is a missing value indicator defined by pandas. It is
    used in certain new extension dtypes (currently the "string" dtype).
    """

    # Singleton storage; __new__ always hands back this one instance.
    _instance = None

    def __new__(cls, *args, **kwargs):
        if NAType._instance is None:
            NAType._instance = C_NAType.__new__(cls, *args, **kwargs)
        return NAType._instance

    def __repr__(self) -> str:
        return "<NA>"

    def __format__(self, format_spec) -> str:
        # Format the repr; fall back to the plain repr for format specs
        # that str.__format__ rejects (e.g. numeric specs like ".2f").
        try:
            return self.__repr__().__format__(format_spec)
        except ValueError:
            return self.__repr__()

    def __bool__(self):
        # A missing value has no defined truthiness; refuse explicitly.
        raise TypeError("boolean value of NA is ambiguous")

    def __hash__(self):
        # GH 30013: Ensure hash is large enough to avoid hash collisions
        # with integers
        exponent = 31 if is_32bit else 61
        return 2 ** exponent - 1

    def __reduce__(self):
        # Pickle as the module-level name "NA" so unpickling preserves
        # singleton identity.
        return "NA"

    # Binary arithmetic and comparison ops -> propagate

    __add__ = _create_binary_propagating_op("__add__")
    __radd__ = _create_binary_propagating_op("__radd__")
    __sub__ = _create_binary_propagating_op("__sub__")
    __rsub__ = _create_binary_propagating_op("__rsub__")
    __mul__ = _create_binary_propagating_op("__mul__")
    __rmul__ = _create_binary_propagating_op("__rmul__")
    __matmul__ = _create_binary_propagating_op("__matmul__")
    __rmatmul__ = _create_binary_propagating_op("__rmatmul__")
    __truediv__ = _create_binary_propagating_op("__truediv__")
    __rtruediv__ = _create_binary_propagating_op("__rtruediv__")
    __floordiv__ = _create_binary_propagating_op("__floordiv__")
    __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__")
    __mod__ = _create_binary_propagating_op("__mod__")
    __rmod__ = _create_binary_propagating_op("__rmod__")
    __divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True)
    __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True)
    # __lshift__ and __rshift__ are not implemented

    # Note: comparisons with NA also propagate (NA == x is NA, not False).
    __eq__ = _create_binary_propagating_op("__eq__")
    __ne__ = _create_binary_propagating_op("__ne__")
    __le__ = _create_binary_propagating_op("__le__")
    __lt__ = _create_binary_propagating_op("__lt__")
    __gt__ = _create_binary_propagating_op("__gt__")
    __ge__ = _create_binary_propagating_op("__ge__")

    # Unary ops
    __neg__ = _create_unary_propagating_op("__neg__")
    __pos__ = _create_unary_propagating_op("__pos__")
    __abs__ = _create_unary_propagating_op("__abs__")
    __invert__ = _create_unary_propagating_op("__invert__")

    # pow has special cases: anything ** 0 is 1 and 1 ** anything is 1,
    # even when the other operand is NA.
    def __pow__(self, other):
        if other is C_NA:
            return NA
        elif isinstance(other, (numbers.Number, np.bool_)):
            if other == 0:
                # returning positive is correct for +/- 0.
                return type(other)(1)
            else:
                return NA
        elif isinstance(other, np.ndarray):
            return np.where(other == 0, other.dtype.type(1), NA)

        return NotImplemented

    def __rpow__(self, other):
        if other is C_NA:
            return NA
        elif isinstance(other, (numbers.Number, np.bool_)):
            if other == 1:
                return other
            else:
                return NA
        elif isinstance(other, np.ndarray):
            return np.where(other == 1, other, NA)

        return NotImplemented

    # Logical ops using Kleene logic: False & NA is False, True | NA is
    # True, everything else involving NA is NA.

    def __and__(self, other):
        if other is False:
            return False
        elif other is True or other is C_NA:
            return NA
        return NotImplemented

    __rand__ = __and__

    def __or__(self, other):
        if other is True:
            return True
        elif other is False or other is C_NA:
            return NA
        return NotImplemented

    __ror__ = __or__

    def __xor__(self, other):
        if other is False or other is True or other is C_NA:
            return NA
        return NotImplemented

    __rxor__ = __xor__

    # High priority so NumPy defers mixed ndarray-vs-NA ops to this class.
    __array_priority__ = 1000
    _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool_)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        types = self._HANDLED_TYPES + (NAType,)
        for x in inputs:
            if not isinstance(x, types):
                return NotImplemented

        if method != "__call__":
            raise ValueError(f"ufunc method '{method}' not supported for NA")
        # First try mapping the ufunc onto the corresponding dunder op
        # (e.g. np.add -> __add__), which already propagates NA.
        result = maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is NotImplemented:
            # For a NumPy ufunc that's not a binop, like np.logaddexp:
            # broadcast to the output shape and fill with NA.
            index = [i for i, x in enumerate(inputs) if x is NA][0]
            result = np.broadcast_arrays(*inputs)[index]
            if result.ndim == 0:
                result = result.item()
            if ufunc.nout > 1:
                result = (NA,) * ufunc.nout

        return result
# Instantiate the singleton once at import time.
C_NA = NAType()  # C-visible
NA = C_NA  # Python-visible
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
#include <synchapi.h>
// This must be as small as possible, because its contents are
// injected into the msvcprt.lib and msvcprtd.lib import libraries.
// Do not include or define anything else here.
// In particular, basic_string must not be included here.
// these declarations must be in sync with the those in thr/xthreads.h
typedef void * _Smtx_t;
extern "C" {
static_assert(sizeof(_Smtx_t) == sizeof(SRWLOCK),
"_Smtx_t must be the same size as SRWLOCK.");
static_assert(alignof(_Smtx_t) == alignof(SRWLOCK),
"_Smtx_t must be the same alignment as SRWLOCK.");
void __cdecl _Smtx_lock_exclusive(_Smtx_t * smtx)
{ /* lock shared mutex exclusively */
AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(smtx));
}
void __cdecl _Smtx_lock_shared(_Smtx_t * smtx)
{ /* lock shared mutex non-exclusively */
AcquireSRWLockShared(reinterpret_cast<PSRWLOCK>(smtx));
}
int __cdecl _Smtx_try_lock_exclusive(_Smtx_t * smtx)
{ /* try to lock shared mutex exclusively */
return (TryAcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(smtx)));
}
int __cdecl _Smtx_try_lock_shared(_Smtx_t * smtx)
{ /* try to lock shared mutex non-exclusively */
return (TryAcquireSRWLockShared(reinterpret_cast<PSRWLOCK>(smtx)));
}
void __cdecl _Smtx_unlock_exclusive(_Smtx_t * smtx)
{ /* unlock exclusive shared mutex */
ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(smtx));
}
// Release shared (reader) ownership previously acquired via
// _Smtx_lock_shared / _Smtx_try_lock_shared.
void __cdecl _Smtx_unlock_shared(_Smtx_t * smtx) {
    const PSRWLOCK lock = reinterpret_cast<PSRWLOCK>(smtx);
    ReleaseSRWLockShared(lock);
}
} | {
"pile_set_name": "Github"
} |
#include <QTextStream>
#include "tasks/Task.hpp"
#include <stdexcept>
#include "FlowView.hpp"
namespace flow {
// Constructs a task that will run executable `e` with arguments `as` in
// working directory `wd`. Process output is forwarded to the `out`/`err`
// sinks and `cb` is invoked on successful completion. The task takes
// ownership of `h` (it is deleted in ~Task()).
Task::Task(
FlowEnv env,
const QString& e,
const QStringList& as,
const QString& wd,
Output out,
Output err,
Callback cb,
QObject* h
): env_(env), executor_(e), args_(as), workingDir_(wd), stdout_(out), stderr_(err), callback_(cb), handler_(h) {
// Wire QProcess notifications to this task's slots. String-based
// SIGNAL/SLOT connections are resolved at runtime, so these signatures
// must match QProcess exactly. NOTE(review): error(QProcess::ProcessError)
// is the pre-Qt-5.6 spelling of errorOccurred -- confirm the minimum Qt
// version before modernizing.
connect(&proc_, SIGNAL(readyReadStandardError()), this, SLOT(slotReadStdErr()));
connect(&proc_, SIGNAL(readyReadStandardOutput()), this, SLOT(slotReadStdOut()));
connect(&proc_, SIGNAL(error(QProcess::ProcessError)), this, SLOT(slotProcError(QProcess::ProcessError)));
connect(&proc_, SIGNAL(finished(int, QProcess::ExitStatus)), this, SLOT(slotProcFinished(int, QProcess::ExitStatus)));
connect(&proc_, SIGNAL(started()), this, SLOT(slotProcStarted()));
}
// Kills the child process, removes this task's row from the UI task table
// (matched by the PID string in column 0), and destroys the owned handler.
Task::~Task() {
slotStop();
// Linear scan for the row whose first cell holds our PID.
for (int i = 0; i < env_.view.flowConfig_.ui.tasksTableWidget->rowCount(); ++i) {
if (env_.view.flowConfig_.ui.tasksTableWidget->item(i, 0)->text() == pid_) {
env_.view.flowConfig_.ui.tasksTableWidget->removeRow(i);
break;
}
}
// handler_ is owned by this task (see constructor).
delete handler_;
}
void Task::write(const QString& data) {
proc_.write(qPrintable(data));
}
double Task::runTime() const {
auto dur = end_ - start_;
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
return static_cast<double>(time) / 1000.0;
}
// Starts the child process and registers it (PID, executor, arguments) as
// a new row in the UI task table.
void Task::slotStart() {
try {
proc_.setWorkingDirectory(workingDir_);
proc_.start(executor_, args_);
// NOTE(review): QProcess::start() is asynchronous; processId() may still
// be 0 here if the process has not actually started yet -- confirm, or
// consider capturing the PID in slotProcStarted() instead.
pid_ = QString::number(proc_.processId());
QTextStream(stdout) << "started PID: " << pid_ << " -- " << executor_ << " " << args_.join(QLatin1String(" ")) <<endl;
// Append a row describing this task to the tasks table.
int row = env_.view.flowConfig_.ui.tasksTableWidget->rowCount();
env_.view.flowConfig_.ui.tasksTableWidget->insertRow(row);
env_.view.flowConfig_.ui.tasksTableWidget->setItem(row, 0, new QTableWidgetItem(pid_));
env_.view.flowConfig_.ui.tasksTableWidget->setItem(row, 1, new QTableWidgetItem(executor_));
env_.view.flowConfig_.ui.tasksTableWidget->setItem(row, 2, new QTableWidgetItem(args_.join(QLatin1String(" "))));
} catch (std::exception& ex) {
// On failure make sure the child is not left running, then report.
proc_.kill();
stderr_(QLatin1String(ex.what()) + QLatin1String("\n"));
}
}
// Forcefully terminates the child process and waits for it to exit,
// reporting any exception to the error sink.
void Task::slotStop() {
    try {
        proc_.kill();
        const bool finished = proc_.waitForFinished();
        if (!finished) {
            // TODO:
        }
    } catch (std::exception& ex) {
        stderr_(QLatin1String(ex.what()) + QLatin1String("\n"));
    }
}
// QProcess started() handler: forwards the notification as this task's
// own signal.
void Task::slotProcStarted() {
emit signalStarted();
}
// QProcess error handler: reports the failure and kills the child process.
void Task::slotProcError(QProcess::ProcessError err) {
    (void)err; // the detail is already available via proc_.errorString()
    // Route the failure to the error sink: every other error path in this
    // file (slotStart, slotStop, slotProcFinished) reports via stderr_ with
    // a trailing newline; the original sent this one to stdout_.
    stderr_(QLatin1String("Error: ") + proc_.errorString() + QLatin1String("\n"));
    proc_.kill();
}
void Task::slotReadStdOut() {
QString out = QString::fromLocal8Bit(proc_.readAllStandardOutput().data());
stdout_(out);
}
void Task::slotReadStdErr() {
QString err = QString::fromLocal8Bit(proc_.readAllStandardError().data());
stderr_(err);
}
// QProcess finished() handler: logs the stop, notifies listeners, then
// either invokes the success callback or reports the failure details to
// the error sink.
void Task::slotProcFinished(int exitCode, QProcess::ExitStatus status) {
    QTextStream(stdout) << "stopped PID: " << pid_ << endl;
    emit signalStopped();
    const bool succeeded = (exitCode == 0) && (status == QProcess::NormalExit);
    if (succeeded) {
        callback_();
    } else {
        QString err = QLatin1String("*** flowc terminated *** ");
        err += QLatin1String("exit code: ") + QString::number(exitCode) + QLatin1String("\n");
        err += proc_.errorString() + QLatin1String("\n");
        stderr_(err);
    }
    //env_.view.taskManager_.remove(pid_);
}
// Returns a human-readable command line: "<workingDir>/<executor> <args...>".
QString Task::show() const {
    QString cmd = workingDir_;
    cmd += QLatin1String("/");
    cmd += executor_;
    cmd += QLatin1String(" ");
    cmd += args_.join(QLatin1String(" "));
    return cmd;
}
}
| {
"pile_set_name": "Github"
} |
package migrations_test
import (
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"testing"
migrations "github.com/sourcegraph/sourcegraph/migrations/frontend"
)
const FirstMigration = 1528395684
// TestIDConstraints verifies that *.up.sql migration filenames carry unique
// IDs and that IDs after FirstMigration are sequential (no gaps).
func TestIDConstraints(t *testing.T) {
	upFiles, err := filepath.Glob("*.up.sql")
	if err != nil {
		t.Fatal(err)
	}
	// Group filenames by the numeric ID prefix (everything before '_').
	idToNames := make(map[int][]string)
	for _, file := range upFiles {
		id, err := strconv.Atoi(file[:strings.IndexByte(file, '_')])
		if err != nil {
			t.Fatalf("failed to parse name %q: %v", file, err)
		}
		idToNames[id] = append(idToNames[id], file)
	}
	for id, names := range idToNames {
		// Migrations newer than FirstMigration must form an unbroken
		// sequence: each ID's predecessor must also exist.
		if _, hasPredecessor := idToNames[id-1]; id > FirstMigration && !hasPredecessor {
			t.Errorf("migration with ID %d exists, but previous one (%d) does not", id, id-1)
		}
		if len(names) > 1 {
			t.Errorf("multiple migrations with ID %d: %s", id, strings.Join(names, " "))
		}
	}
}
// TestNeedsGenerate fails when the generated bindata asset list no longer
// matches the *.sql files on disk.
func TestNeedsGenerate(t *testing.T) {
	onDisk, err := filepath.Glob("*.sql")
	if err != nil {
		t.Fatal(err)
	}
	generated := migrations.AssetNames()
	// Compare as sorted sets of names.
	sort.Strings(onDisk)
	sort.Strings(generated)
	if !reflect.DeepEqual(generated, onDisk) {
		t.Fatal("bindata out of date. Please run:\n go generate github.com/sourcegraph/sourcegraph/migrations/...")
	}
}
| {
"pile_set_name": "Github"
} |
### YamlMime:ManagedReference
items:
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs
commentId: T:Rssdp.Infrastructure.ResponseReceivedEventArgs
id: ResponseReceivedEventArgs
parent: Rssdp.Infrastructure
children:
- Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor(HttpResponseMessage,Rssdp.Infrastructure.UdpEndPoint)
- Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
- Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
langs:
- csharp
- vb
name: ResponseReceivedEventArgs
nameWithType: ResponseReceivedEventArgs
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs
type: Class
source:
id: ResponseReceivedEventArgs
path: ''
startLine: 3221
assemblies:
- cs.temp.dll
namespace: Rssdp.Infrastructure
summary: "\nProvides arguments for the <xref href=\"Rssdp.Infrastructure.ISsdpCommunicationsServer.ResponseReceived\" data-throw-if-not-resolved=\"false\"></xref> event.\n"
example: []
syntax:
content: 'public sealed class ResponseReceivedEventArgs : EventArgs'
content.vb: >-
Public NotInheritable Class ResponseReceivedEventArgs
Inherits EventArgs
inheritance:
- System.Object
- System.EventArgs
inheritedMembers:
- System.EventArgs.Empty
- System.Object.ToString
- System.Object.Equals(System.Object)
- System.Object.Equals(System.Object,System.Object)
- System.Object.ReferenceEquals(System.Object,System.Object)
- System.Object.GetHashCode
- System.Object.GetType
- System.Object.MemberwiseClone
modifiers.csharp:
- public
- sealed
- class
modifiers.vb:
- Public
- NotInheritable
- Class
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor(HttpResponseMessage,Rssdp.Infrastructure.UdpEndPoint)
commentId: M:Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor(HttpResponseMessage,Rssdp.Infrastructure.UdpEndPoint)
id: '#ctor(HttpResponseMessage,Rssdp.Infrastructure.UdpEndPoint)'
parent: Rssdp.Infrastructure.ResponseReceivedEventArgs
langs:
- csharp
- vb
name: ResponseReceivedEventArgs(HttpResponseMessage, UdpEndPoint)
nameWithType: ResponseReceivedEventArgs.ResponseReceivedEventArgs(HttpResponseMessage, UdpEndPoint)
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.ResponseReceivedEventArgs(HttpResponseMessage, Rssdp.Infrastructure.UdpEndPoint)
type: Constructor
source:
id: .ctor
path: ''
startLine: 3238
assemblies:
- cs.temp.dll
namespace: Rssdp.Infrastructure
summary: "\nFull constructor.\n"
example: []
syntax:
content: public ResponseReceivedEventArgs(HttpResponseMessage message, UdpEndPoint receivedFrom)
parameters:
- id: message
type: HttpResponseMessage
description: The <see cref="!:HttpResponseMessage"></see> that was received.
- id: receivedFrom
type: Rssdp.Infrastructure.UdpEndPoint
description: A <xref href="Rssdp.Infrastructure.UdpEndPoint" data-throw-if-not-resolved="false"></xref> representing the sender's address (sometimes used for replies).
content.vb: Public Sub New(message As HttpResponseMessage, receivedFrom As UdpEndPoint)
overload: Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor*
modifiers.csharp:
- public
modifiers.vb:
- Public
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
commentId: P:Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
id: Message
parent: Rssdp.Infrastructure.ResponseReceivedEventArgs
langs:
- csharp
- vb
name: Message
nameWithType: ResponseReceivedEventArgs.Message
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
type: Property
source:
id: Message
path: ''
startLine: 3251
assemblies:
- cs.temp.dll
namespace: Rssdp.Infrastructure
summary: "\nThe <see cref=\"!:HttpResponseMessage\"></see> that was received.\n"
example: []
syntax:
content: public HttpResponseMessage Message { get; }
parameters: []
return:
type: HttpResponseMessage
content.vb: Public ReadOnly Property Message As HttpResponseMessage
overload: Rssdp.Infrastructure.ResponseReceivedEventArgs.Message*
modifiers.csharp:
- public
- get
modifiers.vb:
- Public
- ReadOnly
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
commentId: P:Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
id: ReceivedFrom
parent: Rssdp.Infrastructure.ResponseReceivedEventArgs
langs:
- csharp
- vb
name: ReceivedFrom
nameWithType: ResponseReceivedEventArgs.ReceivedFrom
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
type: Property
source:
id: ReceivedFrom
path: ''
startLine: 3259
assemblies:
- cs.temp.dll
namespace: Rssdp.Infrastructure
summary: "\nThe <xref href=\"Rssdp.Infrastructure.UdpEndPoint\" data-throw-if-not-resolved=\"false\"></xref> the response came from.\n"
example: []
syntax:
content: public UdpEndPoint ReceivedFrom { get; }
parameters: []
return:
type: Rssdp.Infrastructure.UdpEndPoint
content.vb: Public ReadOnly Property ReceivedFrom As UdpEndPoint
overload: Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom*
modifiers.csharp:
- public
- get
modifiers.vb:
- Public
- ReadOnly
references:
- uid: Rssdp.Infrastructure.ISsdpCommunicationsServer.ResponseReceived
commentId: E:Rssdp.Infrastructure.ISsdpCommunicationsServer.ResponseReceived
parent: Rssdp.Infrastructure.ISsdpCommunicationsServer
isExternal: false
name: ResponseReceived
nameWithType: ISsdpCommunicationsServer.ResponseReceived
fullName: Rssdp.Infrastructure.ISsdpCommunicationsServer.ResponseReceived
- uid: Rssdp.Infrastructure
commentId: N:Rssdp.Infrastructure
isExternal: false
name: Rssdp.Infrastructure
nameWithType: Rssdp.Infrastructure
fullName: Rssdp.Infrastructure
- uid: System.Object
commentId: T:System.Object
parent: System
isExternal: true
name: Object
nameWithType: Object
fullName: System.Object
- uid: System.EventArgs
commentId: T:System.EventArgs
parent: System
isExternal: true
name: EventArgs
nameWithType: EventArgs
fullName: System.EventArgs
- uid: System.EventArgs.Empty
commentId: F:System.EventArgs.Empty
parent: System.EventArgs
isExternal: true
name: Empty
nameWithType: EventArgs.Empty
fullName: System.EventArgs.Empty
- uid: System.Object.ToString
commentId: M:System.Object.ToString
parent: System.Object
isExternal: true
name: ToString()
nameWithType: Object.ToString()
fullName: System.Object.ToString()
spec.csharp:
- uid: System.Object.ToString
name: ToString
nameWithType: Object.ToString
fullName: System.Object.ToString
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.ToString
name: ToString
nameWithType: Object.ToString
fullName: System.Object.ToString
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
- uid: System.Object.Equals(System.Object)
commentId: M:System.Object.Equals(System.Object)
parent: System.Object
isExternal: true
name: Equals(Object)
nameWithType: Object.Equals(Object)
fullName: System.Object.Equals(System.Object)
spec.csharp:
- uid: System.Object.Equals(System.Object)
name: Equals
nameWithType: Object.Equals
fullName: System.Object.Equals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.Equals(System.Object)
name: Equals
nameWithType: Object.Equals
fullName: System.Object.Equals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
- uid: System.Object.Equals(System.Object,System.Object)
commentId: M:System.Object.Equals(System.Object,System.Object)
parent: System.Object
isExternal: true
name: Equals(Object, Object)
nameWithType: Object.Equals(Object, Object)
fullName: System.Object.Equals(System.Object, System.Object)
spec.csharp:
- uid: System.Object.Equals(System.Object,System.Object)
name: Equals
nameWithType: Object.Equals
fullName: System.Object.Equals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: ', '
nameWithType: ', '
fullName: ', '
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.Equals(System.Object,System.Object)
name: Equals
nameWithType: Object.Equals
fullName: System.Object.Equals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: ', '
nameWithType: ', '
fullName: ', '
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
- uid: System.Object.ReferenceEquals(System.Object,System.Object)
commentId: M:System.Object.ReferenceEquals(System.Object,System.Object)
parent: System.Object
isExternal: true
name: ReferenceEquals(Object, Object)
nameWithType: Object.ReferenceEquals(Object, Object)
fullName: System.Object.ReferenceEquals(System.Object, System.Object)
spec.csharp:
- uid: System.Object.ReferenceEquals(System.Object,System.Object)
name: ReferenceEquals
nameWithType: Object.ReferenceEquals
fullName: System.Object.ReferenceEquals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: ', '
nameWithType: ', '
fullName: ', '
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.ReferenceEquals(System.Object,System.Object)
name: ReferenceEquals
nameWithType: Object.ReferenceEquals
fullName: System.Object.ReferenceEquals
isExternal: true
- name: (
nameWithType: (
fullName: (
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: ', '
nameWithType: ', '
fullName: ', '
- uid: System.Object
name: Object
nameWithType: Object
fullName: System.Object
isExternal: true
- name: )
nameWithType: )
fullName: )
- uid: System.Object.GetHashCode
commentId: M:System.Object.GetHashCode
parent: System.Object
isExternal: true
name: GetHashCode()
nameWithType: Object.GetHashCode()
fullName: System.Object.GetHashCode()
spec.csharp:
- uid: System.Object.GetHashCode
name: GetHashCode
nameWithType: Object.GetHashCode
fullName: System.Object.GetHashCode
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.GetHashCode
name: GetHashCode
nameWithType: Object.GetHashCode
fullName: System.Object.GetHashCode
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
- uid: System.Object.GetType
commentId: M:System.Object.GetType
parent: System.Object
isExternal: true
name: GetType()
nameWithType: Object.GetType()
fullName: System.Object.GetType()
spec.csharp:
- uid: System.Object.GetType
name: GetType
nameWithType: Object.GetType
fullName: System.Object.GetType
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.GetType
name: GetType
nameWithType: Object.GetType
fullName: System.Object.GetType
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
- uid: System.Object.MemberwiseClone
commentId: M:System.Object.MemberwiseClone
parent: System.Object
isExternal: true
name: MemberwiseClone()
nameWithType: Object.MemberwiseClone()
fullName: System.Object.MemberwiseClone()
spec.csharp:
- uid: System.Object.MemberwiseClone
name: MemberwiseClone
nameWithType: Object.MemberwiseClone
fullName: System.Object.MemberwiseClone
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
spec.vb:
- uid: System.Object.MemberwiseClone
name: MemberwiseClone
nameWithType: Object.MemberwiseClone
fullName: System.Object.MemberwiseClone
isExternal: true
- name: (
nameWithType: (
fullName: (
- name: )
nameWithType: )
fullName: )
- uid: Rssdp.Infrastructure.ISsdpCommunicationsServer
commentId: T:Rssdp.Infrastructure.ISsdpCommunicationsServer
parent: Rssdp.Infrastructure
isExternal: false
name: ISsdpCommunicationsServer
nameWithType: ISsdpCommunicationsServer
fullName: Rssdp.Infrastructure.ISsdpCommunicationsServer
- uid: System
commentId: N:System
isExternal: false
name: System
nameWithType: System
fullName: System
- uid: Rssdp.Infrastructure.UdpEndPoint
commentId: T:Rssdp.Infrastructure.UdpEndPoint
parent: Rssdp.Infrastructure
isExternal: false
name: UdpEndPoint
nameWithType: UdpEndPoint
fullName: Rssdp.Infrastructure.UdpEndPoint
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor*
commentId: Overload:Rssdp.Infrastructure.ResponseReceivedEventArgs.#ctor
isExternal: false
name: ResponseReceivedEventArgs
nameWithType: ResponseReceivedEventArgs.ResponseReceivedEventArgs
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.ResponseReceivedEventArgs
- uid: HttpResponseMessage
isExternal: true
name: HttpResponseMessage
nameWithType: HttpResponseMessage
fullName: HttpResponseMessage
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.Message*
commentId: Overload:Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
isExternal: false
name: Message
nameWithType: ResponseReceivedEventArgs.Message
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.Message
- uid: Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom*
commentId: Overload:Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
isExternal: false
name: ReceivedFrom
nameWithType: ResponseReceivedEventArgs.ReceivedFrom
fullName: Rssdp.Infrastructure.ResponseReceivedEventArgs.ReceivedFrom
| {
"pile_set_name": "Github"
} |
{
"name": "Holiday Extras",
"displayName": "Holiday Extras",
"properties": [
"dock-yard.io",
"hxtrack.com"
],
"prevalence": {
"tracking": 0,
"nonTracking": 0.0000147,
"total": 0.0000147
}
} | {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
// class-dump-reconstructed declaration of CUTWeakReference (hidden symbol):
// a box around a weakly-referenced object.
// NOTE(review): semantics below are inferred from the declared interface of
// this generated header -- confirm against the original implementation.
__attribute__((visibility("hidden")))
@interface CUTWeakReference : NSObject
{
id _object; // NOTE(review): presumably the weakly-held referent -- confirm
unsigned long long _objectAddress; // raw referent address; likely backs hash/isEqual even after the referent dies -- TODO confirm
}
+ (id)weakRefWithObject:(id)arg1; // factory: returns a new weak-reference box for arg1
- (id)object; // the referent (or nil once deallocated -- TODO confirm)
- (BOOL)isEqual:(id)arg1;
- (unsigned long long)hash;
- (void)dealloc;
@end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" standalone="no" ?>
<!DOCTYPE pov SYSTEM "/usr/share/cgc-docs/replay.dtd"><pov>
<cbid>CROMU_00011</cbid>
<replay>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>W6vFb0 = |"sKq","C","5rPdi","GX7SRZ"|+|"nrAzRg","5rPdi"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>mn7H467 = |"IdKvZ","8i","GrcW","vy","YZr0","eo","WnW7zJ2","1fe","4UPQnN"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>tm9uCZ = |"vy","4UPQnN"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>tm9uCZ@mn7H467\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>TRUE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>fvU7dVG = |"SJ","BVfhLcO","cmh6ax","ppHQH","NQjzpPq","Jkr"|+|"J","WiMmNYV","ppHQH","7UMdw","Jkr"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>f253Qp = |"MnF","9ZT","U8VMp"|-|"rl","U8VMp","mujxYf"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>pGZXYh = |"2KEEwY","5Bpke","uK0","z","A1ns","WJ"|-|"6HH","BgJni","zy5kb","U","VvrFer","ln","uK0"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>cAl7G = |"ICS3aiQ","fY","2TkUx","Pn","mK2lPO2","AcVJ4","Hkzy2t7","cxRmvJN","0RuQUC"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>Egzjfo = |"AcVJ4","fY","mK2lPO2","xG5osHzEpH"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>Egzjfo@cAl7G\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>FALSE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>8lXvr08 = |"klyuyU","AYt","iJlv","oM9yNc4"|~|"mNu8oC6","GOrOlT","8a","AYt","ckaVaI","l2aRyAa"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>v4olwI8 = |"WWJkIeb","iayxRSc","cswQ","kahR3Z","4zs","pVFWL"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>PGfr2 = |"iayxRSc","cswQ","kahR3Z"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>PGfr2@v4olwI8\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>TRUE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>7WUrhj = |"YseEKG","b4w","cfaMbTL","tTdz","W7OrPY","K8EPlwX","JINdu7Z","s6SHC"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>xKt5dBf = |"JINdu7Z","s6SHC"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>xKt5dBf@7WUrhj\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>TRUE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>cPupwn0q2M = pGZXYh ^ 7WUrhj\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>1hf2ttA = tm9uCZ + fvU7dVG\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>PB3eZxla = PGfr2 ^ fvU7dVG\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>AGQhe = cPupwn0q2M - mn7H467\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>kVLiOB = |"vu","rPiH","WN","wOO","4rTeGT","iezyo","Hj","HO5","yjtZNhV","yzG"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>pp7EcXk = |"WN","wOO","iezyo","4rTeGT","HO5","vu","rPiH"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>pp7EcXk@kVLiOB\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>TRUE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>.p\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>W6vFb0 = |"sKq","C","5rPdi","GX7SRZ","nrAzRg"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>mn7H467 = |"IdKvZ","8i","GrcW","vy","YZr0","eo","WnW7zJ2","1fe","4UPQnN"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>tm9uCZ = |"vy","4UPQnN"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>fvU7dVG = |"SJ","BVfhLcO","cmh6ax","ppHQH","NQjzpPq","Jkr","J","WiMmNYV","7UMdw"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>f253Qp = |"MnF","9ZT"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>pGZXYh = |"2KEEwY","5Bpke","z","A1ns","WJ"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>cAl7G = |"ICS3aiQ","fY","2TkUx","Pn","mK2lPO2","AcVJ4","Hkzy2t7","cxRmvJN","0RuQUC"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>Egzjfo = |"AcVJ4","fY","mK2lPO2","xG5osHzEpH"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>8lXvr08 = |"klyuyU","iJlv","oM9yNc4","mNu8oC6","GOrOlT","8a","ckaVaI","l2aRyAa"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>v4olwI8 = |"WWJkIeb","iayxRSc","cswQ","kahR3Z","4zs","pVFWL"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>PGfr2 = |"iayxRSc","cswQ","kahR3Z"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>7WUrhj = |"YseEKG","b4w","cfaMbTL","tTdz","W7OrPY","K8EPlwX","JINdu7Z","s6SHC"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>xKt5dBf = |"JINdu7Z","s6SHC"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>cPupwn0q2M = ||\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>1hf2ttA = |"vy","4UPQnN","SJ","BVfhLcO","cmh6ax","ppHQH","NQjzpPq","Jkr","J","WiMmNYV","7UMdw"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>PB3eZxla = ||\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>AGQhe = ||\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>kVLiOB = |"vu","rPiH","WN","wOO","4rTeGT","iezyo","Hj","HO5","yjtZNhV","yzG"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>pp7EcXk = |"WN","wOO","iezyo","4rTeGT","HO5","vu","rPiH"|\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>.l\n</data></write>
</replay>
</pov>
| {
"pile_set_name": "Github"
} |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <memory>
#include <string>
#include <vector>
#include "base/bind.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/strings/stringprintf.h"
#include "base/test/simple_test_clock.h"
#include "base/test/values_test_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "net/base/ip_address.h"
#include "net/base/net_errors.h"
#include "net/network_error_logging/mock_persistent_nel_store.h"
#include "net/network_error_logging/network_error_logging_service.h"
#include "net/reporting/reporting_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
#include "url/origin.h"
namespace net {
namespace {
// The tests are parametrized on a boolean value which represents whether or not
// to use a MockPersistentNelStore.
// If a MockPersistentNelStore is used, then calls to
// NetworkErrorLoggingService::OnHeader(), OnRequest(),
// QueueSignedExchangeReport(), RemoveBrowsingData(), and
// RemoveAllBrowsingData() will block until the store finishes loading.
// Therefore, for tests that should run synchronously (i.e. tests that don't
// specifically test the asynchronous/deferred task behavior), FinishLoading()
// must be called after the first call to one of the above methods.
// Test fixture, parametrized on whether a MockPersistentNelStore backs the
// service (see the file comment above for the synchronization implications).
class NetworkErrorLoggingServiceTest : public ::testing::TestWithParam<bool> {
protected:
// Creates the service (with or without a store, per the test parameter)
// and attaches a TestReportingService to capture generated reports.
NetworkErrorLoggingServiceTest() {
if (GetParam()) {
store_ = std::make_unique<MockPersistentNelStore>();
} else {
store_.reset(nullptr);
}
service_ = NetworkErrorLoggingService::Create(store_.get());
CreateReportingService();
}
// Attaches a fresh TestReportingService to |service_|. May only be called
// once per service instance (DCHECKed).
void CreateReportingService() {
DCHECK(!reporting_service_);
reporting_service_ = std::make_unique<TestReportingService>();
service_->SetReportingService(reporting_service_.get());
}
// Builds a RequestDetails for |url| failing with |error_type|, filling the
// remaining fields with fixture defaults.
NetworkErrorLoggingService::RequestDetails MakeRequestDetails(
GURL url,
Error error_type,
std::string method = "GET",
int status_code = 0,
IPAddress server_ip = IPAddress()) {
NetworkErrorLoggingService::RequestDetails details;
details.uri = url;
details.referrer = kReferrer_;
details.user_agent = kUserAgent_;
// Fall back to the fixture's default server IP when none is supplied.
details.server_ip = server_ip.IsValid() ? server_ip : kServerIP_;
details.method = std::move(method);
details.status_code = status_code;
details.elapsed_time = base::TimeDelta::FromSeconds(1);
details.type = error_type;
details.reporting_upload_depth = 0;
return details;
}
// Builds a SignedExchangeReportDetails with the given outcome and URLs,
// filling the remaining fields with fixture defaults.
NetworkErrorLoggingService::SignedExchangeReportDetails
MakeSignedExchangeReportDetails(bool success,
const std::string& type,
const GURL& outer_url,
const GURL& inner_url,
const GURL& cert_url,
const IPAddress& server_ip_address) {
NetworkErrorLoggingService::SignedExchangeReportDetails details;
details.success = success;
details.type = type;
details.outer_url = outer_url;
details.inner_url = inner_url;
details.cert_url = cert_url;
details.referrer = kReferrer_.spec();
details.server_ip_address = server_ip_address;
details.protocol = "http/1.1";
details.method = "GET";
details.status_code = 200;
details.elapsed_time = base::TimeDelta::FromMilliseconds(1234);
details.user_agent = kUserAgent_;
return details;
}
// Accessors for the objects under test.
NetworkErrorLoggingService* service() { return service_.get(); }
MockPersistentNelStore* store() { return store_.get(); }
// Reports captured so far by the test reporting service.
const std::vector<TestReportingService::Report>& reports() {
return reporting_service_->reports();
}
// Returns the origin https://example<index>.com.
const url::Origin MakeOrigin(size_t index) {
GURL url(base::StringPrintf("https://example%zd.com/", index));
return url::Origin::Create(url);
}
// Builds a NelPolicy for |origin| with the given expiry/last-used times
// (both default to the null base::Time).
NetworkErrorLoggingService::NelPolicy MakePolicyForOrigin(
url::Origin origin,
base::Time expires = base::Time(),
base::Time last_used = base::Time()) {
NetworkErrorLoggingService::NelPolicy policy;
policy.origin = std::move(origin);
policy.expires = expires;
policy.last_used = last_used;
return policy;
}
// Returns whether the NetworkErrorLoggingService has a policy corresponding
// to |origin|. Returns true if so, even if the policy is expired.
bool HasPolicyForOrigin(const url::Origin& origin) {
std::set<url::Origin> all_policy_origins =
service_->GetPolicyOriginsForTesting();
return all_policy_origins.find(origin) != all_policy_origins.end();
}
// Number of policies currently held by the service.
size_t PolicyCount() { return service_->GetPolicyOriginsForTesting().size(); }
// Makes the rest of the test run synchronously.
void FinishLoading(bool load_success) {
if (store())
store()->FinishLoading(load_success);
}
// Fixture constants: URLs, origins, IPs and NEL headers shared by the tests.
const GURL kUrl_ = GURL("https://example.com/path");
const GURL kUrlDifferentPort_ = GURL("https://example.com:4433/path");
const GURL kUrlSubdomain_ = GURL("https://subdomain.example.com/path");
const GURL kUrlDifferentHost_ = GURL("https://example2.com/path");
const GURL kUrlEtld_ = GURL("https://co.uk/foo.html");
const GURL kInnerUrl_ = GURL("https://example.net/path");
const GURL kCertUrl_ = GURL("https://example.com/cert_path");
const IPAddress kServerIP_ = IPAddress(192, 168, 0, 1);
const IPAddress kOtherServerIP_ = IPAddress(192, 168, 0, 2);
const url::Origin kOrigin_ = url::Origin::Create(kUrl_);
const url::Origin kOriginDifferentPort_ =
url::Origin::Create(kUrlDifferentPort_);
const url::Origin kOriginSubdomain_ = url::Origin::Create(kUrlSubdomain_);
const url::Origin kOriginDifferentHost_ =
url::Origin::Create(kUrlDifferentHost_);
const url::Origin kOriginEtld_ = url::Origin::Create(kUrlEtld_);
const std::string kHeader_ = "{\"report_to\":\"group\",\"max_age\":86400}";
const std::string kHeaderSuccessFraction0_ =
"{\"report_to\":\"group\",\"max_age\":86400,\"success_fraction\":0.0}";
const std::string kHeaderSuccessFraction1_ =
"{\"report_to\":\"group\",\"max_age\":86400,\"success_fraction\":1.0}";
const std::string kHeaderIncludeSubdomains_ =
"{\"report_to\":\"group\",\"max_age\":86400,\"include_subdomains\":true}";
const std::string kHeaderMaxAge0_ = "{\"max_age\":0}";
const std::string kHeaderTooLong_ =
"{\"report_to\":\"group\",\"max_age\":86400,\"junk\":\"" +
std::string(32 * 1024, 'a') + "\"}";
const std::string kHeaderTooDeep_ =
"{\"report_to\":\"group\",\"max_age\":86400,\"junk\":[[[[[[[[[[]]]]]]]]]]"
"}";
const std::string kUserAgent_ = "Mozilla/1.0";
const std::string kGroup_ = "group";
const std::string kType_ = NetworkErrorLoggingService::kReportType;
const GURL kReferrer_ = GURL("https://referrer.com/");
// |store_| needs to outlive |service_|.
std::unique_ptr<MockPersistentNelStore> store_;
std::unique_ptr<NetworkErrorLoggingService> service_;
std::unique_ptr<TestReportingService> reporting_service_;
};
// Asserts that dictionary |value| holds double |expected_value| under |key|.
void ExpectDictDoubleValue(double expected_value,
                           const base::DictionaryValue& value,
                           const std::string& key) {
  double actual = 0.0;
  EXPECT_TRUE(value.GetDouble(key, &actual)) << key;
  EXPECT_DOUBLE_EQ(expected_value, actual) << key;
}
// Sanity check: the fixture's default-constructed service exists.
TEST_P(NetworkErrorLoggingServiceTest, CreateService) {
  // Service is created by default in the test fixture.
  EXPECT_TRUE(service());
}
// A service with no ReportingService attached must tolerate OnRequest
// without crashing.
TEST_P(NetworkErrorLoggingServiceTest, NoReportingService) {
  service_ = NetworkErrorLoggingService::Create(store_.get());
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Should not crash.
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
}
// Requests for an origin with no NEL policy produce no reports.
TEST_P(NetworkErrorLoggingServiceTest, NoPolicyForOrigin) {
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_TRUE(reports().empty());
}
// An oversized NEL header is rejected, so no policy is set and no report
// is queued for a subsequent error.
TEST_P(NetworkErrorLoggingServiceTest, JsonTooLong) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderTooLong_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// A NEL header with overly deep JSON nesting is rejected; no policy, no
// reports.
TEST_P(NetworkErrorLoggingServiceTest, JsonTooDeep) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderTooDeep_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// An include_subdomains policy set on a bare eTLD (e.g. co.uk) must be
// rejected outright.
TEST_P(NetworkErrorLoggingServiceTest, IncludeSubdomainsEtldRejected) {
  service()->OnHeader(kOriginEtld_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_EQ(0u, PolicyCount());
  service()->OnRequest(MakeRequestDetails(kUrlEtld_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// A policy on an eTLD origin without include_subdomains is allowed, and
// errors on that exact origin are reported.
TEST_P(NetworkErrorLoggingServiceTest, NonIncludeSubdomainsEtldAccepted) {
  service()->OnHeader(kOriginEtld_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_EQ(1u, PolicyCount());
  service()->OnRequest(MakeRequestDetails(kUrlEtld_, ERR_CONNECTION_REFUSED));
  EXPECT_EQ(1u, reports().size());
  EXPECT_EQ(kUrlEtld_, reports()[0].url);
}
// With success_fraction 1.0, a successful request queues a fully-populated
// "ok" report; verify every body field.
TEST_P(NetworkErrorLoggingServiceTest, SuccessReportQueued) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, OK));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kUserAgent_, reports()[0].user_agent);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  // TODO(juliatuttle): Extract these constants.
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(1000, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("application", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("ok", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// With failure_fraction 1.0, a connection failure queues a "tcp.refused"
// report in the "connection" phase; verify every body field.
TEST_P(NetworkErrorLoggingServiceTest, FailureReportQueued) {
  static const std::string kHeaderFailureFraction1 =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":1.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFraction1);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kUserAgent_, reports()[0].user_agent);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  // TODO(juliatuttle): Extract these constants.
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(1000, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("connection", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("tcp.refused", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// An error code with no NEL type mapping yields type "unknown" in the
// "application" phase.
TEST_P(NetworkErrorLoggingServiceTest, UnknownFailureReportQueued) {
  static const std::string kHeaderFailureFraction1 =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":1.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFraction1);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // This error code happens to not be mapped to a NEL report `type` field
  // value.
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_FILE_NO_SPACE));
  ASSERT_EQ(1u, reports().size());
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue("application", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("unknown", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// An unmapped certificate error yields type "unknown" but phase
// "connection", since cert errors happen during connection setup.
TEST_P(NetworkErrorLoggingServiceTest, UnknownCertFailureReportQueued) {
  static const std::string kHeaderFailureFraction1 =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":1.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFraction1);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // This error code happens to not be mapped to a NEL report `type` field
  // value. Because it's a certificate error, we'll set the `phase` to be
  // `connection`.
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CERT_NON_UNIQUE_NAME));
  ASSERT_EQ(1u, reports().size());
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue("connection", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("unknown", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// An HTTP 5xx response (network OK, status 504) is reported as
// "http.error" in the "application" phase with the real status code.
TEST_P(NetworkErrorLoggingServiceTest, HttpErrorReportQueued) {
  static const std::string kHeaderFailureFraction1 =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":1.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFraction1);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, OK, "GET", 504));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kUserAgent_, reports()[0].user_agent);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  // TODO(juliatuttle): Extract these constants.
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(504, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(1000, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("application", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("http.error", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// When the request reached a different server IP than the one the policy
// was received from, the report is downgraded to "dns.address_changed"
// with status/elapsed-time zeroed out.
TEST_P(NetworkErrorLoggingServiceTest, SuccessReportDowngraded) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrl_, OK, "GET", 200, kOtherServerIP_));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kOtherServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("dns", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("dns.address_changed", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// A failure against a different server IP is likewise downgraded to
// "dns.address_changed" rather than reporting the underlying error.
TEST_P(NetworkErrorLoggingServiceTest, FailureReportDowngraded) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED, "GET",
                                          200, kOtherServerIP_));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kOtherServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("dns", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("dns.address_changed", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// An HTTP error (504) against a different server IP is also downgraded to
// "dns.address_changed"; the status code is suppressed (0).
TEST_P(NetworkErrorLoggingServiceTest, HttpErrorReportDowngraded) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrl_, OK, "GET", 504, kOtherServerIP_));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kOtherServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("dns", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("dns.address_changed", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// A DNS resolution failure is reported as-is ("dns.name_not_resolved")
// even when the server IP differs — DNS errors are exempt from the
// IP-change downgrade.
TEST_P(NetworkErrorLoggingServiceTest, DNSFailureReportNotDowngraded) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_NAME_NOT_RESOLVED, "GET",
                                          0, kOtherServerIP_));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kOtherServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(0, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(1000, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue("dns", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("dns.name_not_resolved", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// A successful POST is reported with method "POST" and type "ok".
TEST_P(NetworkErrorLoggingServiceTest, SuccessPOSTReportQueued) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, OK, "POST"));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("POST", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictStringValue("application", *body,
                              NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("ok", *body,
                              NetworkErrorLoggingService::kTypeKey);
}
// A header with max_age 0 deletes the existing policy for the origin.
TEST_P(NetworkErrorLoggingServiceTest, MaxAge0) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_EQ(1u, PolicyCount());
  // Max_age of 0 removes the policy.
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderMaxAge0_);
  EXPECT_EQ(0u, PolicyCount());
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// success_fraction 0.0 means successful requests are never sampled.
TEST_P(NetworkErrorLoggingServiceTest, SuccessFraction0) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction0_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Each network error has a 0% chance of being reported. Fire off several and
  // verify that no reports are produced.
  constexpr size_t kReportCount = 100;
  for (size_t i = 0; i < kReportCount; ++i)
    service()->OnRequest(MakeRequestDetails(kUrl_, OK));
  EXPECT_TRUE(reports().empty());
}
// success_fraction 0.5 samples roughly half of successful requests, and
// the success fraction (not the failure fraction) is recorded as
// sampling_fraction in each report body.
TEST_P(NetworkErrorLoggingServiceTest, SuccessFractionHalf) {
  // Include a different value for failure_fraction to ensure that we copy the
  // right value into sampling_fraction.
  static const std::string kHeaderSuccessFractionHalf =
      "{\"report_to\":\"group\",\"max_age\":86400,\"success_fraction\":0.5,"
      "\"failure_fraction\":0.25}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFractionHalf);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Each network error has a 50% chance of being reported. Fire off several
  // and verify that some requests were reported and some weren't. (We can't
  // verify exact counts because each decision is made randomly.)
  constexpr size_t kReportCount = 100;
  for (size_t i = 0; i < kReportCount; ++i)
    service()->OnRequest(MakeRequestDetails(kUrl_, OK));
  // If our random selection logic is correct, there is a 2^-100 chance that
  // every single report above was skipped. If this check fails, it's much more
  // likely that our code is wrong.
  EXPECT_FALSE(reports().empty());
  // There's also a 2^-100 chance that every single report was logged. Same as
  // above, that's much more likely to be a code error.
  EXPECT_GT(kReportCount, reports().size());
  for (const auto& report : reports()) {
    const base::DictionaryValue* body;
    ASSERT_TRUE(report.body->GetAsDictionary(&body));
    // Our header includes a different value for failure_fraction, so that this
    // check verifies that we copy the correct fraction into sampling_fraction.
    ExpectDictDoubleValue(0.5, *body,
                          NetworkErrorLoggingService::kSamplingFractionKey);
  }
}
// failure_fraction 0.0 means failed requests are never sampled.
TEST_P(NetworkErrorLoggingServiceTest, FailureFraction0) {
  static const std::string kHeaderFailureFraction0 =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":0.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFraction0);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Each network error has a 0% chance of being reported. Fire off several and
  // verify that no reports are produced.
  constexpr size_t kReportCount = 100;
  for (size_t i = 0; i < kReportCount; ++i)
    service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// failure_fraction 0.5 samples roughly half of failed requests, and the
// failure fraction (not the success fraction) is recorded as
// sampling_fraction in each report body.
TEST_P(NetworkErrorLoggingServiceTest, FailureFractionHalf) {
  // Include a different value for success_fraction to ensure that we copy the
  // right value into sampling_fraction.
  static const std::string kHeaderFailureFractionHalf =
      "{\"report_to\":\"group\",\"max_age\":86400,\"failure_fraction\":0.5,"
      "\"success_fraction\":0.25}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderFailureFractionHalf);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Each network error has a 50% chance of being reported. Fire off several
  // and verify that some requests were reported and some weren't. (We can't
  // verify exact counts because each decision is made randomly.)
  constexpr size_t kReportCount = 100;
  for (size_t i = 0; i < kReportCount; ++i)
    service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  // If our random selection logic is correct, there is a 2^-100 chance that
  // every single report above was skipped. If this check fails, it's much more
  // likely that our code is wrong.
  EXPECT_FALSE(reports().empty());
  // There's also a 2^-100 chance that every single report was logged. Same as
  // above, that's much more likely to be a code error.
  EXPECT_GT(kReportCount, reports().size());
  for (const auto& report : reports()) {
    const base::DictionaryValue* body;
    ASSERT_TRUE(report.body->GetAsDictionary(&body));
    ExpectDictDoubleValue(0.5, *body,
                          NetworkErrorLoggingService::kSamplingFractionKey);
  }
}
// Without include_subdomains, a policy does not cover the same host on a
// different port (different origin).
TEST_P(NetworkErrorLoggingServiceTest,
       ExcludeSubdomainsDoesntMatchDifferentPort) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlDifferentPort_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// Without include_subdomains, a policy does not cover subdomains.
TEST_P(NetworkErrorLoggingServiceTest, ExcludeSubdomainsDoesntMatchSubdomain) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlSubdomain_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// With include_subdomains, a DNS error on the same host at a different
// port is covered and reported.
TEST_P(NetworkErrorLoggingServiceTest, IncludeSubdomainsMatchesDifferentPort) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlDifferentPort_, ERR_NAME_NOT_RESOLVED));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrlDifferentPort_, reports()[0].url);
}
// With include_subdomains, a DNS error on a subdomain is covered.
TEST_P(NetworkErrorLoggingServiceTest, IncludeSubdomainsMatchesSubdomain) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlSubdomain_, ERR_NAME_NOT_RESOLVED));
  ASSERT_EQ(1u, reports().size());
}
// include_subdomains covers subdomains only — never the superdomain.
TEST_P(NetworkErrorLoggingServiceTest,
       IncludeSubdomainsDoesntMatchSuperdomain) {
  service()->OnHeader(kOriginSubdomain_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_NAME_NOT_RESOLVED));
  EXPECT_TRUE(reports().empty());
}
// For subdomains, include_subdomains policies report only DNS errors —
// not connection-level errors.
TEST_P(NetworkErrorLoggingServiceTest,
       IncludeSubdomainsDoesntReportConnectionError) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlSubdomain_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
}
// For subdomains, include_subdomains policies do not report
// application-level errors either.
TEST_P(NetworkErrorLoggingServiceTest,
       IncludeSubdomainsDoesntReportApplicationError) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(
      MakeRequestDetails(kUrlSubdomain_, ERR_INVALID_HTTP_RESPONSE));
  EXPECT_TRUE(reports().empty());
}
// For subdomains, include_subdomains policies never report successes.
TEST_P(NetworkErrorLoggingServiceTest, IncludeSubdomainsDoesntReportSuccess) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrlSubdomain_, OK));
  EXPECT_TRUE(reports().empty());
}
// The subdomain-only restrictions do not apply to the policy's own
// origin: same-origin successes are still sampled and reported.
TEST_P(NetworkErrorLoggingServiceTest,
       IncludeSubdomainsReportsSameOriginSuccess) {
  static const std::string kHeaderIncludeSubdomainsSuccess1 =
      "{\"report_to\":\"group\",\"max_age\":86400,"
      "\"include_subdomains\":true,\"success_fraction\":1.0}";
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomainsSuccess1);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnRequest(MakeRequestDetails(kUrl_, OK));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
}
// RemoveAllBrowsingData() clears every policy; subsequent errors are not
// reported.
TEST_P(NetworkErrorLoggingServiceTest, RemoveAllBrowsingData) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_EQ(1u, PolicyCount());
  EXPECT_TRUE(HasPolicyForOrigin(kOrigin_));
  service()->RemoveAllBrowsingData();
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_EQ(0u, PolicyCount());
  EXPECT_FALSE(HasPolicyForOrigin(kOrigin_));
  EXPECT_TRUE(reports().empty());
}
// RemoveBrowsingData() with an origin filter removes only matching
// policies; the remaining policy still produces reports.
TEST_P(NetworkErrorLoggingServiceTest, RemoveSomeBrowsingData) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnHeader(kOriginDifferentHost_, kServerIP_, kHeader_);
  EXPECT_EQ(2u, PolicyCount());
  // Remove policy for kOrigin_ but not kOriginDifferentHost_
  service()->RemoveBrowsingData(
      base::BindRepeating([](const GURL& origin) -> bool {
        return origin.host() == "example.com";
      }));
  EXPECT_EQ(1u, PolicyCount());
  EXPECT_TRUE(HasPolicyForOrigin(kOriginDifferentHost_));
  EXPECT_FALSE(HasPolicyForOrigin(kOrigin_));
  service()->OnRequest(MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED));
  EXPECT_TRUE(reports().empty());
  service()->OnRequest(
      MakeRequestDetails(kUrlDifferentHost_, ERR_CONNECTION_REFUSED));
  ASSERT_EQ(1u, reports().size());
}
// A report-upload request at exactly kMaxNestedReportDepth is still
// reported, preserving its depth.
TEST_P(NetworkErrorLoggingServiceTest, Nested) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  NetworkErrorLoggingService::RequestDetails details =
      MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED);
  details.reporting_upload_depth =
      NetworkErrorLoggingService::kMaxNestedReportDepth;
  service()->OnRequest(details);
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(NetworkErrorLoggingService::kMaxNestedReportDepth,
            reports()[0].depth);
}
// A report-upload request beyond kMaxNestedReportDepth is dropped to
// prevent unbounded report-about-report recursion.
TEST_P(NetworkErrorLoggingServiceTest, NestedTooDeep) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  NetworkErrorLoggingService::RequestDetails details =
      MakeRequestDetails(kUrl_, ERR_CONNECTION_REFUSED);
  details.reporting_upload_depth =
      NetworkErrorLoggingService::kMaxNestedReportDepth + 1;
  service()->OnRequest(details);
  EXPECT_TRUE(reports().empty());
}
// StatusAsValue() serializes all stored policies (sorted by origin);
// wrong-typed header fields fall back to defaults in the serialized
// output.
TEST_P(NetworkErrorLoggingServiceTest, StatusAsValue) {
  // The expiration times will be bogus, but we need a reproducible value for
  // this test.
  base::SimpleTestClock clock;
  service()->SetClockForTesting(&clock);
  // The clock is initialized to the "zero" or origin point of the Time class.
  // This sets the clock's Time to the equivalent of the "zero" or origin point
  // of the TimeTicks class, so that the serialized value produced by
  // NetLog::TimeToString is consistent across restarts.
  base::TimeDelta delta_from_origin =
      base::Time::UnixEpoch().since_origin() -
      base::TimeTicks::UnixEpoch().since_origin();
  clock.Advance(delta_from_origin);
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->OnHeader(kOriginDifferentHost_, kServerIP_, kHeader_);
  service()->OnHeader(kOriginSubdomain_, kServerIP_, kHeaderIncludeSubdomains_);
  const std::string kHeaderWrongTypes =
      ("{\"report_to\":\"group\","
       "\"max_age\":86400,"
       // We'll ignore each of these fields because they're the wrong type.
       // We'll use a default value instead.
       "\"include_subdomains\":\"true\","
       "\"success_fraction\": \"1.0\","
       "\"failure_fraction\": \"0.0\"}");
  service()->OnHeader(
      url::Origin::Create(GURL("https://invalid-types.example.com")),
      kServerIP_, kHeaderWrongTypes);
  base::Value actual = service()->StatusAsValue();
  std::unique_ptr<base::Value> expected =
      base::test::ParseJsonDeprecated(R"json(
      {
        "originPolicies": [
          {
            "origin": "https://example.com",
            "includeSubdomains": false,
            "expires": "86400000",
            "reportTo": "group",
            "successFraction": 1.0,
            "failureFraction": 1.0,
          },
          {
            "origin": "https://example2.com",
            "includeSubdomains": false,
            "expires": "86400000",
            "reportTo": "group",
            "successFraction": 0.0,
            "failureFraction": 1.0,
          },
          {
            "origin": "https://invalid-types.example.com",
            "includeSubdomains": false,
            "expires": "86400000",
            "reportTo": "group",
            "successFraction": 0.0,
            "failureFraction": 1.0,
          },
          {
            "origin": "https://subdomain.example.com",
            "includeSubdomains": true,
            "expires": "86400000",
            "reportTo": "group",
            "successFraction": 0.0,
            "failureFraction": 1.0,
          },
        ]
      }
      )json");
  EXPECT_EQ(*expected, actual);
}
// Queuing a signed-exchange report with no ReportingService attached must
// not crash.
TEST_P(NetworkErrorLoggingServiceTest, NoReportingService_SignedExchange) {
  service_ = NetworkErrorLoggingService::Create(store_.get());
  service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Should not crash
  service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
      false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
}
// A signed-exchange report for an origin without a policy is dropped.
TEST_P(NetworkErrorLoggingServiceTest, NoPolicyForOrigin_SignedExchange) {
  service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
      false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  EXPECT_TRUE(reports().empty());
}
// success_fraction 0.0 also suppresses successful signed-exchange reports.
TEST_P(NetworkErrorLoggingServiceTest, SuccessFraction0_SignedExchange) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction0_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  // Each network error has a 0% chance of being reported. Fire off several and
  // verify that no reports are produced.
  constexpr size_t kReportCount = 100;
  for (size_t i = 0; i < kReportCount; ++i) {
    service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
        true, "ok", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
  }
  EXPECT_TRUE(reports().empty());
}
// A successful signed-exchange report carries the sxg phase plus a nested
// "sxg" body with the outer, inner, and cert URLs.
TEST_P(NetworkErrorLoggingServiceTest, SuccessReportQueued_SignedExchange) {
  service()->OnHeader(kOrigin_, kServerIP_, kHeaderSuccessFraction1_);
  // Make the rest of the test run synchronously.
  FinishLoading(true /* load_success */);
  service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
      true, "ok", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
  ASSERT_EQ(1u, reports().size());
  EXPECT_EQ(kUrl_, reports()[0].url);
  EXPECT_EQ(kUserAgent_, reports()[0].user_agent);
  EXPECT_EQ(kGroup_, reports()[0].group);
  EXPECT_EQ(kType_, reports()[0].type);
  EXPECT_EQ(0, reports()[0].depth);
  const base::DictionaryValue* body;
  ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
  base::ExpectDictStringValue(kReferrer_.spec(), *body,
                              NetworkErrorLoggingService::kReferrerKey);
  ExpectDictDoubleValue(1.0, *body,
                        NetworkErrorLoggingService::kSamplingFractionKey);
  base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                              NetworkErrorLoggingService::kServerIpKey);
  base::ExpectDictStringValue("http/1.1", *body,
                              NetworkErrorLoggingService::kProtocolKey);
  base::ExpectDictStringValue("GET", *body,
                              NetworkErrorLoggingService::kMethodKey);
  base::ExpectDictIntegerValue(200, *body,
                               NetworkErrorLoggingService::kStatusCodeKey);
  base::ExpectDictIntegerValue(1234, *body,
                               NetworkErrorLoggingService::kElapsedTimeKey);
  base::ExpectDictStringValue(
      NetworkErrorLoggingService::kSignedExchangePhaseValue, *body,
      NetworkErrorLoggingService::kPhaseKey);
  base::ExpectDictStringValue("ok", *body,
                              NetworkErrorLoggingService::kTypeKey);
  const base::DictionaryValue* sxg_body;
  ASSERT_TRUE(body->FindKey(NetworkErrorLoggingService::kSignedExchangeBodyKey)
                  ->GetAsDictionary(&sxg_body));
  base::ExpectDictStringValue(kUrl_.spec(), *sxg_body,
                              NetworkErrorLoggingService::kOuterUrlKey);
  base::ExpectDictStringValue(kInnerUrl_.spec(), *sxg_body,
                              NetworkErrorLoggingService::kInnerUrlKey);
  base::ExpectStringValue(
      kCertUrl_.spec(),
      sxg_body->FindKey(NetworkErrorLoggingService::kCertUrlKey)->GetList()[0]);
}
// A failed signed exchange load under a plain NEL header queues one report
// whose type carries the specific signed exchange error string.
TEST_P(NetworkErrorLoggingServiceTest, FailureReportQueued_SignedExchange) {
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
ASSERT_EQ(1u, reports().size());
EXPECT_EQ(kUrl_, reports()[0].url);
EXPECT_EQ(kUserAgent_, reports()[0].user_agent);
EXPECT_EQ(kGroup_, reports()[0].group);
EXPECT_EQ(kType_, reports()[0].type);
// Signed exchange reports are always top-level (depth 0).
EXPECT_EQ(0, reports()[0].depth);
const base::DictionaryValue* body;
ASSERT_TRUE(reports()[0].body->GetAsDictionary(&body));
base::ExpectDictStringValue(kReferrer_.spec(), *body,
                            NetworkErrorLoggingService::kReferrerKey);
ExpectDictDoubleValue(1.0, *body,
                      NetworkErrorLoggingService::kSamplingFractionKey);
base::ExpectDictStringValue(kServerIP_.ToString(), *body,
                            NetworkErrorLoggingService::kServerIpKey);
base::ExpectDictStringValue("http/1.1", *body,
                            NetworkErrorLoggingService::kProtocolKey);
base::ExpectDictStringValue("GET", *body,
                            NetworkErrorLoggingService::kMethodKey);
base::ExpectDictIntegerValue(200, *body,
                             NetworkErrorLoggingService::kStatusCodeKey);
base::ExpectDictIntegerValue(1234, *body,
                             NetworkErrorLoggingService::kElapsedTimeKey);
base::ExpectDictStringValue(
    NetworkErrorLoggingService::kSignedExchangePhaseValue, *body,
    NetworkErrorLoggingService::kPhaseKey);
// The failure reason string is surfaced as the report type.
base::ExpectDictStringValue("sxg.failed", *body,
                            NetworkErrorLoggingService::kTypeKey);
const base::DictionaryValue* sxg_body;
ASSERT_TRUE(body->FindKey(NetworkErrorLoggingService::kSignedExchangeBodyKey)
                ->GetAsDictionary(&sxg_body));
base::ExpectDictStringValue(kUrl_.spec(), *sxg_body,
                            NetworkErrorLoggingService::kOuterUrlKey);
base::ExpectDictStringValue(kInnerUrl_.spec(), *sxg_body,
                            NetworkErrorLoggingService::kInnerUrlKey);
// The cert-url entry is a list; only its first element is checked here.
base::ExpectStringValue(
    kCertUrl_.spec(),
    sxg_body->FindKey(NetworkErrorLoggingService::kCertUrlKey)->GetList()[0]);
}
// A signed exchange report for a subdomain URL is dropped even when the
// policy was set with include_subdomains.
TEST_P(NetworkErrorLoggingServiceTest, MismatchingSubdomain_SignedExchange) {
service()->OnHeader(kOrigin_, kServerIP_, kHeaderIncludeSubdomains_);
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrlSubdomain_, kInnerUrl_, kCertUrl_, kServerIP_));
EXPECT_TRUE(reports().empty());
}
// A signed exchange report whose server IP differs from the one the policy
// was received on must be dropped (guards against DNS-rebinding-style noise).
TEST_P(NetworkErrorLoggingServiceTest, MismatchingIPAddress_SignedExchange) {
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kOtherServerIP_));
EXPECT_TRUE(reports().empty());
}
// When the max number of policies is exceeded, first try to remove expired
// policies before evicting the least recently used unexpired policy.
TEST_P(NetworkErrorLoggingServiceTest, EvictAllExpiredPoliciesFirst) {
base::SimpleTestClock clock;
service()->SetClockForTesting(&clock);
// Add 100 policies then make them expired.
for (size_t i = 0; i < 100; ++i) {
service()->OnHeader(MakeOrigin(i), kServerIP_, kHeader_);
}
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
EXPECT_EQ(100u, PolicyCount());
clock.Advance(base::TimeDelta::FromSeconds(86401)); // max_age is 86400 sec
// Expired policies are allowed to linger before hitting the policy limit.
EXPECT_EQ(100u, PolicyCount());
// Reach the max policy limit.
for (size_t i = 100; i < NetworkErrorLoggingService::kMaxPolicies; ++i) {
service()->OnHeader(MakeOrigin(i), kServerIP_, kHeader_);
}
EXPECT_EQ(NetworkErrorLoggingService::kMaxPolicies, PolicyCount());
// Add one more policy to trigger eviction of only the expired policies.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
// All 100 expired policies are swept in one go, so the count drops to
// (max - 100) plus the newly added policy; no unexpired policy is evicted.
EXPECT_EQ(NetworkErrorLoggingService::kMaxPolicies - 100 + 1, PolicyCount());
}
// Exercises LRU eviction when no policies are expired: the least recently
// *used* policy (not merely the least recently added) must be evicted.
TEST_P(NetworkErrorLoggingServiceTest, EvictLeastRecentlyUsedPolicy) {
base::SimpleTestClock clock;
service()->SetClockForTesting(&clock);
// A policy's |last_used| is updated when it is added
for (size_t i = 0; i < NetworkErrorLoggingService::kMaxPolicies; ++i) {
service()->OnHeader(MakeOrigin(i), kServerIP_, kHeader_);
clock.Advance(base::TimeDelta::FromSeconds(1));
}
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
EXPECT_EQ(PolicyCount(), NetworkErrorLoggingService::kMaxPolicies);
// Set another policy which triggers eviction. None of the policies have
// expired, so the least recently used (i.e. least recently added) policy
// should be evicted.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
clock.Advance(base::TimeDelta::FromSeconds(1));
EXPECT_EQ(PolicyCount(), NetworkErrorLoggingService::kMaxPolicies);
EXPECT_FALSE(HasPolicyForOrigin(MakeOrigin(0))); // evicted
std::set<url::Origin> all_policy_origins =
    service()->GetPolicyOriginsForTesting();
for (size_t i = 1; i < NetworkErrorLoggingService::kMaxPolicies; ++i) {
// Avoid n calls to HasPolicyForOrigin(), which would be O(n^2).
EXPECT_EQ(1u, all_policy_origins.count(MakeOrigin(i)));
}
EXPECT_TRUE(HasPolicyForOrigin(kOrigin_));
// Now use the policies in reverse order starting with kOrigin_, then add
// another policy to trigger eviction, to check that the stalest policy is
// identified correctly.
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
clock.Advance(base::TimeDelta::FromSeconds(1));
// Loop stops at i == 1, so the unsigned decrement never wraps.
for (size_t i = NetworkErrorLoggingService::kMaxPolicies - 1; i >= 1; --i) {
service()->OnRequest(
    MakeRequestDetails(MakeOrigin(i).GetURL(), ERR_CONNECTION_REFUSED));
clock.Advance(base::TimeDelta::FromSeconds(1));
}
service()->OnHeader(kOriginSubdomain_, kServerIP_, kHeader_);
EXPECT_EQ(PolicyCount(), NetworkErrorLoggingService::kMaxPolicies);
EXPECT_FALSE(HasPolicyForOrigin(kOrigin_)); // evicted
all_policy_origins = service()->GetPolicyOriginsForTesting();
for (size_t i = NetworkErrorLoggingService::kMaxPolicies - 1; i >= 1; --i) {
// Avoid n calls to HasPolicyForOrigin(), which would be O(n^2).
EXPECT_EQ(1u, all_policy_origins.count(MakeOrigin(i)));
}
EXPECT_TRUE(HasPolicyForOrigin(kOriginSubdomain_)); // most recently added
// Note: This test advances the clock by ~2000 seconds, which is below the
// specified max_age of 86400 seconds, so none of the policies expire during
// this test.
}
// Once the initial load has completed, each public mutation should send the
// matching command(s) to the persistent store immediately (synchronously).
TEST_P(NetworkErrorLoggingServiceTest, SendsCommandsToStoreSynchronous) {
if (!store())
return;
MockPersistentNelStore::CommandList expected_commands;
NetworkErrorLoggingService::NelPolicy policy1 = MakePolicyForOrigin(kOrigin_);
NetworkErrorLoggingService::NelPolicy policy2 =
    MakePolicyForOrigin(kOriginDifferentHost_);
std::vector<NetworkErrorLoggingService::NelPolicy> prestored_policies = {
    policy1, policy2};
store()->SetPrestoredPolicies(std::move(prestored_policies));
// The first call to any of the public methods triggers a load.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Make the rest of the test run synchronously.
FinishLoading(true /* load_success */);
// DoOnHeader() should now execute.
// The prestored policy for kOrigin_ is replaced: deleted, then re-added.
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::ADD_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Removes policy1 but not policy2.
EXPECT_EQ(2, store()->StoredPoliciesCount());
service()->RemoveBrowsingData(
    base::BindRepeating([](const GURL& origin) -> bool {
      return origin.host() == "example.com";
    }));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_EQ(1, store()->StoredPoliciesCount());
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->RemoveAllBrowsingData();
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy2);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_EQ(0, store()->StoredPoliciesCount());
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
}
// Same as the above test, except that all the tasks are queued until loading
// is complete.
TEST_P(NetworkErrorLoggingServiceTest, SendsCommandsToStoreDeferred) {
if (!store())
return;
MockPersistentNelStore::CommandList expected_commands;
NetworkErrorLoggingService::NelPolicy policy1 = MakePolicyForOrigin(kOrigin_);
NetworkErrorLoggingService::NelPolicy policy2 =
    MakePolicyForOrigin(kOriginDifferentHost_);
std::vector<NetworkErrorLoggingService::NelPolicy> prestored_policies = {
    policy1, policy2};
store()->SetPrestoredPolicies(std::move(prestored_policies));
// The first call to any of the public methods triggers a load.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// None of the following mutations may reach the store before loading ends,
// so |expected_commands| is deliberately left unchanged after each call.
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Removes policy1 but not policy2.
service()->RemoveBrowsingData(
    base::BindRepeating([](const GURL& origin) -> bool {
      return origin.host() == "example.com";
    }));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->RemoveAllBrowsingData();
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// The store has not yet been told to remove the policies because the tasks
// to remove browsing data were queued pending initialization.
EXPECT_EQ(2, store()->StoredPoliciesCount());
// Completing the load drains the queued tasks in submission order; the
// commands below must therefore appear in exactly this sequence.
FinishLoading(true /* load_success */);
// DoOnHeader()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::ADD_NEL_POLICY, policy1);
// DoOnRequest()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
// DoQueueSignedExchangeReport()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
// DoRemoveBrowsingData()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
// DoRemoveAllBrowsingData()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy2);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
}
// These two tests check that if loading fails, the commands should still
// be sent to the store; the actual store impl will just ignore them.
//
// Synchronous variant: the (failed) load completes before any mutations, so
// each mutation's command reaches the store immediately. Because nothing was
// loaded, no DELETE is issued for prestored policies.
TEST_P(NetworkErrorLoggingServiceTest,
       SendsCommandsToStoreSynchronousLoadFailed) {
if (!store())
return;
MockPersistentNelStore::CommandList expected_commands;
NetworkErrorLoggingService::NelPolicy policy1 = MakePolicyForOrigin(kOrigin_);
NetworkErrorLoggingService::NelPolicy policy2 =
    MakePolicyForOrigin(kOriginDifferentHost_);
std::vector<NetworkErrorLoggingService::NelPolicy> prestored_policies = {
    policy1, policy2};
store()->SetPrestoredPolicies(std::move(prestored_policies));
// The first call to any of the public methods triggers a load.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Make the rest of the test run synchronously.
FinishLoading(false /* load_success */);
// DoOnHeader() should now execute.
// Because the load failed, there will be no policies in memory, so the store
// is not told to delete anything.
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::ADD_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Removes policy1 but not policy2.
service()->RemoveBrowsingData(
    base::BindRepeating([](const GURL& origin) -> bool {
      return origin.host() == "example.com";
    }));
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->RemoveAllBrowsingData();
// We failed to load policy2 from the store, so there is nothing to remove
// here.
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
}
// Deferred variant of the load-failure case: mutations queue up during the
// load, then drain in order once the (failed) load completes.
TEST_P(NetworkErrorLoggingServiceTest, SendsCommandsToStoreDeferredLoadFailed) {
if (!store())
return;
MockPersistentNelStore::CommandList expected_commands;
NetworkErrorLoggingService::NelPolicy policy1 = MakePolicyForOrigin(kOrigin_);
NetworkErrorLoggingService::NelPolicy policy2 =
    MakePolicyForOrigin(kOriginDifferentHost_);
std::vector<NetworkErrorLoggingService::NelPolicy> prestored_policies = {
    policy1, policy2};
store()->SetPrestoredPolicies(std::move(prestored_policies));
// The first call to any of the public methods triggers a load.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// While loading, none of these mutations may reach the store.
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// Removes policy1 but not policy2.
service()->RemoveBrowsingData(
    base::BindRepeating([](const GURL& origin) -> bool {
      return origin.host() == "example.com";
    }));
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
service()->RemoveAllBrowsingData();
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
FinishLoading(false /* load_success */);
// DoOnHeader()
// Because the load failed, there will be no policies in memory, so the store
// is not told to delete anything.
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::ADD_NEL_POLICY, policy1);
// DoOnRequest()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
// DoQueueSignedExchangeReport()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::UPDATE_NEL_POLICY, policy1);
// DoRemoveBrowsingData()
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::DELETE_NEL_POLICY, policy1);
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
// DoRemoveAllBrowsingData()
// We failed to load policy2 from the store, so there is nothing to remove
// here.
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
}
// Destroying the service after a load has been attempted must flush the
// store so buffered commands are not lost at shutdown.
TEST_P(NetworkErrorLoggingServiceTest, FlushesStoreOnDestruction) {
auto store = std::make_unique<MockPersistentNelStore>();
std::unique_ptr<NetworkErrorLoggingService> service =
    NetworkErrorLoggingService::Create(store.get());
MockPersistentNelStore::CommandList expected_commands;
service->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store->VerifyCommands(expected_commands));
// Even a failed load lets the queued OnHeader task run afterwards.
store->FinishLoading(false /* load_success */);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::ADD_NEL_POLICY,
    MakePolicyForOrigin(kOrigin_));
EXPECT_TRUE(store->VerifyCommands(expected_commands));
// Store should be flushed on destruction of service.
service.reset();
expected_commands.emplace_back(MockPersistentNelStore::Command::Type::FLUSH);
EXPECT_TRUE(store->VerifyCommands(expected_commands));
}
// If no public method was ever called (so no load was started), destroying
// the service must send no commands at all — in particular, no FLUSH.
TEST_P(NetworkErrorLoggingServiceTest,
       DoesntFlushStoreOnDestructionBeforeLoad) {
auto store = std::make_unique<MockPersistentNelStore>();
std::unique_ptr<NetworkErrorLoggingService> service =
    NetworkErrorLoggingService::Create(store.get());
service.reset();
EXPECT_EQ(0u, store->GetAllCommands().size());
}
// After OnShutdown(), pending tasks must be dropped: completing the load
// sends nothing further to the store and leaves no policies or reports.
TEST_P(NetworkErrorLoggingServiceTest, DoNothingIfShutDown) {
if (!store())
return;
MockPersistentNelStore::CommandList expected_commands;
// The first call to any of the public methods triggers a load.
service()->OnHeader(kOrigin_, kServerIP_, kHeader_);
expected_commands.emplace_back(
    MockPersistentNelStore::Command::Type::LOAD_NEL_POLICIES);
EXPECT_TRUE(store()->VerifyCommands(expected_commands));
// These all queue behind the in-progress load.
service()->OnRequest(
    MakeRequestDetails(kOrigin_.GetURL(), ERR_CONNECTION_REFUSED));
service()->QueueSignedExchangeReport(MakeSignedExchangeReportDetails(
    false, "sxg.failed", kUrl_, kInnerUrl_, kCertUrl_, kServerIP_));
service()->RemoveBrowsingData(
    base::BindRepeating([](const GURL& origin) -> bool {
      return origin.host() == "example.com";
    }));
service()->RemoveAllBrowsingData();
// Finish loading after the service has been shut down.
service()->OnShutdown();
FinishLoading(true /* load_success */);
// Only the LOAD command should have been sent to the store.
EXPECT_EQ(1u, store()->GetAllCommands().size());
EXPECT_EQ(0u, PolicyCount());
EXPECT_EQ(0u, reports().size());
}
// Run the whole suite for both values of the bool parameter — presumably
// with and without a persistent store (tests guard on store() being null);
// confirm against the fixture's constructor.
INSTANTIATE_TEST_SUITE_P(NetworkErrorLoggingServiceStoreTest,
                         NetworkErrorLoggingServiceTest,
                         testing::Bool());
} // namespace
} // namespace net
| {
"pile_set_name": "Github"
} |
/*
* $Id: kern_gzio.c,v 1.6 2008-10-18 22:54:45 lbazinet Exp $
*
* core_gzip.c -- gzip routines used in compressing user process cores
*
* This file is derived from src/lib/libz/gzio.c in FreeBSD.
*/
/* gzio.c -- IO on .gz files
* Copyright (C) 1995-1998 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*
*/
/* @(#) $FreeBSD: release/9.1.0/sys/kern/kern_gzio.c 233353 2012-03-23 11:26:54Z kib $ */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <net/zutil.h>
#include <sys/libkern.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#define GZ_HEADER_LEN 10
#ifndef Z_BUFSIZE
# ifdef MAXSEG_64K
# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */
# else
# define Z_BUFSIZE 16384
# endif
#endif
#ifndef Z_PRINTF_BUFSIZE
# define Z_PRINTF_BUFSIZE 4096
#endif
#define ALLOC(size) malloc(size, M_TEMP, M_WAITOK | M_ZERO)
#define TRYFREE(p) {if (p) free(p, M_TEMP);}
static int gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
typedef struct gz_stream {
z_stream stream;
int z_err; /* error code for last stream operation */
int z_eof; /* set if end of input file */
struct vnode *file; /* vnode pointer of .gz file */
Byte *inbuf; /* input buffer */
Byte *outbuf; /* output buffer */
uLong crc; /* crc32 of uncompressed data */
char *msg; /* error message */
char *path; /* path name for debugging only */
int transparent; /* 1 if input file is not a .gz file */
char mode; /* 'w' or 'r' */
long startpos; /* start of compressed data in file (header skipped) */
off_t outoff; /* current offset in output file */
int flags;
} gz_stream;
local int do_flush OF((gzFile file, int flush));
local int destroy OF((gz_stream *s));
local void putU32 OF((gz_stream *file, uint32_t x));
local void *gz_alloc OF((void *notused, u_int items, u_int size));
local void gz_free OF((void *notused, void *ptr));
/* ===========================================================================
     Opens a gzip (.gz) stream that writes through the vnode |vp|.  Only
   write modes are accepted in this kernel version (the mode string must
   contain 'w' or 'a'); a digit selects the compression level and 'f'/'h'
   select the strategy, as in zlib's gzopen().
     Returns Z_NULL on bad parameters, allocation failure, or if the
   initial gzip header cannot be written.
*/
gzFile gz_open (path, mode, vp)
const char *path;
const char *mode;
struct vnode *vp;
{
int err;
int level = Z_DEFAULT_COMPRESSION; /* compression level */
int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */
const char *p = mode;
gz_stream *s;
char fmode[80]; /* copy of mode, without the compression level */
char *m = fmode;
ssize_t resid;
int error;
char buf[GZ_HEADER_LEN + 1];
if (!path || !mode) return Z_NULL;
s = (gz_stream *)ALLOC(sizeof(gz_stream));
if (!s) return Z_NULL;
/* Route zlib's internal allocations through the kernel wrappers below. */
s->stream.zalloc = (alloc_func)gz_alloc;
s->stream.zfree = (free_func)gz_free;
s->stream.opaque = (voidpf)0;
s->stream.next_in = s->inbuf = Z_NULL;
s->stream.next_out = s->outbuf = Z_NULL;
s->stream.avail_in = s->stream.avail_out = 0;
s->file = NULL;
s->z_err = Z_OK;
s->z_eof = 0;
s->crc = 0;
s->msg = NULL;
s->transparent = 0;
s->outoff = 0;
s->flags = 0;
s->path = (char*)ALLOC(strlen(path)+1);
if (s->path == NULL) {
return destroy(s), (gzFile)Z_NULL;
}
strcpy(s->path, path); /* do this early for debugging */
s->mode = '\0';
/* Parse the mode string: 'r'/'w'/'a' set the direction, a digit sets the
 * level, 'f'/'h' set the strategy; other characters are copied into fmode
 * (bounded by its size). */
do {
if (*p == 'r') s->mode = 'r';
if (*p == 'w' || *p == 'a') s->mode = 'w';
if (*p >= '0' && *p <= '9') {
level = *p - '0';
} else if (*p == 'f') {
strategy = Z_FILTERED;
} else if (*p == 'h') {
strategy = Z_HUFFMAN_ONLY;
} else {
*m++ = *p; /* copy the mode */
}
} while (*p++ && m != fmode + sizeof(fmode));
/* Core dumps are write-only; reading .gz files is not supported here. */
if (s->mode != 'w') {
log(LOG_ERR, "gz_open: mode is not w (%c)\n", s->mode);
return destroy(s), (gzFile)Z_NULL;
}
err = deflateInit2(&(s->stream), level,
    Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy);
/* windowBits is passed < 0 to suppress zlib header */
s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE);
if (err != Z_OK || s->outbuf == Z_NULL) {
return destroy(s), (gzFile)Z_NULL;
}
s->stream.avail_out = Z_BUFSIZE;
s->file = vp;
/* Write a very simple .gz header:
*/
snprintf(buf, sizeof(buf), "%c%c%c%c%c%c%c%c%c%c", gz_magic[0],
    gz_magic[1], Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/,
    0 /*xflags*/, OS_CODE);
if ((error = vn_rdwr(UIO_WRITE, s->file, buf, GZ_HEADER_LEN, s->outoff,
    UIO_SYSSPACE, IO_UNIT, curproc->p_ucred,
    NOCRED, &resid, curthread))) {
s->outoff += GZ_HEADER_LEN - resid; /* account for any partial write */
return destroy(s), (gzFile)Z_NULL;
}
s->outoff += GZ_HEADER_LEN;
s->startpos = 10L;
return (gzFile)s;
}
/* ===========================================================================
 * Cleanup then free the given gz_stream. Return a zlib error code.
   Try freeing in the reverse order of allocations.
*/
local int destroy (s)
gz_stream *s;
{
int err = Z_OK;
if (!s) return Z_STREAM_ERROR;
TRYFREE(s->msg);
/* Only tear down deflate state if deflateInit2() got far enough to set it. */
if (s->stream.state != NULL) {
if (s->mode == 'w') {
err = deflateEnd(&(s->stream));
}
}
/* A sticky stream error (negative z_err) takes precedence over the
 * deflateEnd() result. */
if (s->z_err < 0) err = s->z_err;
TRYFREE(s->inbuf);
TRYFREE(s->outbuf);
TRYFREE(s->path);
TRYFREE(s);
return err;
}
/* ===========================================================================
     Writes the given number of uncompressed bytes into the compressed file.
   gzwrite returns the number of bytes actually written (0 in case of error).
*/
int ZEXPORT gzwrite (file, buf, len)
gzFile file;
const voidp buf;
unsigned len;
{
gz_stream *s = (gz_stream*)file;
off_t curoff;
size_t resid;
int error;
int vfslocked;
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
s->stream.next_in = (Bytef*)buf;
s->stream.avail_in = len;
curoff = s->outoff;
/* Deflate the input, draining the output buffer to the vnode each time it
 * fills. */
while (s->stream.avail_in != 0) {
if (s->stream.avail_out == 0) {
s->stream.next_out = s->outbuf;
vfslocked = VFS_LOCK_GIANT(s->file->v_mount);
error = vn_rdwr_inchunks(UIO_WRITE, s->file, s->outbuf, Z_BUFSIZE,
    curoff, UIO_SYSSPACE, IO_UNIT,
    curproc->p_ucred, NOCRED, &resid, curthread);
VFS_UNLOCK_GIANT(vfslocked);
if (error) {
log(LOG_ERR, "gzwrite: vn_rdwr return %d\n", error);
curoff += Z_BUFSIZE - resid; /* account for the partial write */
s->z_err = Z_ERRNO;
break;
}
curoff += Z_BUFSIZE;
s->stream.avail_out = Z_BUFSIZE;
}
s->z_err = deflate(&(s->stream), Z_NO_FLUSH);
if (s->z_err != Z_OK) {
log(LOG_ERR,
    "gzwrite: deflate returned error %d\n", s->z_err);
break;
}
}
/* NOTE(review): the CRC is folded over the full |len| even when deflate or
 * the write bailed out early, so on error it covers bytes that were never
 * compressed — confirm this is acceptable for truncated core dumps. */
s->crc = ~crc32_raw(buf, len, ~s->crc);
s->outoff = curoff;
return (int)(len - s->stream.avail_in);
}
/* ===========================================================================
     Flushes all pending output into the compressed file. The parameter
   flush is as in the deflate() function.  Returns Z_OK when the flush
   completes (or the stream ends), otherwise a zlib error code.

   Bug fix: the original initialized |curoff| from s->outoff *before* the
   s == NULL validity check, so do_flush(NULL, ...) dereferenced a NULL
   pointer instead of returning Z_STREAM_ERROR.
*/
local int do_flush (file, flush)
gzFile file;
int flush;
{
uInt len;
int done = 0;
gz_stream *s = (gz_stream*)file;
off_t curoff;
size_t resid;
int vfslocked = 0;
int error;
/* Validate the stream before touching any of its fields. */
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
curoff = s->outoff;
if (s->stream.avail_in) {
log(LOG_WARNING, "do_flush: avail_in non-zero on entry\n");
}
s->stream.avail_in = 0; /* should be zero already anyway */
for (;;) {
/* Write out whatever deflate has produced so far. */
len = Z_BUFSIZE - s->stream.avail_out;
if (len != 0) {
vfslocked = VFS_LOCK_GIANT(s->file->v_mount);
error = vn_rdwr_inchunks(UIO_WRITE, s->file, s->outbuf, len, curoff,
    UIO_SYSSPACE, IO_UNIT, curproc->p_ucred,
    NOCRED, &resid, curthread);
VFS_UNLOCK_GIANT(vfslocked);
if (error) {
s->z_err = Z_ERRNO;
s->outoff = curoff + len - resid; /* record the partial write */
return Z_ERRNO;
}
s->stream.next_out = s->outbuf;
s->stream.avail_out = Z_BUFSIZE;
curoff += len;
}
if (done) break;
s->z_err = deflate(&(s->stream), flush);
/* Ignore the second of two consecutive flushes: */
if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK;
/* deflate has finished flushing only when it hasn't used up
 * all the available space in the output buffer:
 */
done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END);
if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break;
}
s->outoff = curoff;
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
/* Public flush entry point: delegates to do_flush() and maps a completed
 * stream (Z_STREAM_END) to Z_OK for callers. */
int ZEXPORT gzflush (file, flush)
gzFile file;
int flush;
{
gz_stream *s = (gz_stream*)file;
/* do_flush() validates |file|, so |s| is only dereferenced below after a
 * successful flush. */
int err = do_flush (file, flush);
if (err) return err;
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
/* ===========================================================================
   Outputs a 32-bit value in LSB (little-endian) order to the given file,
   as required by the gzip trailer format.
*/
local void putU32 (s, x)
gz_stream *s;
uint32_t x;
{
uint32_t xx;
off_t curoff = s->outoff;
ssize_t resid;
/* The on-disk trailer is little-endian; byte-swap on big-endian machines. */
#if BYTE_ORDER == BIG_ENDIAN
xx = bswap32(x);
#else
xx = x;
#endif
/* NOTE(review): the vn_rdwr() return value is ignored here; a failed
 * trailer write is only visible via a short |resid| — confirm intended. */
vn_rdwr(UIO_WRITE, s->file, (caddr_t)&xx, sizeof(xx), curoff,
    UIO_SYSSPACE, IO_UNIT, curproc->p_ucred,
    NOCRED, &resid, curthread);
s->outoff += sizeof(xx) - resid;
}
/* ===========================================================================
     Flushes all pending output if necessary, closes the compressed file
   and deallocates all the (de)compression state.
*/
int ZEXPORT gzclose (file)
gzFile file;
{
int err;
gz_stream *s = (gz_stream*)file;
if (s == NULL) return Z_STREAM_ERROR;
if (s->mode == 'w') {
/* Z_FINISH drains all remaining deflate output before the trailer. */
err = do_flush (file, Z_FINISH);
if (err != Z_OK) {
log(LOG_ERR, "gzclose: do_flush failed (err %d)\n", err);
return destroy((gz_stream*)file);
}
#if 0
printf("gzclose: putting crc: %lld total: %lld\n",
    (long long)s->crc, (long long)s->stream.total_in);
printf("sizeof uLong = %d\n", (int)sizeof(uLong));
#endif
/* gzip trailer: CRC-32 of the uncompressed data, then the uncompressed
 * length modulo 2^32, both little-endian. */
putU32 (s, s->crc);
putU32 (s, (uint32_t) s->stream.total_in);
}
return destroy((gz_stream*)file);
}
/*
 * Space allocation and freeing routines for use by zlib routines when called
 * from gzip modules.
 */
/* zalloc hook handed to zlib (see gz_open). Uses M_NOWAIT so zlib sees a
 * NULL return (and reports Z_MEM_ERROR) rather than this path sleeping.
 * NOTE(review): items * size is not checked for overflow — confirm zlib's
 * internal callers keep both factors small. */
static void *
gz_alloc(void *notused __unused, u_int items, u_int size)
{
void *ptr;
MALLOC(ptr, void *, items * size, M_TEMP, M_NOWAIT | M_ZERO);
return ptr;
}
/* zfree hook handed to zlib; releases memory obtained via gz_alloc(). */
static void
gz_free(void *opaque __unused, void *ptr)
{
FREE(ptr, M_TEMP);
}
| {
"pile_set_name": "Github"
} |
---
layout: post
title: Sorry we're delayed
author:
name: Devrim Yasar
email: [email protected]
excerpt_separator: "<!--more-->"
---
Hi Everyone,
We're receiving no fewer than a few emails every single day asking about the release, encouraging us to release, or being angry about the release :) Thanks a lot.
<!--more-->
I'm going to cut this short - there are many reasons, but primary ones,
\+ we are trying to achieve multi server backend per uid
\+ we are leaving wordpress
\+ we are moving to a new stack that holds all apps in one
Stuff we tried that didn't work,
\+ c++ multiserver http/socket communications (4 months/3 developers)
\+ cocoa style frontend development (5 months 2 devs - we tried this twice!)
\+ pure js/css frontend (4 devs 4 months - those are made for sites, not for apps)
\+ golang multiserver http/socket communications (this did work well - but then we liked ours more)
I will write more on those later on a different topic but just to let you know, we have spent 18 months with 5 devs on average on this release.
Two things to take out,
\+ first, now we know what works and we are about to finalize our current stack (seriously no changes anymore :))
\+ second, Kodingen v1.0 works really REALLY well!
I'm not going to make concrete promises here, because we want to do it well. Also, this is a project that runs on our own savings; we really can't afford much — expensive developers, or even full-time developers, are beyond our reach. We gotta do it within our own constraints (and it is fun that way!). But of course we don't want you to wait any longer. We're annoyed, excited, frustrated, happy, sad more than anyone, and we want to put an end to that emotional turmoil. Good news is, we're now testing our new release internally, so it's live on our screens.
Hopefully we will bring it to your screens very soon!
Cheers!
Devrim
| {
"pile_set_name": "Github"
} |
<?php
/*
+ ----------------------------------------------------------------------------------------------+
| e107 website system : http://e107.org
| Steve Dunstan 2001-2002 : [email protected]
| Released under the terms and conditions of the GNU General Public License (http://gnu.org).
|
| $Source: /cvs_backup/e107_0.8/e107_themes/index.html,v $
| $Revision: 1.2 $
| $Date: 2008-08-08 20:16:12 $
| $Author: e107steved $
+-----------------------------------------------------------------------------------------------+
*/
// Refuse direct access: this template is only valid inside an e107 bootstrap.
if (!defined('e107_INIT')) { exit; }
// Parameters available: $line, $file, $message
// Renders a standalone XHTML 1.1 "critical error" page; no theme engine is
// available at this point, so all markup is echoed directly.
echo "<?xml version='1.0' encoding='utf-8' ?>\n";
echo "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">\n";
echo "<html xmlns='http://www.w3.org/1999/xhtml'".(defined("TEXTDIRECTION") ? " dir='".TEXTDIRECTION."'" : "").(defined("CORE_LC") ? " xml:lang=\"".CORE_LC."\"" : "").">\n";
echo "<head>
<title>".LAN_ERROR_43."</title>
<meta http-equiv='content-type' content='text/html; charset=utf-8' />
<meta http-equiv='content-style-type' content='text/css' />
<meta http-equiv='content-language' content='en' />
</head>\n
<body>\n
<div id='bodytext'>";
// Show the site logo when one exists on disk.
if (is_readable(e_IMAGE.'logo.png'))
{
echo "<img src='".e_IMAGE_ABS.'logo.png'."' alt='".LAN_ERROR_44."' />";
}
echo "<div style='text-align:center; font: 14px verdana, tahoma, arial, helvetica, sans-serif;'>";
echo LAN_ERROR_38.'<br />';
echo LAN_ERROR_39.'<br />';
echo LAN_ERROR_40.'<br /><br /><br /></div>';
echo "<div style='text-align:center; font: 12px verdana, tahoma, arial, helvetica, sans-serif;'>";
echo LAN_ERROR_41.'<br />';
// Emit the error location and message passed in by the caller.
echo "<b>CRITICAL_ERROR: </b><br />Line {$line} {$file}<br />
<br />".LAN_ERROR_42.$message."</div>\n";
echo "</div></body>\n</html>";
?>
| {
"pile_set_name": "Github"
} |
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
"fmt"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/protos/common"
pb "github.com/hyperledger/fabric/protos/peer"
ptestutils "github.com/hyperledger/fabric/protos/testutils"
)
// App - a sample fund transfer app.
// It acts as both the "chaincode" (simulating transactions) and the
// "client" (querying balances) against a single peer ledger.
type App struct {
	name   string            // namespace passed to SetState/GetState for all app data
	ledger ledger.PeerLedger // underlying peer ledger used for simulation and queries
}
// ConstructAppInstance constructs an instance of an app bound to the given ledger.
func ConstructAppInstance(ledger ledger.PeerLedger) *App {
	return &App{
		name:   "PaymentApp",
		ledger: ledger,
	}
}
// Init simulates an init transaction that seeds the given account balances.
// It returns the signed transaction envelope capturing the simulation results.
func (app *App) Init(initialBalances map[string]int) (*common.Envelope, error) {
	sim, err := app.ledger.NewTxSimulator()
	if err != nil {
		return nil, err
	}
	defer sim.Done()

	// Write every seed balance into the app's namespace.
	for account, balance := range initialBalances {
		sim.SetState(app.name, account, toBytes(balance))
	}

	simResults, err := sim.GetTxSimulationResults()
	if err != nil {
		return nil, err
	}
	return constructTransaction(simResults), nil
}
// TransferFunds simulates a transaction that moves transferAmt from
// fromAccount to toAccount, failing if the source balance is insufficient.
func (app *App) TransferFunds(fromAccount string, toAccount string, transferAmt int) (*common.Envelope, error) {
	// Act as the endorsing peer's shim and simulate the transfer on behalf of chaincode.
	sim, err := app.ledger.NewTxSimulator()
	if err != nil {
		return nil, err
	}
	defer sim.Done()

	rawFrom, err := sim.GetState(app.name, fromAccount)
	if err != nil {
		return nil, err
	}
	fromBalance := toInt(rawFrom)
	if fromBalance-transferAmt < 0 {
		return nil, fmt.Errorf("Not enough balance in account [%s]. Balance = [%d], transfer request = [%d]",
			fromAccount, fromBalance, transferAmt)
	}

	rawTo, err := sim.GetState(app.name, toAccount)
	if err != nil {
		return nil, err
	}
	toBalance := toInt(rawTo)

	// Debit the source, credit the destination.
	sim.SetState(app.name, fromAccount, toBytes(fromBalance-transferAmt))
	sim.SetState(app.name, toAccount, toBytes(toBalance+transferAmt))

	simResults, err := sim.GetTxSimulationResults()
	if err != nil {
		return nil, err
	}

	// Act as the endorsing peer to create an Action with the simulation
	// results, then as the SDK to wrap it into a Transaction.
	return constructTransaction(simResults), nil
}
// QueryBalances queries the current balance of each of the given accounts,
// returned in the same order as the input slice.
func (app *App) QueryBalances(accounts []string) ([]int, error) {
	qe, err := app.ledger.NewQueryExecutor()
	if err != nil {
		return nil, err
	}
	defer qe.Done()

	balances := make([]int, len(accounts))
	for i, account := range accounts {
		raw, err := qe.GetState(app.name, account)
		if err != nil {
			return nil, err
		}
		balances[i] = toInt(raw)
	}
	return balances, nil
}
// constructTransaction wraps the given simulation results into a signed
// transaction envelope using a fixed test chaincode ID and default signer.
func constructTransaction(simulationResults []byte) *common.Envelope {
	chaincodeID := &pb.ChaincodeID{Name: "foo", Version: "v1"}
	resp := &pb.Response{Status: 200}
	env, _, _ := ptestutils.ConstructSingedTxEnvWithDefaultSigner(util.GetTestChainID(), chaincodeID, resp, simulationResults, nil, nil)
	return env
}
// toBytes encodes a balance as a protobuf varint for ledger storage.
func toBytes(balance int) []byte {
	encoded := proto.EncodeVarint(uint64(balance))
	return encoded
}
// toInt decodes a varint-encoded balance previously written by toBytes.
func toInt(balanceBytes []byte) int {
	value, _ := proto.DecodeVarint(balanceBytes)
	return int(value)
}
| {
"pile_set_name": "Github"
} |
Microsoft Visual Studio Solution File, Format Version 11.00
# Visual Studio 2010
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "DOMParser", "DOMParser\DOMParser_vs100.vcxproj", "{70F2F655-67D5-32A1-A99B-D4903547DB3E}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "DOMWriter", "DOMWriter\DOMWriter_vs100.vcxproj", "{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "PrettyPrint", "PrettyPrint\PrettyPrint_vs100.vcxproj", "{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SAXParser", "SAXParser\SAXParser_vs100.vcxproj", "{2A54653D-9F55-348B-8F79-A3E454563AE3}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
debug_shared|Win32 = debug_shared|Win32
release_shared|Win32 = release_shared|Win32
debug_static_mt|Win32 = debug_static_mt|Win32
release_static_mt|Win32 = release_static_mt|Win32
debug_static_md|Win32 = debug_static_md|Win32
release_static_md|Win32 = release_static_md|Win32
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_shared|Win32.ActiveCfg = debug_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_shared|Win32.Build.0 = debug_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_shared|Win32.Deploy.0 = debug_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_shared|Win32.ActiveCfg = release_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_shared|Win32.Build.0 = release_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_shared|Win32.Deploy.0 = release_shared|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_mt|Win32.ActiveCfg = debug_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_mt|Win32.Build.0 = debug_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_mt|Win32.Deploy.0 = debug_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_mt|Win32.ActiveCfg = release_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_mt|Win32.Build.0 = release_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_mt|Win32.Deploy.0 = release_static_mt|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_md|Win32.ActiveCfg = debug_static_md|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_md|Win32.Build.0 = debug_static_md|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.debug_static_md|Win32.Deploy.0 = debug_static_md|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_md|Win32.ActiveCfg = release_static_md|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_md|Win32.Build.0 = release_static_md|Win32
{70F2F655-67D5-32A1-A99B-D4903547DB3E}.release_static_md|Win32.Deploy.0 = release_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_shared|Win32.ActiveCfg = debug_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_shared|Win32.Build.0 = debug_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_shared|Win32.Deploy.0 = debug_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_shared|Win32.ActiveCfg = release_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_shared|Win32.Build.0 = release_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_shared|Win32.Deploy.0 = release_shared|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_mt|Win32.ActiveCfg = debug_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_mt|Win32.Build.0 = debug_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_mt|Win32.Deploy.0 = debug_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_mt|Win32.ActiveCfg = release_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_mt|Win32.Build.0 = release_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_mt|Win32.Deploy.0 = release_static_mt|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_md|Win32.ActiveCfg = debug_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_md|Win32.Build.0 = debug_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.debug_static_md|Win32.Deploy.0 = debug_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_md|Win32.ActiveCfg = release_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_md|Win32.Build.0 = release_static_md|Win32
{A3CBDFA6-6261-3C04-B1FD-51AA20763BB8}.release_static_md|Win32.Deploy.0 = release_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_shared|Win32.ActiveCfg = debug_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_shared|Win32.Build.0 = debug_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_shared|Win32.Deploy.0 = debug_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_shared|Win32.ActiveCfg = release_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_shared|Win32.Build.0 = release_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_shared|Win32.Deploy.0 = release_shared|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_mt|Win32.ActiveCfg = debug_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_mt|Win32.Build.0 = debug_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_mt|Win32.Deploy.0 = debug_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_mt|Win32.ActiveCfg = release_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_mt|Win32.Build.0 = release_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_mt|Win32.Deploy.0 = release_static_mt|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_md|Win32.ActiveCfg = debug_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_md|Win32.Build.0 = debug_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.debug_static_md|Win32.Deploy.0 = debug_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_md|Win32.ActiveCfg = release_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_md|Win32.Build.0 = release_static_md|Win32
{DFA97011-8DD4-3A84-A0C9-EB2101BD6082}.release_static_md|Win32.Deploy.0 = release_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_shared|Win32.ActiveCfg = debug_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_shared|Win32.Build.0 = debug_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_shared|Win32.Deploy.0 = debug_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_shared|Win32.ActiveCfg = release_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_shared|Win32.Build.0 = release_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_shared|Win32.Deploy.0 = release_shared|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_mt|Win32.ActiveCfg = debug_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_mt|Win32.Build.0 = debug_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_mt|Win32.Deploy.0 = debug_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_mt|Win32.ActiveCfg = release_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_mt|Win32.Build.0 = release_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_mt|Win32.Deploy.0 = release_static_mt|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_md|Win32.ActiveCfg = debug_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_md|Win32.Build.0 = debug_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.debug_static_md|Win32.Deploy.0 = debug_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_md|Win32.ActiveCfg = release_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_md|Win32.Build.0 = release_static_md|Win32
{2A54653D-9F55-348B-8F79-A3E454563AE3}.release_static_md|Win32.Deploy.0 = release_static_md|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal
| {
"pile_set_name": "Github"
} |
// Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
// Project developers. See the top-level LICENSE file for dates and other
// details. No copyright assignment is required to contribute to VisIt.
// ****************************************************************************
// File: StaggerViewerPluginInfo.C
// ****************************************************************************
#include <StaggerPluginInfo.h>
#include <StaggerAttributes.h>

// Expands to the standard VisIt entry-point boilerplate for the Stagger
// operator's Viewer-side plugin (macro arguments: plugin name, component).
VISIT_OPERATOR_PLUGIN_ENTRY_EV(Stagger,Viewer)
| {
"pile_set_name": "Github"
} |
"Roles of user","Rollen des Benutzers"
"Please choose","Bitte wählen"
"You have to select at least one user and one role.","Sie müssen mindestens einen Benutzer und eine Rolle auswählen."
"Administrator","Super-Administrator"
"Web application administrator.","Hauptadministrator der gesamten Webapplikation."
"User","Benutzer"
"Can login and change the own password.","Kann sich einloggen und das eigene Passwort ändern."
"User-Group-Manager","Benutzer-Gruppen-Verwalter"
"Create/Delete users and groups and manages memberships and change passwords of users.","Erstellen/Löschen von Benutzern und Gruppen und deren Mitgliedschaften. Kann das Passwort von allen Benutzern ändern."
"Access-Path-Manager","Zugriffspfad-Verwalter"
"Create/Delete Access-Paths and manages user and group permissions.","Erstellen/Löschen von Zugriffspfaden und zuweisen von Benutzer- und Gruppenrechten."
"Repository-Creator","Repository-Ersteller"
"Can create new repositories, but NOT delete.","Kann Repositories erstellen, aber NICHT löschen."
"Repository-Manager","Repository-Verwalter"
"Create/Delete repositories.","Kann Repositories erstellen und löschen."
"Role-Manager","Rollen-Verwalter"
"Assign and unassign web application roles to users.","Kann Rollen an Benutzer vergeben."
"Update-Manager","Update-Verwalter"
"Can synchronize the user data from provider with the SVNAuthFile.","Kann ggfs. die Benutzer-/Gruppendaten von den Datenprovidern mit der SVNAuthFile synchronisieren." | {
"pile_set_name": "Github"
} |
namespace Blog.Test.Services
{
    using System.Threading.Tasks;
    using Blog.Services.Images;
    using Fakes;
    using Xunit;

    /// <summary>
    /// Unit tests for <see cref="ImageService"/>.
    /// </summary>
    public class ImageServiceTest
    {
        [Fact]
        public void CalculateOptimalSizeShouldReturnMinimumSizeWhenSizeIsLessThanTheAllowedMinimum()
        {
            // Arrange: request a resize far below the allowed minimum.
            const int allowedMinimum = 100;
            const int sourceDimension = 200;
            const int requestedDimension = 50;
            var service = new ImageService(null, null);

            // Act
            var (resultWidth, resultHeight) = service
                .CalculateOptimalSize(requestedDimension, requestedDimension, sourceDimension, sourceDimension);

            // Assert: both dimensions are clamped up to the minimum.
            Assert.Equal(allowedMinimum, resultWidth);
            Assert.Equal(allowedMinimum, resultHeight);
        }

        [Fact]
        public async Task UpdateImageShouldDownloadImageAndResizeItToCorrectDestination()
        {
            // Arrange
            const string sourceUrl = "TestImageUrl";
            const string targetPath = "TestDestination";
            const int dimension = 200;
            var webClient = new FakeWebClientService();
            var imageProcessor = new FakeImageProcessorService();
            var service = new ImageService(webClient, imageProcessor);

            // Act
            await service.UpdateImage(sourceUrl, targetPath, dimension, dimension);

            // Assert: the raw image lands at "<target>.jpg" and the resized
            // copy at "<target>_optimized.jpg".
            var downloadedFile = $"{targetPath}.jpg";
            Assert.True(webClient.FileDownloaded);
            Assert.Equal(downloadedFile, webClient.DownloadDestination);
            Assert.True(imageProcessor.ImageResized);
            Assert.Equal(downloadedFile, imageProcessor.ImageSource);
            Assert.Equal($"{targetPath}_optimized.jpg", imageProcessor.ImageDestination);
        }
    }
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 21940121e51c91c49b0fd1a836752bac
TextureImporter:
fileIDToRecycleName: {}
externalObjects: {}
serializedVersion: 4
mipmaps:
mipMapMode: 0
enableMipMap: 0
sRGBTexture: 1
linearTexture: 0
fadeOut: 0
borderMipMap: 0
mipMapsPreserveCoverage: 0
alphaTestReferenceValue: 0.5
mipMapFadeDistanceStart: 1
mipMapFadeDistanceEnd: 3
bumpmap:
convertToNormalMap: 0
externalNormalMap: 0
heightScale: 0.25
normalMapFilter: 0
isReadable: 0
grayScaleToAlpha: 0
generateCubemap: 6
cubemapConvolution: 0
seamlessCubemap: 0
textureFormat: 1
maxTextureSize: 2048
textureSettings:
serializedVersion: 2
filterMode: -1
aniso: -1
mipBias: -1
wrapU: 1
wrapV: 1
wrapW: 1
nPOTScale: 0
lightmap: 0
compressionQuality: 50
spriteMode: 1
spriteExtrude: 1
spriteMeshType: 1
alignment: 0
spritePivot: {x: 0.5, y: 0.5}
spritePixelsToUnits: 100
spriteBorder: {x: 0, y: 0, z: 0, w: 0}
spriteGenerateFallbackPhysicsShape: 1
alphaUsage: 1
alphaIsTransparency: 1
spriteTessellationDetail: -1
textureType: 8
textureShape: 1
maxTextureSizeSet: 0
compressionQualitySet: 0
textureFormatSet: 0
platformSettings:
- buildTarget: DefaultTexturePlatform
maxTextureSize: 256
resizeAlgorithm: 1
textureFormat: -1
textureCompression: 3
compressionQuality: 100
crunchedCompression: 1
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
- buildTarget: Standalone
maxTextureSize: 256
resizeAlgorithm: 1
textureFormat: -1
textureCompression: 3
compressionQuality: 100
crunchedCompression: 1
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
spriteSheet:
serializedVersion: 2
sprites: []
outline: []
physicsShape: []
spritePackingTag:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
{{- if .Values.clusterAgent.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "datadog.fullname" . }}-cluster-agent
labels:
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
app.kubernetes.io/name: "{{ template "datadog.fullname" . }}"
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
spec:
type: ClusterIP
selector:
app: {{ template "datadog.fullname" . }}-cluster-agent
ports:
- port: 5005
name: agentport
protocol: TCP
{{ end }}
{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "datadog.fullname" . }}-cluster-agent-metrics-api
labels:
app: "{{ template "datadog.fullname" . }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
app.kubernetes.io/name: "{{ template "datadog.fullname" . }}"
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
spec:
type: {{ .Values.clusterAgent.metricsProvider.service.type }}
selector:
app: {{ template "datadog.fullname" . }}-cluster-agent
ports:
- port: {{ .Values.clusterAgent.metricsProvider.service.port }}
name: metricsapi
protocol: TCP
{{ end }}
| {
"pile_set_name": "Github"
} |
<?php
/**
* This file is part of the wangningkai/olaindex.
* (c) wangningkai <[email protected]>
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace App\Helpers;
use App\Models\ShortUrl;
use App\Models\Account;
use Curl\Curl;
use Parsedown;
use Log;
use Cache;
class Tool
{
    /**
     * Add or replace a query-string parameter on a URL.
     *
     * Any existing "$key=..." segment is stripped first, then the new
     * key/value pair is appended with the proper separator ('?' or '&').
     *
     * @param string $url
     * @param string $key
     * @param string $value
     * @return string
     */
    public static function buildQueryParams($url, $key, $value)
    {
        // Fix: the separator before the key must be captured as "(\?|&)".
        // The previous pattern used "(?|&)" (a branch-reset group), which
        // produced only three capture groups and made the "$4" backreference
        // invalid, so "?key=value" at the start of the query was never
        // stripped. The key is also regex-escaped now.
        $url = preg_replace('/(.*)(\?|&)' . preg_quote($key, '/') . '=[^&]+?(&)(.*)/i', '$1$2$4', $url . '&');
        // Drop the sentinel '&' appended above.
        $url = substr($url, 0, -1);
        if (strpos($url, '?') === false) {
            return ($url . '?' . $key . '=' . $value);
        }
        return ($url . '&' . $key . '=' . $value);
    }

    /**
     * Convert markdown to HTML.
     *
     * @param string $markdown
     * @param bool $line render inline (single line) instead of block text
     *
     * @return string
     */
    public static function markdown2Html($markdown, $line = false): string
    {
        $parser = new Parsedown();
        if (!$line) {
            $html = $parser->text($markdown);
        } else {
            $html = $parser->line($markdown);
        }
        return $html;
    }

    /**
     * Create (or reuse) a short URL for the given original URL.
     *
     * @param string $url
     * @return mixed route to the short-code redirect
     */
    public static function shortenUrl($url)
    {
        $code = shorten_str($url);
        $data = ShortUrl::query()->select('id', 'original_url', 'short_code')->where(['short_code' => $code])->first();
        // Persist the mapping only the first time this code is seen.
        if (!$data) {
            $new = new ShortUrl();
            $new->short_code = $code;
            $new->original_url = $url;
            $new->save();
        }
        return route('short', ['code' => $code]);
    }

    /**
     * Resolve a short code back to its original URL.
     *
     * @param string $code
     * @return \Illuminate\Database\Eloquent\HigherOrderBuilderProxy|mixed|string
     *         the original URL, or '' when the code is unknown
     */
    public static function decodeShortUrl($code)
    {
        $url = ShortUrl::query()->select('id', 'original_url', 'short_code')->where(['short_code' => $code])->first();
        if (!$url) {
            return '';
        }
        return $url->original_url;
    }

    /**
     * Build the breadcrumb path up to (but excluding) segment $key.
     *
     * @param string $key number of leading path segments to keep
     * @param string $path list of path segments
     * @return string slash-joined path without leading/trailing slashes
     */
    public static function combineBreadcrumb($key, $path): string
    {
        $path = array_slice($path, 0, $key);
        $url = '';
        foreach ($path as $param) {
            $url .= '/' . $param;
        }
        return trim($url, '/');
    }

    /**
     * Breadcrumb "go back one level": drop the last path segment.
     *
     * @param string $path list of path segments
     * @return string parent path, or '' at the root
     */
    public static function fetchGoBack($path): string
    {
        array_pop($path);
        if (count($path) === 0) {
            return '';
        }
        $url = '';
        foreach ($path as $param) {
            $url .= '/' . $param;
        }
        return trim($url, '/');
    }

    /**
     * Map a file extension to an icon name.
     *
     * @param string $ext lowercase file extension (or 'folder')
     * @return mixed|string icon identifier, 'file' when unknown
     */
    public static function fetchExtIco($ext)
    {
        // [icon name, extensions] per category; first match wins.
        $patterns = [
            'stream' => ['file-text', ['txt', 'log']],
            'image' => ['image', ['bmp', 'jpg', 'jpeg', 'png', 'gif', 'ico', 'jpe']],
            'video' => ['video', ['mkv', 'mp4', 'webm', 'avi', 'mpg', 'mpeg', 'rm', 'rmvb', 'mov', 'wmv', 'asf', 'ts', 'flv',]],
            'audio' => ['file-music', ['ogg', 'mp3', 'wav']],
            'code' => ['file-code', ['html', 'htm', 'css', 'go', 'java', 'js', 'json', 'txt', 'sh', 'md', 'php',]],
            'doc' => ['file-word', ['csv', 'doc', 'docx', 'odp', 'ods', 'odt', 'pot', 'potm', 'potx', 'pps', 'ppsx', 'ppsxm', 'ppt', 'pptm', 'pptx', 'rtf', 'xls', 'xlsx',]],
            'pdf' => ['pdf', ['pdf']],
            'zip' => ['file-zip', ['zip', '7z', 'rar', 'bz', 'gz']],
            'android' => ['android', ['apk']],
            'exe' => ['apps', ['exe', 'msi']],
            'folder' => ['folder', ['folder']],
        ];
        $icon = 'file';
        foreach ($patterns as $key => $suffix) {
            if (in_array($ext, $suffix[1], false)) {
                $icon = $suffix[0];
                break;
            }
        }
        return $icon;
    }

    /**
     * Map a file extension to a MIME content type for streaming.
     *
     * @param string $ext lowercase file extension
     * @return mixed|string MIME type, 'application/octet-stream' when unknown
     */
    public static function fetchFileType($ext)
    {
        // Fix: duplicate array keys removed. PHP keeps the LAST duplicate, so
        // the stray second 'txt' => 'text/html' silently overrode the intended
        // 'text/plain'; redundant 'ts' and 'shar' entries are dropped too.
        // Also fixed the misspelled MIME 'application/postsrcipt'.
        $map = [
            'file' => 'application/octet-stream',
            'chm' => 'application/octet-stream',
            'ppt' => 'application/vnd.ms-powerpoint',
            'xls' => 'application/vnd.ms-excel',
            'doc' => 'application/msword',
            'exe' => 'application/octet-stream',
            'rar' => 'application/octet-stream',
            'js' => 'javascript/js',
            'css' => 'text/css',
            'hqx' => 'application/mac-binhex40',
            'bin' => 'application/octet-stream',
            'oda' => 'application/oda',
            'pdf' => 'application/pdf',
            'ai' => 'application/postscript',
            'eps' => 'application/postscript',
            'es' => 'application/postscript',
            'rtf' => 'application/rtf',
            'mif' => 'application/x-mif',
            'csh' => 'application/x-csh',
            'dvi' => 'application/x-dvi',
            'hdf' => 'application/x-hdf',
            'nc' => 'application/x-netcdf',
            'cdf' => 'application/x-netcdf',
            'latex' => 'application/x-latex',
            'ts' => 'application/x-troll-ts',
            'src' => 'application/x-wais-source',
            'zip' => 'application/zip',
            'bcpio' => 'application/x-bcpio',
            'cpio' => 'application/x-cpio',
            'gtar' => 'application/x-gtar',
            'shar' => 'application/x-shar',
            'sv4cpio' => 'application/x-sv4cpio',
            'sv4crc' => 'application/x-sv4crc',
            'tar' => 'application/x-tar',
            'ustar' => 'application/x-ustar',
            'man' => 'application/x-troff-man',
            'sh' => 'application/x-sh',
            'tcl' => 'application/x-tcl',
            'tex' => 'application/x-tex',
            'texi' => 'application/x-texinfo',
            'texinfo' => 'application/x-texinfo',
            't' => 'application/x-troff',
            'tr' => 'application/x-troff',
            'roff' => 'application/x-troff',
            'me' => 'application/x-troll-me',
            'gif' => 'image/gif',
            // 'image/pjpeg' (IE legacy progressive JPEG) kept as-is for
            // backward compatibility with existing clients.
            'jpeg' => 'image/pjpeg',
            'jpg' => 'image/pjpeg',
            'jpe' => 'image/pjpeg',
            'ras' => 'image/x-cmu-raster',
            'pbm' => 'image/x-portable-bitmap',
            'ppm' => 'image/x-portable-pixmap',
            'xbm' => 'image/x-xbitmap',
            'xwd' => 'image/x-xwindowdump',
            'ief' => 'image/ief',
            'tif' => 'image/tiff',
            'tiff' => 'image/tiff',
            'pnm' => 'image/x-portable-anymap',
            'pgm' => 'image/x-portable-graymap',
            'rgb' => 'image/x-rgb',
            'xpm' => 'image/x-xpixmap',
            'txt' => 'text/plain',
            'c' => 'text/plain',
            'cc' => 'text/plain',
            'h' => 'text/plain',
            'html' => 'text/html',
            'htm' => 'text/html',
            'htl' => 'text/html',
            'php' => 'text/html',
            'rtx' => 'text/richtext',
            'etx' => 'text/x-setext',
            'tsv' => 'text/tab-separated-values',
            'mpeg' => 'video/mpeg',
            'mpg' => 'video/mpeg',
            'mpe' => 'video/mpeg',
            'avi' => 'video/x-msvideo',
            'qt' => 'video/quicktime',
            'mov' => 'video/quicktime',
            'moov' => 'video/quicktime',
            'movie' => 'video/x-sgi-movie',
            'au' => 'audio/basic',
            'snd' => 'audio/basic',
            'wav' => 'audio/x-wav',
            'aif' => 'audio/x-aiff',
            'aiff' => 'audio/x-aiff',
            'aifc' => 'audio/x-aiff',
            'swf' => 'application/x-shockwave-flash',
            'myz' => 'application/myz',
        ];
        return array_get($map, $ext, 'application/octet-stream');
    }

    /**
     * Fetch the raw body of a remote file over HTTP(S).
     *
     * @param string $url
     * @return string|null raw response body
     * @throws \Exception on any transport/HTTP error (message/code from cURL)
     */
    public static function fetchContent($url)
    {
        $curl = new Curl();
        $curl->setConnectTimeout(5);
        // NOTE(review): total timeout (3s) is shorter than the connect
        // timeout (5s) — looks unintentional; confirm before changing.
        $curl->setTimeout(3);
        $curl->setRetry(3);
        $curl->setProxy(config('olaindex.proxy'));
        $curl->setOpts([
            CURLOPT_AUTOREFERER => true,
            CURLOPT_FAILONERROR => true,
            CURLOPT_FOLLOWLOCATION => true,
            CURLOPT_ENCODING => 'gzip,deflate',
        ]);
        $curl->get($url);
        $curl->close();
        if ($curl->error) {
            Log::error(
                '获取远程文件内容失败',
                [
                    'code' => $curl->errorCode,
                    'msg' => $curl->errorMessage,
                ]
            );
            throw new \Exception($curl->errorMessage, $curl->errorCode);
        }
        return $curl->rawResponse;
    }

    /**
     * Fetch the enabled accounts (id + remark), cached for 600 seconds.
     *
     * @return mixed collection of Account models
     */
    public static function fetchAccounts()
    {
        return Cache::remember('ac:list', 600, static function () {
            return Account::query()
                ->select(['id', 'remark'])
                ->where('status', 1)->get();
        });
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_PARAMS_H
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_PARAMS_H

/* Parameter set for SPHINCS+-Haraka-192f-robust: all sizes below are
   derived from these few primary constants. */

/* Hash output length in bytes. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N 24
/* Height of the hypertree. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FULL_HEIGHT 66
/* Number of subtree layers. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_D 22
/* FORS tree dimensions. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_HEIGHT 8
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_TREES 33
/* Winternitz parameter. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_W 16
/* The hash function is defined by linking a different hash.c file, as opposed
   to setting a #define constant. */
/* For clarity */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_ADDR_BYTES 32
/* WOTS parameters. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LOGW 4
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN1 (8 * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N / PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LOGW)
/* PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN2 is floor(log(len_1 * (w - 1)) / log(w)) + 1; we precompute */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN2 3
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN (PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN1 + PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN2)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_BYTES (PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_LEN * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_PK_BYTES PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_BYTES
/* Subtree size (height of each layer's Merkle tree). */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_TREE_HEIGHT (PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FULL_HEIGHT / PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_D)
/* FORS parameters. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_MSG_BYTES ((PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_HEIGHT * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_TREES + 7) / 8)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_BYTES ((PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_HEIGHT + 1) * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_TREES * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_PK_BYTES PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N
/* Resulting SPX sizes (signature, public key, secret key). */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_BYTES (PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N + PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FORS_BYTES + PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_D * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_WOTS_BYTES +\
    PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_FULL_HEIGHT * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_PK_BYTES (2 * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N)
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_SK_BYTES (2 * PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_N + PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_PK_BYTES)
/* Optionally, signing can be made non-deterministic using optrand.
   This can help counter side-channel attacks that would benefit from
   getting a large number of traces when the signer uses the same nodes. */
#define PQCLEAN_SPHINCSHARAKA192FROBUST_CLEAN_OPTRAND_BYTES 32

#endif
| {
"pile_set_name": "Github"
} |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
 * The default broadcast address of an interface is QST-0; the default address
 * is LINUX-1. The null address is defined as a callsign of all spaces with
 * an SSID of zero.
 *
 * Each stored byte is the ASCII character shifted left one bit, matching
 * the AX.25 on-air address encoding used throughout this file.
 */
const ax25_address ax25_bcast =
{{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
const ax25_address ax25_defaddr =
{{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, 1 << 1}};
const ax25_address null_ax25_address =
{{' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
EXPORT_SYMBOL_GPL(ax25_bcast);
EXPORT_SYMBOL_GPL(ax25_defaddr);
EXPORT_SYMBOL(null_ax25_address);
/*
 * ax25 -> ascii conversion
 *
 * Formats the shifted AX.25 callsign in @a into @buf as "CALL-S".
 * Returns "*" for a null/blank callsign. @buf must hold at least
 * 10 bytes (6 callsign chars, '-', up to 2 SSID digits, NUL).
 */
char *ax2asc(char *buf, const ax25_address *a)
{
	char c, *s;
	int n;

	for (n = 0, s = buf; n < 6; n++) {
		/* Each stored byte is the ASCII character shifted left one bit. */
		c = (a->ax25_call[n] >> 1) & 0x7F;
		if (c != ' ') *s++ = c;	/* skip space padding */
	}

	*s++ = '-';

	/* SSID is the 4-bit field in bits 1-4 of the seventh byte (0-15). */
	if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
		*s++ = '1';
		n -= 10;
	}

	*s++ = n + '0';
	*s++ = '\0';

	/* An all-space callsign formats as "" or "-0"; report it as "*". */
	if (*buf == '\0' || *buf == '-')
		return "*";

	return buf;
}
EXPORT_SYMBOL(ax2asc);
/*
 * ascii -> ax25 conversion
 *
 * Parses "CALLSIGN-SSID" into an ax25_address: up to six callsign
 * characters (space padded), each shifted left one bit into on-air
 * format, followed by the decimal SSID packed into bits 1-4 of byte 6.
 *
 * NOTE(review): the input is trusted -- callsigns longer than six
 * characters or non-numeric SSIDs produce garbage rather than an
 * error; confirm callers validate the string first.
 */
void asc2ax(ax25_address *addr, const char *callsign)
{
	const char *s;
	int n;

	for (s = callsign, n = 0; n < 6; n++) {
		if (*s != '\0' && *s != '-')
			addr->ax25_call[n] = *s++;
		else
			addr->ax25_call[n] = ' ';	/* pad short callsigns */
		addr->ax25_call[n] <<= 1;	/* on-air format: char << 1 */
		addr->ax25_call[n] &= 0xFE;
	}

	/* no "-SSID" suffix: SSID defaults to zero */
	if (*s++ == '\0') {
		addr->ax25_call[6] = 0x00;
		return;
	}

	/* one or two decimal SSID digits follow the '-' */
	addr->ax25_call[6] = *s++ - '0';

	if (*s != '\0') {
		addr->ax25_call[6] *= 10;
		addr->ax25_call[6] += *s++ - '0';
	}

	addr->ax25_call[6] <<= 1;
	addr->ax25_call[6] &= 0x1E;	/* keep only the 4-bit SSID field */
}
EXPORT_SYMBOL(asc2ax);
/*
 * Compare two AX.25 addresses: callsign bytes first (ignoring the
 * repeater bit in bit 0), then the SSID nibble.
 *
 * Returns 0 on a full match, 1 if the callsigns differ, and 2 when
 * only the SSIDs differ (a partial match).
 */
int ax25cmp(const ax25_address *a, const ax25_address *b)
{
	int i;

	for (i = 0; i < 6; i++) {
		/* mask off bit 0, which carries repeater bits, not address data */
		if ((a->ax25_call[i] & 0xFE) != (b->ax25_call[i] & 0xFE))
			return 1;
	}

	/* byte 6: compare the SSID without the control bit */
	return ((a->ax25_call[6] & 0x1E) == (b->ax25_call[6] & 0x1E)) ? 0 : 2;
}
EXPORT_SYMBOL(ax25cmp);
/*
 * Compare two AX.25 digipeater paths.
 *
 * Returns 0 when both paths have the same length, the same lastrepeat
 * marker and pairwise-identical digipeater addresses; 1 otherwise.
 */
int ax25digicmp(const ax25_digi *digi1, const ax25_digi *digi2)
{
	int i;

	if (digi1->ndigi != digi2->ndigi)
		return 1;

	if (digi1->lastrepeat != digi2->lastrepeat)
		return 1;

	/* element-wise address comparison */
	for (i = 0; i < digi1->ndigi; i++)
		if (ax25cmp(&digi1->calls[i], &digi2->calls[i]) != 0)
			return 1;

	return 0;
}
/*
 * Given an AX.25 address pull of to, from, digi list, command/response and the start of data
 *
 * Parses the address field at the start of an AX.25 frame:
 *   buf/len  - raw frame; must hold at least dest + src (14 bytes)
 *   src/dest - filled with source/destination address when non-NULL
 *   digi     - filled with the digipeater list; always dereferenced,
 *              so it must NOT be NULL
 *   flags    - AX25_COMMAND / AX25_RESPONSE from the C bits, if non-NULL
 *   dama     - DAMA flag taken from the source SSID byte, if non-NULL
 *
 * Returns a pointer to the first byte after the address field (the
 * control byte), or NULL on a truncated or oversized header.
 */
const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
	ax25_address *src, ax25_address *dest, ax25_digi *digi, int *flags,
	int *dama)
{
	int d = 0;

	if (len < 14) return NULL;

	if (flags != NULL) {
		*flags = 0;

		if (buf[6] & AX25_CBIT)
			*flags = AX25_COMMAND;
		if (buf[13] & AX25_CBIT)
			*flags = AX25_RESPONSE;
	}

	if (dama != NULL)
		*dama = ~buf[13] & AX25_DAMA_FLAG;

	/* Copy to, from */
	if (dest != NULL)
		memcpy(dest, buf + 0, AX25_ADDR_LEN);
	if (src != NULL)
		memcpy(src, buf + 7, AX25_ADDR_LEN);

	buf += 2 * AX25_ADDR_LEN;
	len -= 2 * AX25_ADDR_LEN;

	digi->lastrepeat = -1;
	digi->ndigi = 0;

	/* the E (end-of-address) bit of the previous SSID byte tells us
	 * whether further digipeater addresses follow */
	while (!(buf[-1] & AX25_EBIT)) {
		if (d >= AX25_MAX_DIGIS)
			return NULL;	/* too many digipeaters */
		if (len < AX25_ADDR_LEN)
			return NULL;	/* short packet */

		memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
		digi->ndigi = d + 1;

		/* H bit set => this digipeater already repeated the frame */
		if (buf[6] & AX25_HBIT) {
			digi->repeated[d] = 1;
			digi->lastrepeat = d;
		} else {
			digi->repeated[d] = 0;
		}

		buf += AX25_ADDR_LEN;
		len -= AX25_ADDR_LEN;
		d++;
	}

	return buf;
}
/*
 * Assemble an AX.25 header from the bits
 *
 * Writes destination, source and the optional digipeater list into buf
 * in on-air format, setting the C (command/response), H (repeated) and
 * E (end-of-address) bits as required.  Returns the number of bytes
 * written.  buf must be at least ax25_addr_size(d) bytes long.
 */
int ax25_addr_build(unsigned char *buf, const ax25_address *src,
	const ax25_address *dest, const ax25_digi *d, int flag, int modulus)
{
	int len = 0;
	int ct = 0;

	memcpy(buf, dest, AX25_ADDR_LEN);
	buf[6] &= ~(AX25_EBIT | AX25_CBIT);
	buf[6] |= AX25_SSSID_SPARE;

	if (flag == AX25_COMMAND) buf[6] |= AX25_CBIT;

	buf += AX25_ADDR_LEN;
	len += AX25_ADDR_LEN;

	memcpy(buf, src, AX25_ADDR_LEN);
	buf[6] &= ~(AX25_EBIT | AX25_CBIT);
	buf[6] &= ~AX25_SSSID_SPARE;

	/* the spare bits of the source SSID encode the sequence modulus */
	if (modulus == AX25_MODULUS)
		buf[6] |= AX25_SSSID_SPARE;
	else
		buf[6] |= AX25_ESSID_SPARE;

	if (flag == AX25_RESPONSE) buf[6] |= AX25_CBIT;

	/*
	 * Fast path the normal digiless path
	 */
	if (d == NULL || d->ndigi == 0) {
		buf[6] |= AX25_EBIT;	/* source SSID is the last address byte */
		return 2 * AX25_ADDR_LEN;
	}

	buf += AX25_ADDR_LEN;
	len += AX25_ADDR_LEN;

	while (ct < d->ndigi) {
		memcpy(buf, &d->calls[ct], AX25_ADDR_LEN);

		if (d->repeated[ct])
			buf[6] |= AX25_HBIT;	/* frame already went through this hop */
		else
			buf[6] &= ~AX25_HBIT;

		buf[6] &= ~AX25_EBIT;
		buf[6] |= AX25_SSSID_SPARE;

		buf += AX25_ADDR_LEN;
		len += AX25_ADDR_LEN;
		ct++;
	}

	/* mark the final address byte written as end-of-address */
	buf[-1] |= AX25_EBIT;

	return len;
}
/*
 * Number of bytes the on-air address field will occupy: destination,
 * source, plus one entry per digipeater (none when dp is NULL).
 */
int ax25_addr_size(const ax25_digi *dp)
{
	int ndigi = (dp != NULL) ? dp->ndigi : 0;

	return (2 + ndigi) * AX25_ADDR_LEN;
}
/*
 * Reverse Digipeat List. May not pass both parameters as same struct
 *
 * Builds the return path: digipeaters are listed in the opposite
 * order and the "repeated" flags are flipped, so the hops that were
 * still ahead on the outbound path become the ones already used on
 * the way back.
 */
void ax25_digi_invert(const ax25_digi *in, ax25_digi *out)
{
	int ct;

	out->ndigi = in->ndigi;
	/* mirror lastrepeat's position within the reversed list */
	out->lastrepeat = in->ndigi - in->lastrepeat - 2;

	/* Invert the digipeaters */
	for (ct = 0; ct < in->ndigi; ct++) {
		out->calls[ct] = in->calls[in->ndigi - ct - 1];

		if (ct <= out->lastrepeat) {
			/* hops up to lastrepeat are marked already-repeated */
			out->calls[ct].ax25_call[6] |= AX25_HBIT;
			out->repeated[ct] = 1;
		} else {
			out->calls[ct].ax25_call[6] &= ~AX25_HBIT;
			out->repeated[ct] = 0;
		}
	}
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2003-present, Jodd Team (http://jodd.org)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package jodd.servlet.filter;
import javax.servlet.ReadListener;
import javax.servlet.ServletInputStream;
import java.io.ByteArrayInputStream;
/**
* Servlet input stream that is backed by an byte array.
*/
public class ByteArrayServletInputStream extends ServletInputStream {
private final ByteArrayInputStream inputStream;
public ByteArrayServletInputStream(final byte[] body) {
this.inputStream = new ByteArrayInputStream(body);
}
public ByteArrayServletInputStream(final ByteArrayInputStream baos) {
this.inputStream = baos;
}
@Override
public int read() {
return inputStream.read();
}
@Override
public boolean isFinished() {
return inputStream.available() == 0;
}
@Override
public boolean isReady() {
return true;
}
@Override
public void setReadListener(final ReadListener readListener) {
throw new RuntimeException("Not implemented");
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2011 Stephan Aßmus <[email protected]>
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef TOOL_BAR_ICONS_H
#define TOOL_BAR_ICONS_H
#include <SupportDefs.h>
class BBitmap;
// Indices of the stock tool bar icons; pass one of these constants to
// tool_bar_icon() after init_tool_bar_icons() has succeeded.
enum {
	kIconDocumentOpen = 0,
	kIconDocumentSaveAs,
	kIconDocumentSave,
	kIconDrawRectangularSelection,
	kIconEditCopy,
	kIconEditCut,
	kIconEditDelete,
	kIconEditTrash,
	kIconMediaMovieLibrary,
	kIconMediaPlaybackStartEnabled,
	kIconGoDown,
	kIconGoNext,
	kIconGoPrevious,
	kIconGoUp,
	kIconViewFullScreen,
	kIconViewWindowed,
	kIconZoomFitBest,
	kIconZoomFitViewBest,
	kIconZoomIn,
	kIconZoomOriginal,
	kIconZoomOut,
	kIconPagePrevious,
	kIconPageNext
};

// Loads the icon bitmaps; call once before using tool_bar_icon().
status_t init_tool_bar_icons();
// Releases the bitmaps loaded by init_tool_bar_icons().
void uninit_tool_bar_icons();
// Returns the bitmap for the given kIcon* index
// (NOTE(review): presumably NULL when uninitialized or out of range -- confirm in the .cpp).
const BBitmap* tool_bar_icon(uint32 which);
#endif // TOOL_BAR_ICONS_H
| {
"pile_set_name": "Github"
} |
<span class="hljs-function"><span class="hljs-keyword">function</span> <span class="hljs-title">visibleTodoFilter</span>(<span class="hljs-params">state = 'watch', action</span>) </span>{
<span class="hljs-keyword">switch</span> (action.type) {
<span class="hljs-keyword">case</span> <span class="hljs-string">'CHANGE_VISIBLE_FILTER'</span>:
<span class="hljs-keyword">return</span> action.filter;
<span class="hljs-keyword">default</span>:
<span class="hljs-keyword">return</span> state;
}
}
<span class="hljs-function"><span class="hljs-keyword">function</span> <span class="hljs-title">todos</span>(<span class="hljs-params">state, action</span>) </span>{
<span class="hljs-keyword">switch</span> (action.type) {
<span class="hljs-keyword">case</span> <span class="hljs-string">'ADD_TODO'</span>:
<span class="hljs-keyword">return</span> [...state, {
text: action.text,
completed: <span class="hljs-literal">false</span>
}];
<span class="hljs-keyword">case</span> <span class="hljs-string">'COMPLETE_TODO'</span>:
<span class="hljs-keyword">return</span> [
...state.slice(<span class="hljs-number">0</span>, action.index),
<span class="hljs-built_in">Object</span>.assign({}, state[action.index], {
completed: <span class="hljs-literal">true</span>
}),
...state.slice(action.index + <span class="hljs-number">1</span>)
]
<span class="hljs-keyword">default</span>:
<span class="hljs-keyword">return</span> state;
}
}
<span class="hljs-keyword">import</span> { combineReducers, createStore } <span class="hljs-keyword">from</span> <span class="hljs-string">'redux'</span>;
<span class="hljs-keyword">let</span> reducer = combineReducers({ visibleTodoFilter, todos });
<span class="hljs-keyword">let</span> store = createStore(reducer);
| {
"pile_set_name": "Github"
} |
# Shell test fixture. Expected output is "Zero:0": `false` sets $? to 1,
# but `for v; do ...` iterates over the (empty) positional parameters, so
# the body -- and its `exit 2` -- never runs, and POSIX defines the exit
# status of a for loop that executes no commands as 0.
false
for v; do
	exit 2
done
echo Zero:$?
| {
"pile_set_name": "Github"
} |
import './go-wasm-runtime.js';

// Polyfill for environments that lack WebAssembly.instantiateStreaming
// (e.g. older Safari): buffer the whole response, then fall back to
// WebAssembly.instantiate on the ArrayBuffer.
if (!WebAssembly.instantiateStreaming) { // polyfill
	WebAssembly.instantiateStreaming = async (resp, importObject) => {
		const source = await (await resp).arrayBuffer();
		return await WebAssembly.instantiate(source, importObject);
	};
}

// Go runtime bridge supplied by go-wasm-runtime.js (wasm_exec.js glue).
const go = new Go();
WebAssembly.instantiateStreaming(fetch("hello.wasm"), go.importObject).then((result) => {
return WebAssembly.instantiate(result.module, go.importObject);
}).then(instance => go.run(instance)); | {
"pile_set_name": "Github"
} |
/*
* Virtual Raw MIDI client on Sequencer
*
* Copyright (c) 2000 by Takashi Iwai <[email protected]>,
* Jaroslav Kysela <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* Virtual Raw MIDI client
*
* The virtual rawmidi client is a sequencer client which associate
* a rawmidi device file. The created rawmidi device file can be
* accessed as a normal raw midi, but its MIDI source and destination
* are arbitrary. For example, a user-client software synth connected
* to this port can be used as a normal midi device as well.
*
* The virtual rawmidi device accepts also multiple opens. Each file
* has its own input buffer, so that no conflict would occur. The drain
* of input/output buffer acts only to the local buffer.
*
*/
#include <sound/driver.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/minors.h>
#include <sound/seq_kernel.h>
#include <sound/seq_midi_event.h>
#include <sound/seq_virmidi.h>
MODULE_AUTHOR("Takashi Iwai <[email protected]>");
MODULE_DESCRIPTION("Virtual Raw MIDI client on Sequencer");
MODULE_LICENSE("GPL");
/*
 * initialize an event record
 *
 * Zeroes *ev and points its source at our port.  The destination
 * depends on the attachment mode: DISPATCH targets whoever subscribed
 * to us, ATTACH loops the event back to our own client/port pair.
 */
static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
				   struct snd_seq_event *ev)
{
	memset(ev, 0, sizeof(*ev));
	ev->source.port = vmidi->port;
	switch (vmidi->seq_mode) {
	case SNDRV_VIRMIDI_SEQ_DISPATCH:
		ev->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		break;
	case SNDRV_VIRMIDI_SEQ_ATTACH:
		/* FIXME: source and destination are same - not good.. */
		ev->dest.client = vmidi->client;
		ev->dest.port = vmidi->port;
		break;
	}
	ev->type = SNDRV_SEQ_EVENT_NONE;	/* no payload yet */
}
/*
 * decode input event and put to read buffer of each opened file
 *
 * Walks every open input file on this device (under the filelist read
 * lock) and feeds it the sequencer event: sysex payloads are dumped
 * straight through, anything else is decoded into raw MIDI bytes.
 * Files whose input stream is not triggered are skipped.
 */
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
					 struct snd_seq_event *ev)
{
	struct snd_virmidi *vmidi;
	unsigned char msg[4];
	int len;

	read_lock(&rdev->filelist_lock);
	list_for_each_entry(vmidi, &rdev->filelist, list) {
		if (!vmidi->trigger)
			continue;	/* input not started for this file */
		if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
			/* sysex must carry variable-length data */
			if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
				continue;
			snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
		} else {
			len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
			if (len > 0)
				snd_rawmidi_receive(vmidi->substream, msg, len);
		}
	}
	read_unlock(&rdev->filelist_lock);
	return 0;
}
/*
* receive an event from the remote virmidi port
*
* for rawmidi inputs, you can call this function from the event
* handler of a remote port which is attached to the virmidi via
* SNDRV_VIRMIDI_SEQ_ATTACH.
*/
#if 0
int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
{
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
return snd_virmidi_dev_receive_event(rdev, ev);
}
#endif /* 0 */
/*
 * event handler of virmidi port
 *
 * Called by the sequencer core for every event addressed to our port;
 * events are silently dropped (return 0) until a reader has opened the
 * rawmidi input side (SNDRV_VIRMIDI_USE set via the use callback).
 */
static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
				   void *private_data, int atomic, int hop)
{
	struct snd_virmidi_dev *rdev;

	rdev = private_data;
	if (!(rdev->flags & SNDRV_VIRMIDI_USE))
		return 0; /* ignored */
	return snd_virmidi_dev_receive_event(rdev, ev);
}
/*
 * Start or stop the rawmidi input stream.  The trigger flag simply
 * gates event delivery in snd_virmidi_dev_receive_event().
 */
static void snd_virmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct snd_virmidi *vmidi = substream->runtime->private_data;

	vmidi->trigger = up ? 1 : 0;
}
/*
 * trigger rawmidi stream for output
 *
 * On start-up, drain the rawmidi output buffer: bytes are encoded into
 * sequencer events and dispatched to this port's destination.  In
 * DISPATCH mode with no subscribers, pending data is simply acked away.
 * On stop, only the trigger flag is cleared.
 */
static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct snd_virmidi *vmidi = substream->runtime->private_data;
	int count, res;
	unsigned char buf[32], *pbuf;

	if (up) {
		vmidi->trigger = 1;
		if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
		    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
			/* nobody is listening: acknowledge everything away */
			snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
			return;		/* ignored */
		}
		/* flush an event left over from the previous run first */
		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
				return;
			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
		}
		while (1) {
			count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
			if (count <= 0)
				break;	/* output buffer drained */
			pbuf = buf;
			while (count > 0) {
				res = snd_midi_event_encode(vmidi->parser, pbuf, count, &vmidi->event);
				if (res < 0) {
					/* NOTE(review): pbuf/count are not advanced
					 * here, so a persistently failing encode on
					 * the same bytes would spin forever --
					 * confirm the parser cannot keep failing
					 * after a reset. */
					snd_midi_event_reset_encode(vmidi->parser);
					continue;
				}
				snd_rawmidi_transmit_ack(substream, res);
				pbuf += res;
				count -= res;
				/* dispatch as soon as a full event is assembled */
				if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
					if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
						return;
					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
				}
			}
		}
	} else {
		vmidi->trigger = 0;
	}
}
/*
 * open rawmidi handle for input
 *
 * Allocates a per-file snd_virmidi context with its own MIDI decoder
 * and links it into the device's file list (under the write lock) so
 * incoming sequencer events are fanned out to it.
 */
static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
{
	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
	struct snd_rawmidi_runtime *runtime = substream->runtime;
	struct snd_virmidi *vmidi;
	unsigned long flags;

	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
	if (vmidi == NULL)
		return -ENOMEM;
	vmidi->substream = substream;
	/* decode direction only: no encoder buffer needed */
	if (snd_midi_event_new(0, &vmidi->parser) < 0) {
		kfree(vmidi);
		return -ENOMEM;
	}
	vmidi->seq_mode = rdev->seq_mode;
	vmidi->client = rdev->client;
	vmidi->port = rdev->port;
	runtime->private_data = vmidi;
	write_lock_irqsave(&rdev->filelist_lock, flags);
	list_add_tail(&vmidi->list, &rdev->filelist);
	write_unlock_irqrestore(&rdev->filelist_lock, flags);
	vmidi->rdev = rdev;
	return 0;
}
/*
 * open rawmidi handle for output
 *
 * Allocates a per-file context with an encoder parser (buffer sized
 * for the largest MIDI event) plus a pre-initialized sequencer event
 * template.  Output files are not placed on the device file list.
 */
static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
{
	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
	struct snd_rawmidi_runtime *runtime = substream->runtime;
	struct snd_virmidi *vmidi;

	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
	if (vmidi == NULL)
		return -ENOMEM;
	vmidi->substream = substream;
	if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &vmidi->parser) < 0) {
		kfree(vmidi);
		return -ENOMEM;
	}
	vmidi->seq_mode = rdev->seq_mode;
	vmidi->client = rdev->client;
	vmidi->port = rdev->port;
	snd_virmidi_init_event(vmidi, &vmidi->event);
	vmidi->rdev = rdev;
	runtime->private_data = vmidi;
	return 0;
}
/*
* close rawmidi handle for input
*/
static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
snd_midi_event_free(vmidi->parser);
list_del(&vmidi->list);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
}
/*
 * close rawmidi handle for output
 *
 * Frees the encoder parser and the per-file context.  Output files are
 * never linked into the device file list, so no locking is needed.
 */
static int snd_virmidi_output_close(struct snd_rawmidi_substream *substream)
{
	struct snd_virmidi *vmidi = substream->runtime->private_data;

	snd_midi_event_free(vmidi->parser);
	substream->runtime->private_data = NULL;
	kfree(vmidi);
	return 0;
}
/*
 * subscribe callback - allow output to rawmidi device
 *
 * Pins the card's module while a writer is connected so it cannot be
 * unloaded underneath us; the reference is dropped on unsubscribe.
 */
static int snd_virmidi_subscribe(void *private_data,
				 struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev;

	rdev = private_data;
	/* NOTE(review): -EFAULT is an unusual errno for "module going away";
	 * callers may rely on it, so it is left unchanged */
	if (!try_module_get(rdev->card->module))
		return -EFAULT;
	rdev->flags |= SNDRV_VIRMIDI_SUBSCRIBE;
	return 0;
}
/*
 * unsubscribe callback - disallow output to rawmidi device
 *
 * Clears the subscribe flag and drops the module reference taken in
 * snd_virmidi_subscribe().
 */
static int snd_virmidi_unsubscribe(void *private_data,
				   struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev;

	rdev = private_data;
	rdev->flags &= ~SNDRV_VIRMIDI_SUBSCRIBE;
	module_put(rdev->card->module);
	return 0;
}
/*
 * use callback - allow input to rawmidi device
 *
 * Pins the card's module while a reader is connected; the USE flag
 * gates event delivery in snd_virmidi_event_input().
 */
static int snd_virmidi_use(void *private_data,
			   struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev;

	rdev = private_data;
	if (!try_module_get(rdev->card->module))
		return -EFAULT;
	rdev->flags |= SNDRV_VIRMIDI_USE;
	return 0;
}
/*
 * unuse callback - disallow input to rawmidi device
 *
 * Clears the USE flag and drops the module reference taken in
 * snd_virmidi_use().
 */
static int snd_virmidi_unuse(void *private_data,
			     struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev;

	rdev = private_data;
	rdev->flags &= ~SNDRV_VIRMIDI_USE;
	module_put(rdev->card->module);
	return 0;
}
/*
 * Register functions
 */

/* rawmidi callbacks for the input (sequencer -> rawmidi) direction */
static struct snd_rawmidi_ops snd_virmidi_input_ops = {
	.open = snd_virmidi_input_open,
	.close = snd_virmidi_input_close,
	.trigger = snd_virmidi_input_trigger,
};

/* rawmidi callbacks for the output (rawmidi -> sequencer) direction */
static struct snd_rawmidi_ops snd_virmidi_output_ops = {
	.open = snd_virmidi_output_open,
	.close = snd_virmidi_output_close,
	.trigger = snd_virmidi_output_trigger,
};
/*
 * create a sequencer client and a port
 *
 * Idempotent: returns 0 immediately if a client already exists.  On
 * success rdev->client/rdev->port identify the new duplex port; on
 * failure the half-created client is torn down and an errno returned.
 */
static int snd_virmidi_dev_attach_seq(struct snd_virmidi_dev *rdev)
{
	int client;
	struct snd_seq_port_callback pcallbacks;
	struct snd_seq_port_info *pinfo;
	int err;

	if (rdev->client >= 0)
		return 0;	/* already attached */

	/* heap-allocated; presumably too large for the kernel stack */
	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo) {
		err = -ENOMEM;
		goto __error;
	}

	client = snd_seq_create_kernel_client(rdev->card, rdev->device,
					      "%s %d-%d", rdev->rmidi->name,
					      rdev->card->number,
					      rdev->device);
	if (client < 0) {
		err = client;
		goto __error;
	}
	rdev->client = client;

	/* create a port */
	memset(pinfo, 0, sizeof(*pinfo));
	pinfo->addr.client = client;
	sprintf(pinfo->name, "VirMIDI %d-%d", rdev->card->number, rdev->device);
	/* set all capabilities */
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SYNC_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE;
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SYNC_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_DUPLEX;
	pinfo->type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC
		| SNDRV_SEQ_PORT_TYPE_SOFTWARE
		| SNDRV_SEQ_PORT_TYPE_PORT;
	pinfo->midi_channels = 16;
	memset(&pcallbacks, 0, sizeof(pcallbacks));
	pcallbacks.owner = THIS_MODULE;
	pcallbacks.private_data = rdev;
	pcallbacks.subscribe = snd_virmidi_subscribe;
	pcallbacks.unsubscribe = snd_virmidi_unsubscribe;
	pcallbacks.use = snd_virmidi_use;
	pcallbacks.unuse = snd_virmidi_unuse;
	pcallbacks.event_input = snd_virmidi_event_input;
	pinfo->kernel = &pcallbacks;
	err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, pinfo);
	if (err < 0) {
		/* roll back the client we just created */
		snd_seq_delete_kernel_client(client);
		rdev->client = -1;
		goto __error;
	}

	rdev->port = pinfo->addr.port;
	err = 0; /* success */
__error:
	kfree(pinfo);
	return err;
}
/*
 * Drop the sequencer client created by snd_virmidi_dev_attach_seq(),
 * if any, and mark the device as detached.
 */
static void snd_virmidi_dev_detach_seq(struct snd_virmidi_dev *rdev)
{
	if (rdev->client < 0)
		return;

	snd_seq_delete_kernel_client(rdev->client);
	rdev->client = -1;
}
/*
 * register the device
 *
 * Invoked by the rawmidi core when the device goes live.  DISPATCH
 * mode creates our own sequencer client/port; ATTACH mode expects the
 * caller to have stored an existing client in rdev->client already.
 */
static int snd_virmidi_dev_register(struct snd_rawmidi *rmidi)
{
	struct snd_virmidi_dev *rdev = rmidi->private_data;
	int err;

	switch (rdev->seq_mode) {
	case SNDRV_VIRMIDI_SEQ_DISPATCH:
		err = snd_virmidi_dev_attach_seq(rdev);
		if (err < 0)
			return err;
		break;
	case SNDRV_VIRMIDI_SEQ_ATTACH:
		if (rdev->client == 0)
			return -EINVAL;	/* no client supplied by the caller */
		/* should check presence of port more strictly.. */
		break;
	default:
		snd_printk(KERN_ERR "seq_mode is not set: %d\n", rdev->seq_mode);
		return -EINVAL;
	}
	return 0;
}
/*
 * unregister the device
 *
 * Only DISPATCH-mode devices own their sequencer client; ATTACH-mode
 * clients belong to somebody else and are left alone.
 */
static int snd_virmidi_dev_unregister(struct snd_rawmidi *rmidi)
{
	struct snd_virmidi_dev *rdev = rmidi->private_data;

	if (rdev->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH)
		snd_virmidi_dev_detach_seq(rdev);
	return 0;
}
/*
 * Device-level rawmidi hooks, called when the rawmidi device itself is
 * registered with / unregistered from the core.
 */
static struct snd_rawmidi_global_ops snd_virmidi_global_ops = {
	.dev_register = snd_virmidi_dev_register,
	.dev_unregister = snd_virmidi_dev_unregister,
};
/*
 * free device
 *
 * private_free hook: releases the per-device state allocated in
 * snd_virmidi_new() when the rawmidi device is destroyed.
 */
static void snd_virmidi_free(struct snd_rawmidi *rmidi)
{
	struct snd_virmidi_dev *rdev = rmidi->private_data;

	kfree(rdev);
}
/*
 * create a new device
 *
 * Builds a rawmidi device with 16 input and 16 output substreams and
 * wires it to the virmidi callbacks.  The device defaults to DISPATCH
 * mode; callers may change rdev->seq_mode before registration.
 * Returns 0 and stores the device in *rrmidi, or a negative errno.
 */
/* exported */
int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmidi)
{
	struct snd_rawmidi *rmidi;
	struct snd_virmidi_dev *rdev;
	int err;

	*rrmidi = NULL;
	if ((err = snd_rawmidi_new(card, "VirMidi", device,
				   16,	/* may be configurable */
				   16,	/* may be configurable */
				   &rmidi)) < 0)
		return err;
	strcpy(rmidi->name, rmidi->id);
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (rdev == NULL) {
		snd_device_free(card, rmidi);
		return -ENOMEM;
	}
	rdev->card = card;
	rdev->rmidi = rmidi;
	rdev->device = device;
	rdev->client = -1;	/* no sequencer client until registration */
	rwlock_init(&rdev->filelist_lock);
	INIT_LIST_HEAD(&rdev->filelist);
	rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
	rmidi->private_data = rdev;
	rmidi->private_free = snd_virmidi_free;
	rmidi->ops = &snd_virmidi_global_ops;
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_virmidi_input_ops);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_virmidi_output_ops);
	rmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT |
			    SNDRV_RAWMIDI_INFO_OUTPUT |
			    SNDRV_RAWMIDI_INFO_DUPLEX;
	*rrmidi = rmidi;
	return 0;
}
/*
 * ENTRY functions
 *
 * The module has no setup of its own; devices are created on demand by
 * card drivers through snd_virmidi_new().
 */
static int __init alsa_virmidi_init(void)
{
	return 0;
}

static void __exit alsa_virmidi_exit(void)
{
}

module_init(alsa_virmidi_init)
module_exit(alsa_virmidi_exit)

EXPORT_SYMBOL(snd_virmidi_new);
| {
"pile_set_name": "Github"
} |
<?php declare(strict_types=1);
namespace Shopware\Core\Framework\App\Lifecycle\Registration;
use GuzzleHttp\Psr7\Request;
use GuzzleHttp\Psr7\Uri;
use Psr\Http\Message\RequestInterface;
/**
 * Handshake for privately distributed apps: the registration request is
 * signed with a shared secret known to both the shop and the app.
 *
 * @internal
 */
class PrivateHandshake implements AppHandshakeInterface
{
    /**
     * @var string
     */
    private $shopUrl;

    /**
     * @var string
     */
    private $secret;

    /**
     * @var string
     */
    private $appEndpoint;

    /**
     * @var string
     */
    private $appName;

    /**
     * @var string
     */
    private $shopId;

    public function __construct(string $shopUrl, string $secret, string $appEndpoint, string $appName, string $shopId)
    {
        $this->shopUrl = $shopUrl;
        $this->secret = $secret;
        $this->appEndpoint = $appEndpoint;
        $this->appName = $appName;
        $this->shopId = $shopId;
    }

    /**
     * Builds the signed registration request: shop-id, shop-url and the
     * current timestamp go into the query string, and an HMAC-SHA256 of
     * that query string (keyed with the app secret) is sent in the
     * shopware-app-signature header.
     */
    public function assembleRequest(): RequestInterface
    {
        $date = new \DateTime();
        $uri = new Uri($this->appEndpoint);

        $uri = Uri::withQueryValues($uri, [
            'shop-id' => $this->shopId,
            'shop-url' => $this->shopUrl,
            'timestamp' => $date->getTimestamp(),
        ]);

        $signature = hash_hmac('sha256', $uri->getQuery(), $this->secret);

        return new Request(
            'GET',
            $uri,
            [
                'shopware-app-signature' => $signature,
            ]
        );
    }

    /**
     * Proof that this app knows the shared secret: an HMAC-SHA256 over
     * shopId . shopUrl . appName, which the shop recomputes and compares.
     */
    public function fetchAppProof(): string
    {
        return hash_hmac('sha256', $this->shopId . $this->shopUrl . $this->appName, $this->secret);
    }
}
| {
"pile_set_name": "Github"
} |
/**
* @file v16i8.h
*
* @brief struct and _Generic based vector class implementation
*/
#ifndef _V16I8_H_INCLUDED
#define _V16I8_H_INCLUDED
/* include header for intel / amd sse2 instruction sets */
#include <x86intrin.h>
/* 8bit 16cell (NOTE(review): the original comment said "32cell", but a
 * single __m128i holds 16 x 8-bit lanes -- confirm against sibling headers) */
typedef struct v16i8_s {
	__m128i v1;
} v16i8_t;
/* expanders (without argument) */
#define _e_x_v16i8_1(u)
#define _e_x_v16i8_2(u)
/* expanders (without immediate) */
#define _e_v_v16i8_1(a) (a).v1
#define _e_v_v16i8_2(a) (a).v1
#define _e_vv_v16i8_1(a, b) (a).v1, (b).v1
#define _e_vv_v16i8_2(a, b) (a).v1, (b).v1
#define _e_vvv_v16i8_1(a, b, c) (a).v1, (b).v1, (c).v1
#define _e_vvv_v16i8_2(a, b, c) (a).v1, (b).v1, (c).v1
/* expanders with immediate */
#define _e_i_v16i8_1(imm) (imm)
#define _e_i_v16i8_2(imm) (imm)
#define _e_vi_v16i8_1(a, imm) (a).v1, (imm)
#define _e_vi_v16i8_2(a, imm) (a).v1, (imm)
#define _e_vvi_v16i8_1(a, b, imm) (a).v1, (b).v1, (imm)
#define _e_vvi_v16i8_2(a, b, imm) (a).v1, (b).v1, (imm)
/* address calculation macros */
#define _addr_v16i8_1(imm) ( (__m128i *)(imm) )
#define _addr_v16i8_2(imm) ( (__m128i *)(imm) )
#define _pv_v16i8(ptr) ( _addr_v16i8_1(ptr) )
/* expanders with pointers */
#define _e_p_v16i8_1(ptr) _addr_v16i8_1(ptr)
#define _e_p_v16i8_2(ptr) _addr_v16i8_2(ptr)
#define _e_pv_v16i8_1(ptr, a) _addr_v16i8_1(ptr), (a).v1
#define _e_pv_v16i8_2(ptr, a) _addr_v16i8_2(ptr), (a).v1
/* expand intrinsic name */
#define _i_v16i8(intrin) _mm_##intrin##_epi8
#define _i_v16u8(intrin) _mm_##intrin##_epu8
#define _i_v16i8x(intrin) _mm_##intrin##_si128
/* apply */
#define _a_v16i8(intrin, expander, ...) ( \
(v16i8_t) { \
_i_v16i8(intrin)(expander##_v16i8_1(__VA_ARGS__)) \
} \
)
#define _a_v16u8(intrin, expander, ...) ( \
(v16i8_t) { \
_i_v16u8(intrin)(expander##_v16i8_1(__VA_ARGS__)) \
} \
)
#define _a_v16i8x(intrin, expander, ...) ( \
(v16i8_t) { \
_i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)) \
} \
)
#define _a_v16i8xv(intrin, expander, ...) { \
_i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)); \
}
/* load and store */
#define _load_v16i8(...) _a_v16i8x(load, _e_p, __VA_ARGS__)
#define _loadu_v16i8(...) _a_v16i8x(loadu, _e_p, __VA_ARGS__)
#define _store_v16i8(...) _a_v16i8xv(store, _e_pv, __VA_ARGS__)
#define _storeu_v16i8(...) _a_v16i8xv(storeu, _e_pv, __VA_ARGS__)
/* broadcast */
#define _set_v16i8(...) _a_v16i8(set1, _e_i, __VA_ARGS__)
#define _zero_v16i8() _a_v16i8x(setzero, _e_x, _unused)
#define _seta_v16i8(...) ( \
(v16i8_t) { \
_mm_set_epi8(__VA_ARGS__) \
} \
)
/* swap (reverse) */
#define _swap_idx_v16i8() ( \
_mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) \
)
#define _swap_v16i8(a) ( \
(v16i8_t) { \
_mm_shuffle_epi8((a).v1, _swap_idx_v16i8()) \
} \
)
#define _swapn_idx_v16i8() ( \
_mm_set_epi8(-16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1) \
)
#define _swapn_v16i8(a, l) ( \
(v16i8_t) { \
_mm_shuffle_epi8((a).v1, _mm_add_epi8(_swapn_idx_v16i8(), _mm_set1_epi8(l))) \
} \
)
/* logics */
#define _not_v16i8(...) _a_v16i8x(not, _e_v, __VA_ARGS__)
#define _and_v16i8(...) _a_v16i8x(and, _e_vv, __VA_ARGS__)
#define _or_v16i8(...) _a_v16i8x(or, _e_vv, __VA_ARGS__)
#define _xor_v16i8(...) _a_v16i8x(xor, _e_vv, __VA_ARGS__)
#define _andn_v16i8(...) _a_v16i8x(andnot, _e_vv, __VA_ARGS__)
/* arithmetics */
#define _add_v16i8(...) _a_v16i8(add, _e_vv, __VA_ARGS__)
#define _sub_v16i8(...) _a_v16i8(sub, _e_vv, __VA_ARGS__)
#define _adds_v16i8(...) _a_v16i8(adds, _e_vv, __VA_ARGS__)
#define _subs_v16i8(...) _a_v16i8(subs, _e_vv, __VA_ARGS__)
#define _addus_v16i8(...) _a_v16u8(adds, _e_vv, __VA_ARGS__)
#define _subus_v16i8(...) _a_v16u8(subs, _e_vv, __VA_ARGS__)
#define _max_v16i8(...) _a_v16i8(max, _e_vv, __VA_ARGS__)
#define _min_v16i8(...) _a_v16i8(min, _e_vv, __VA_ARGS__)
/* shuffle */
#define _shuf_v16i8(...) _a_v16i8(shuffle, _e_vv, __VA_ARGS__)
/* blend */
#define _sel_v16i8(...) _a_v16i8(blendv, _e_vvv, __VA_ARGS__)
/* compare */
#define _eq_v16i8(...) _a_v16i8(cmpeq, _e_vv, __VA_ARGS__)
#define _gt_v16i8(...) _a_v16i8(cmpgt, _e_vv, __VA_ARGS__)
/* insert and extract */
#define _ins_v16i8(a, val, imm) { \
(a).v1 = _i_v16i8(insert)((a).v1, (val), (imm)); \
}
#define _ext_v16i8(a, imm) ( \
(int8_t)_i_v16i8(extract)((a).v1, (imm)) \
)
/* byte shift */
#define _bsl_v16i8(a, imm) ( \
(v16i8_t) { \
_i_v16i8x(slli)((a).v1, (imm)) \
} \
)
#define _bsr_v16i8(a, imm) ( \
(v16i8_t) { \
_i_v16i8x(srli)((a).v1, (imm)) \
} \
)
/* double shift (palignr) */
#define _bsld_v16i8(a, b, imm) ( \
(v16i8_t) { \
_mm_alignr_epi8((a).v1, (b).v1, sizeof(__m128i) - (imm)) \
} \
)
#define _bsrd_v16i8(a, b, imm) ( \
(v16i8_t) { \
_mm_alignr_epi8((a).v1, (b).v1, (imm)) \
} \
)
/* bit shift */
#define _shl_v16i8(a, imm) ( \
(v16i8_t) { \
_mm_slli_epi32((a).v1, (imm)) \
} \
)
#define _shr_v16i8(a, imm) ( \
(v16i8_t) { \
_mm_srli_epi32((a).v1, (imm)) \
} \
)
#define _sal_v16i8(a, imm) ( \
(v16i8_t) { \
_mm_slai_epi32((a).v1, (imm)) \
} \
)
#define _sar_v16i8(a, imm) ( \
(v16i8_t) { \
_mm_srai_epi32((a).v1, (imm)) \
} \
)
/* mask */
#define _mask_v16i8(a) ( \
(v16_mask_t) { \
.m1 = _i_v16i8(movemask)((a).v1) \
} \
)
/* horizontal max */
#define _hmax_v16i8(a) ({ \
__m128i _vmax = _mm_max_epi8((a).v1, \
_mm_srli_si128((a).v1, 8)); \
_vmax = _mm_max_epi8(_vmax, \
_mm_srli_si128(_vmax, 4)); \
_vmax = _mm_max_epi8(_vmax, \
_mm_srli_si128(_vmax, 2)); \
_vmax = _mm_max_epi8(_vmax, \
_mm_srli_si128(_vmax, 1)); \
(int8_t)_mm_extract_epi8(_vmax, 0); \
})
/* convert */
#define _cvt_v16i16_v16i8(a) ( \
(v16i8_t) { \
_mm_packs_epi16((a).v1, (a).v2) \
} \
)
/* debug print */
// #ifdef _LOG_H_INCLUDED
#define _print_v16i8(a) { \
debug("(v16i8_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \
#a, \
_ext_v16i8(a, 15), \
_ext_v16i8(a, 14), \
_ext_v16i8(a, 13), \
_ext_v16i8(a, 12), \
_ext_v16i8(a, 11), \
_ext_v16i8(a, 10), \
_ext_v16i8(a, 9), \
_ext_v16i8(a, 8), \
_ext_v16i8(a, 7), \
_ext_v16i8(a, 6), \
_ext_v16i8(a, 5), \
_ext_v16i8(a, 4), \
_ext_v16i8(a, 3), \
_ext_v16i8(a, 2), \
_ext_v16i8(a, 1), \
_ext_v16i8(a, 0)); \
}
// #else
// #define _print_v16i8(x) ;
// #endif
#endif /* _V16I8_H_INCLUDED */
/**
* end of v16i8.h
*/
| {
"pile_set_name": "Github"
} |
package org.wikipedia.edit.richtext;
import org.wikipedia.model.BaseModel;
/**
* Represents a single syntax highlighting rule.
*
* example: [[ lorem ipsum ]]
* | | |
* startSymbol | |
* | endSymbol
* |
* spanStyle
*/
public class SyntaxRule extends BaseModel {
    // Token that opens the highlighted region, e.g. "[[".
    private final String startSymbol;
    // Token that closes the region, e.g. "]]".
    private final String endSymbol;
    // Style applied to the span between the two symbols.
    private final SyntaxRuleStyle spanStyle;
    // Cached result of startSymbol.equals(endSymbol); see isStartEndSame().
    private final boolean sameStartEnd;

    /** @return the token that opens this rule's region */
    public String getStartSymbol() {
        return startSymbol;
    }

    /** @return the token that closes this rule's region */
    public String getEndSymbol() {
        return endSymbol;
    }

    /** @return the style to apply to the enclosed span */
    public SyntaxRuleStyle getSpanStyle() {
        return spanStyle;
    }

    /**
     * Whether the start symbol is the same as the end symbol
     * (for faster processing)
     */
    public boolean isStartEndSame() {
        return sameStartEnd;
    }

    /**
     * @param startSymbol opening token; must not be null
     * @param endSymbol   closing token; must not be null
     * @param spanStyle   style for the text between the tokens
     */
    public SyntaxRule(String startSymbol, String endSymbol, SyntaxRuleStyle spanStyle) {
        this.startSymbol = startSymbol;
        this.endSymbol = endSymbol;
        this.spanStyle = spanStyle;
        sameStartEnd = startSymbol.equals(endSymbol);
    }
}
| {
"pile_set_name": "Github"
} |
// Distributed under the terms of the MIT license
// Test case submitted to project by https://github.com/practicalswift (practicalswift)
// Test case found by fuzzing
protocol B {
struct B {
init {
let a {
func a
( = {
class A {
enum B {
class
case ,
| {
"pile_set_name": "Github"
} |
// Scintilla source code edit control
/** @file CharacterSet.cxx
** Simple case functions for ASCII.
** Lexer infrastructure.
**/
// Copyright 1998-2010 by Neil Hodgson <[email protected]>
// The License.txt file describes the conditions under which this software may be distributed.
#include <cstdlib>
#include <cassert>
#include "CharacterSet.h"
using namespace Scintilla;
namespace Scintilla {
// Compare two NUL-terminated strings ignoring ASCII letter case.
// Returns <0, 0 or >0 in the manner of strcmp.
int CompareCaseInsensitive(const char *a, const char *b) {
	for (; *a && *b; a++, b++) {
		if (*a == *b)
			continue;
		const char upperA = MakeUpperCase(*a);
		const char upperB = MakeUpperCase(*b);
		if (upperA != upperB)
			return upperA - upperB;
	}
	// At least one string is exhausted: the shorter compares lower.
	return *a - *b;
}
// Compare at most len characters of two strings ignoring ASCII letter case.
// Returns <0, 0 or >0 in the manner of strncmp.
int CompareNCaseInsensitive(const char *a, const char *b, size_t len) {
	for (; len; a++, b++, len--) {
		// If either string ends first, the shorter compares lower.
		if (!*a || !*b)
			return *a - *b;
		if (*a != *b) {
			const char upperA = MakeUpperCase(*a);
			const char upperB = MakeUpperCase(*b);
			if (upperA != upperB)
				return upperA - upperB;
		}
	}
	// Compared len characters without finding a difference.
	return 0;
}
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
set -o errexit
trap 'echo "Aborting due to errexit on line $LINENO. Exit code: $?" >&2' ERR
set -o errtrace
set -o nounset

###############################################################################
# Ensure the front-end overlay network exists
###############################################################################

# Name of the encrypted, attachable overlay network shared by front services.
NTW_FRONT=proxy

# `docker network ls --filter name=X` matches names by (sub)pattern, so a
# network called e.g. "my-proxy" or "proxy2" would wrongly satisfy the check.
# Anchor the pattern (^...$) to require an exact name match, and quote the
# expansion so it survives word splitting.
if [ ! "$(docker network ls --filter "name=^${NTW_FRONT}$" -q)" ]; then
  docker network create --driver overlay --attachable --opt encrypted "$NTW_FRONT"
  echo "Network: $NTW_FRONT was created."
else
  echo "Network: $NTW_FRONT already exists."
fi
"pile_set_name": "Github"
} |
// Copyright John Maddock 2005-2006.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_MATH_TOOLS_PRECISION_INCLUDED
#define BOOST_MATH_TOOLS_PRECISION_INCLUDED
#ifdef _MSC_VER
#pragma once
#endif
#include <boost/limits.hpp>
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/mpl/int.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
#include <boost/math/policies/policy.hpp>
// These two are for LDBL_MAN_DIG:
#include <limits.h>
#include <math.h>
namespace boost{ namespace math
{
namespace tools
{
// If T is not specialized, the functions digits, max_value and min_value,
// all get synthesised automatically from std::numeric_limits.
// However, if numeric_limits is not specialised for type RealType,
// for example with NTL::RR type, then you will get a compiler error
// when code tries to use these functions, unless you explicitly specialise them.
// For example if the precision of RealType varies at runtime,
// then numeric_limits support may not be appropriate,
// see boost/math/tools/ntl.hpp for examples like
// template <> NTL::RR max_value<NTL::RR> ...
// See Conceptual Requirements for Real Number Types.
//
// Number of mantissa bits in T.  For radix-10 types the decimal digit count
// is converted to an equivalent bit count via digits * 1000 / 301
// (1000/301 ~= 1/log10(2)).  numeric_limits<T> must be specialized with
// radix 2 or 10; enforced at compile time where the platform allows,
// at runtime otherwise.
//
template <class T>
inline int digits(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::radix == 2 || ::std::numeric_limits<T>::radix == 10);
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
BOOST_ASSERT(::std::numeric_limits<T>::radix == 2 || ::std::numeric_limits<T>::radix == 10);
#endif
return std::numeric_limits<T>::radix == 2
? std::numeric_limits<T>::digits
: ((std::numeric_limits<T>::digits + 1) * 1000L) / 301L;
}
//
// Largest finite value of T.  The call is written as
// (std::numeric_limits<T>::max)() so that a function-style max() macro
// (e.g. from <windows.h>) cannot expand inside it.
//
template <class T>
inline T max_value(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
#endif
return (std::numeric_limits<T>::max)();
} // Also used as a finite 'infinite' value for - and +infinity, for example:
// -max_value<double> = -1.79769e+308, max_value<double> = 1.79769e+308.
//
// Smallest positive normalized value of T (numeric_limits<T>::min), with the
// same parenthesisation as max_value to defeat a possible min() macro.
//
template <class T>
inline T min_value(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
#endif
return (std::numeric_limits<T>::min)();
}
namespace detail{
//
// Logarithmic limits come next, note that although
// we can compute these from the log of the max value
// that is not in general thread safe (if we cache the value)
// so it's better to specialise these:
//
// For type float first:
//
// The mpl::int_<N> tag carries numeric_limits<T>::max_exponent (see the
// public log_max_value/log_min_value below, which build the tag): 128 for
// float-like types, 1024 for double-like, 16384 for 80/128-bit long double.
// The returned constants are conservative bounds on log(max) / log(min).
template <class T>
inline T log_max_value(const mpl::int_<128>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return 88.0f;
}
template <class T>
inline T log_min_value(const mpl::int_<128>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return -87.0f;
}
//
// Now double:
//
template <class T>
inline T log_max_value(const mpl::int_<1024>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return 709.0;
}
template <class T>
inline T log_min_value(const mpl::int_<1024>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return -708.0;
}
//
// 80 and 128-bit long doubles:
//
template <class T>
inline T log_max_value(const mpl::int_<16384>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return 11356.0L;
}
template <class T>
inline T log_min_value(const mpl::int_<16384>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return -11355.0L;
}
// Generic fallback (tag 0) for types whose max_exponent is not one of the
// known values: compute log(max)/log(min) once and cache it in a function
// local static -- see the thread-safety caveat in the comment above.
template <class T>
inline T log_max_value(const mpl::int_<0>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
#endif
BOOST_MATH_STD_USING
static const T val = log((std::numeric_limits<T>::max)());
return val;
}
template <class T>
inline T log_min_value(const mpl::int_<0>& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
#endif
BOOST_MATH_STD_USING
static const T val = log((std::numeric_limits<T>::min)());
return val;
}
// epsilon when numeric_limits<T> is specialized (mpl::true_ dispatch):
// just forward to the library value.
template <class T>
inline T epsilon(const mpl::true_& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
return std::numeric_limits<T>::epsilon();
}
#if defined(__GNUC__) && ((LDBL_MANT_DIG == 106) || (__LDBL_MANT_DIG__ == 106))
template <>
inline long double epsilon<long double>(const mpl::true_& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(long double))
{
// numeric_limits on Darwin (and elsewhere) tells lies here:
// the issue is that long double on a few platforms is
// really a "double double" which has a non-contiguous
// mantissa: 53 bits followed by an unspecified number of
// zero bits, followed by 53 more bits. Thus the apparent
// precision of the type varies depending where it's been.
// Set epsilon to the value that a 106 bit fixed mantissa
// type would have, as that will give us sensible behaviour everywhere.
//
// This static assert fails for some unknown reason, so
// disabled for now...
// BOOST_STATIC_ASSERT(std::numeric_limits<long double>::digits == 106);
return 2.4651903288156618919116517665087e-32L;
}
#endif
// epsilon when numeric_limits<T> is NOT specialized (mpl::false_ dispatch):
// synthesize eps = 2^(1 - digits) from the default policy's digit count,
// cached in a function local static on first use.
template <class T>
inline T epsilon(const mpl::false_& BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(T))
{
BOOST_MATH_STD_USING // for ADL of std names
static const T eps = ldexp(static_cast<T>(1), 1-policies::digits<T, policies::policy<> >());
return eps;
}
} // namespace detail
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4309)
#endif
//
// log(max_value<T>()): dispatches to a precomputed detail:: constant when T
// is a radix-2 type with one of the common exponent ranges (128/1024/16384),
// otherwise falls back to computing (and caching) log(max) at runtime.
// The INT_MAX clamp only guards against max_exponent overflowing int in the
// tag computation; clamped values never match a specialised tag.
//
template <class T>
inline T log_max_value(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
typedef typename mpl::if_c<
(std::numeric_limits<T>::radix == 2) &&
(std::numeric_limits<T>::max_exponent == 128
|| std::numeric_limits<T>::max_exponent == 1024
|| std::numeric_limits<T>::max_exponent == 16384),
mpl::int_<(std::numeric_limits<T>::max_exponent > INT_MAX ? INT_MAX : static_cast<int>(std::numeric_limits<T>::max_exponent))>,
mpl::int_<0>
>::type tag_type;
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
return detail::log_max_value<T>(tag_type());
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
BOOST_MATH_STD_USING
static const T val = log((std::numeric_limits<T>::max)());
return val;
#endif
}
//
// log(min_value<T>()): same tag dispatch scheme as log_max_value above --
// precomputed constant for the common exponent ranges, cached runtime
// computation otherwise.
//
template <class T>
inline T log_min_value(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
typedef typename mpl::if_c<
(std::numeric_limits<T>::radix == 2) &&
(std::numeric_limits<T>::max_exponent == 128
|| std::numeric_limits<T>::max_exponent == 1024
|| std::numeric_limits<T>::max_exponent == 16384),
mpl::int_<(std::numeric_limits<T>::max_exponent > INT_MAX ? INT_MAX : static_cast<int>(std::numeric_limits<T>::max_exponent))>,
mpl::int_<0>
>::type tag_type;
BOOST_STATIC_ASSERT( ::std::numeric_limits<T>::is_specialized);
return detail::log_min_value<T>(tag_type());
#else
BOOST_ASSERT(::std::numeric_limits<T>::is_specialized);
BOOST_MATH_STD_USING
static const T val = log((std::numeric_limits<T>::min)());
return val;
#endif
}
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
//
// Machine epsilon for T: taken from numeric_limits when specialized,
// otherwise synthesized by detail::epsilon from the default policy's digit
// count.  Dispatch is on a compile-time bool where the platform allows,
// on a runtime conditional otherwise.
//
template <class T>
inline T epsilon(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(T))
{
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
return detail::epsilon<T>(mpl::bool_< ::std::numeric_limits<T>::is_specialized>());
#else
return ::std::numeric_limits<T>::is_specialized ?
detail::epsilon<T>(mpl::true_()) :
detail::epsilon<T>(mpl::false_());
#endif
}
namespace detail{
template <class T>
inline T root_epsilon_imp(const mpl::int_<24>&)
{
return static_cast<T>(0.00034526698300124390839884978618400831996329879769945L);
}
template <class T>
inline T root_epsilon_imp(const T*, const mpl::int_<53>&)
{
return static_cast<T>(0.1490116119384765625e-7L);
}
template <class T>
inline T root_epsilon_imp(const T*, const mpl::int_<64>&)
{
return static_cast<T>(0.32927225399135962333569506281281311031656150598474e-9L);
}
template <class T>
inline T root_epsilon_imp(const T*, const mpl::int_<113>&)
{
return static_cast<T>(0.1387778780781445675529539585113525390625e-16L);
}
template <class T, class Tag>
inline T root_epsilon_imp(const T*, const Tag&)
{
BOOST_MATH_STD_USING
static const T r_eps = sqrt(tools::epsilon<T>());
return r_eps;
}
template <class T>
inline T cbrt_epsilon_imp(const mpl::int_<24>&)
{
return static_cast<T>(0.0049215666011518482998719164346805794944150447839903L);
}
template <class T>
inline T cbrt_epsilon_imp(const T*, const mpl::int_<53>&)
{
return static_cast<T>(6.05545445239333906078989272793696693569753008995e-6L);
}
template <class T>
inline T cbrt_epsilon_imp(const T*, const mpl::int_<64>&)
{
return static_cast<T>(4.76837158203125e-7L);
}
template <class T>
inline T cbrt_epsilon_imp(const T*, const mpl::int_<113>&)
{
return static_cast<T>(5.7749313854154005630396773604745549542403508090496e-12L);
}
template <class T, class Tag>
inline T cbrt_epsilon_imp(const T*, const Tag&)
{
BOOST_MATH_STD_USING;
static const T cbrt_eps = pow(tools::epsilon<T>(), T(1) / 3);
return cbrt_eps;
}
template <class T>
inline T forth_root_epsilon_imp(const T*, const mpl::int_<24>&)
{
return static_cast<T>(0.018581361171917516667460937040007436176452688944747L);
}
template <class T>
inline T forth_root_epsilon_imp(const T*, const mpl::int_<53>&)
{
return static_cast<T>(0.0001220703125L);
}
template <class T>
inline T forth_root_epsilon_imp(const T*, const mpl::int_<64>&)
{
return static_cast<T>(0.18145860519450699870567321328132261891067079047605e-4L);
}
template <class T>
inline T forth_root_epsilon_imp(const T*, const mpl::int_<113>&)
{
return static_cast<T>(0.37252902984619140625e-8L);
}
template <class T, class Tag>
inline T forth_root_epsilon_imp(const T*, const Tag&)
{
BOOST_MATH_STD_USING
static const T r_eps = sqrt(sqrt(tools::epsilon<T>()));
return r_eps;
}
}
//
// sqrt(epsilon<T>()): the tag carries the mantissa bit count for radix-2
// types (0 otherwise) and the null T pointer lets the detail overload deduce
// the return type.
//
template <class T>
inline T root_epsilon()
{
typedef mpl::int_< (::std::numeric_limits<T>::radix == 2) ? std::numeric_limits<T>::digits : 0> tag_type;
return detail::root_epsilon_imp(static_cast<T const*>(0), tag_type());
}
//
// epsilon<T>() to the power 1/3, dispatched the same way as root_epsilon.
//
template <class T>
inline T cbrt_epsilon()
{
typedef mpl::int_< (::std::numeric_limits<T>::radix == 2) ? std::numeric_limits<T>::digits : 0> tag_type;
return detail::cbrt_epsilon_imp(static_cast<T const*>(0), tag_type());
}
//
// epsilon<T>() to the power 1/4, dispatched the same way as root_epsilon.
//
template <class T>
inline T forth_root_epsilon()
{
typedef mpl::int_< (::std::numeric_limits<T>::radix == 2) ? std::numeric_limits<T>::digits : 0> tag_type;
return detail::forth_root_epsilon_imp(static_cast<T const*>(0), tag_type());
}
} // namespace tools
} // namespace math
} // namespace boost
#endif // BOOST_MATH_TOOLS_PRECISION_INCLUDED
| {
"pile_set_name": "Github"
} |
//
// BGViewController.h
// BGUtilities
//
// Created by Ben Gordon on 12/11/13.
// Copyright (c) 2013 Ben Gordon. All rights reserved.
//
#import <UIKit/UIKit.h>
// Plain UIViewController subclass; declares no additional public API here.
@interface BGViewController : UIViewController
@end
| {
"pile_set_name": "Github"
} |
/*
* The MIT License
* Copyright © 2014-2019 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.business.delegate;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
/**
* The Business Delegate pattern adds an abstraction layer between the presentation and business
* tiers. By using the pattern we gain loose coupling between the tiers. The Business Delegate
* encapsulates knowledge about how to locate, connect to, and interact with the business objects
* that make up the application.
*
* <p>Some of the services the Business Delegate uses are instantiated directly, and some can be
* retrieved through service lookups. The Business Delegate itself may contain business logic too
* potentially tying together multiple service calls, exception handling, retrying etc.
*/
public class BusinessDelegateTest {
// Spied collaborators, re-created fresh for every test by setup():
// concrete backing service reached when ServiceType.EJB is selected
private EjbService ejbService;
// concrete backing service reached when ServiceType.JMS is selected
private JmsService jmsService;
// lookup that maps a ServiceType to one of the services above
private BusinessLookup businessLookup;
// delegate under test; the client talks only to this object
private BusinessDelegate businessDelegate;
/**
* This method sets up the instance variables of this test class. It is executed before the
* execution of every test.
*/
@BeforeEach
public void setup() {
ejbService = spy(new EjbService());
jmsService = spy(new JmsService());
businessLookup = spy(new BusinessLookup());
businessLookup.setEjbService(ejbService);
businessLookup.setJmsService(jmsService);
businessDelegate = spy(new BusinessDelegate());
businessDelegate.setLookupService(businessLookup);
}
/**
* In this example the client ({@link Client}) utilizes a business delegate (
* {@link BusinessDelegate}) to execute a task. The Business Delegate then selects the appropriate
* service and makes the service call.
*
* <p>The test switches the delegate from EJB to JMS and verifies, via the
* Mockito spies, that each concrete service's doProcessing() was reached
* exactly once and that doTask() passed through the delegate both times.
*/
@Test
public void testBusinessDelegate() {
// setup a client object
var client = new Client(businessDelegate);
// set the service type
businessDelegate.setServiceType(ServiceType.EJB);
// action
client.doTask();
// verifying that the businessDelegate was used by client during doTask() method.
verify(businessDelegate).doTask();
verify(ejbService).doProcessing();
// set the service type
businessDelegate.setServiceType(ServiceType.JMS);
// action
client.doTask();
// verifying that the businessDelegate was used by client during doTask() method.
verify(businessDelegate, times(2)).doTask();
verify(jmsService).doProcessing();
}
}
| {
"pile_set_name": "Github"
} |
package com.sequenceiq.freeipa.util;
import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.sequenceiq.cloudbreak.auth.ThreadBasedUserCrnProvider;
import com.sequenceiq.cloudbreak.auth.altus.Crn;
import com.sequenceiq.cloudbreak.auth.altus.Crn.ResourceType;
import com.sequenceiq.cloudbreak.auth.altus.CrnParseException;
@Component
public class CrnService {

    private static final Logger LOGGER = LoggerFactory.getLogger(CrnService.class);

    /**
     * Parses the CRN of the user bound to the current thread.
     *
     * <p>Extracted because getCurrentAccountId() and getCurrentUserId()
     * previously duplicated this parse/try-catch block verbatim.
     *
     * @return the parsed {@link Crn}; may be null if {@code Crn.fromString} yields null
     * @throws CrnParseException if no user CRN is set on the current thread
     */
    private Crn parseCurrentUserCrn() {
        String userCrn = ThreadBasedUserCrnProvider.getUserCrn();
        try {
            return Crn.fromString(userCrn);
        } catch (NullPointerException e) {
            // Crn.fromString throws NPE when the thread has no CRN bound;
            // translate that into the domain-specific parse exception.
            LOGGER.warn("Crn is not set", e);
            throw new CrnParseException("CRN is not set");
        }
    }

    /**
     * @return the account ID embedded in the current user's CRN
     * @throws CrnParseException if the CRN is missing or cannot be parsed
     */
    public String getCurrentAccountId() {
        Crn crn = parseCurrentUserCrn();
        if (crn != null) {
            return crn.getAccountId();
        } else {
            throw new CrnParseException("Can not parse account ID from CRN");
        }
    }

    /**
     * @return the resource part of the current user's CRN when it identifies a
     *         USER resource, or null for any other resource type
     * @throws CrnParseException if the CRN is missing or cannot be parsed
     */
    public String getCurrentUserId() {
        Crn crn = parseCurrentUserCrn();
        if (crn != null) {
            if (ResourceType.USER.equals(crn.getResourceType())) {
                return crn.getResource();
            } else {
                return null;
            }
        } else {
            throw new CrnParseException("Can not parse account ID from CRN");
        }
    }

    /** @return the raw CRN string of the user bound to the current thread. */
    public String getUserCrn() {
        return ThreadBasedUserCrnProvider.getUserCrn();
    }

    /**
     * Builds a new FreeIPA CRN with a random UUID as the resource ID.
     *
     * @param accountId    account the resource belongs to
     * @param resourceType type of the resource the CRN identifies
     * @return the CRN rendered as a string
     */
    public String createCrn(String accountId, Crn.ResourceType resourceType) {
        return Crn.builder()
                .setService(Crn.Service.FREEIPA)
                .setAccountId(accountId)
                .setResourceType(resourceType)
                .setResource(UUID.randomUUID().toString())
                .build().toString();
    }
}
| {
"pile_set_name": "Github"
} |
import mock from 'xhr-mock';
import coreStore from 'kolibri.coreVue.vuex.store';
import * as redirectBrowser from 'kolibri.utils.redirectBrowser';
import * as serverClock from 'kolibri.utils.serverClock';
import { HeartBeat } from '../src/heartbeat.js';
import disconnectionErrorCodes from '../src/disconnectionErrorCodes';
import { trs } from '../src/disconnection';
import { stubWindowLocation } from 'testUtils'; // eslint-disable-line
jest.mock('kolibri.lib.logging');
jest.mock('kolibri.urls');
jest.mock('lockr');
describe('HeartBeat', function() {
stubWindowLocation(beforeAll, afterAll);
// replace the real XHR object with the mock XHR object before each test
beforeEach(() => mock.setup());
// put the real XHR object back and clear the mocks after each test
afterEach(() => mock.teardown());
// Constructor contract: callbacks are bound to the instance, the user starts
// out inactive, and polling must NOT start until explicitly requested.
describe('constructor method', function() {
it('should set the setUserActive method to a bound method', function() {
const test = new HeartBeat();
expect(HeartBeat.prototype.setUserActive).not.toEqual(test.setUserActive);
});
it('should set the pollSessionEndPoint method to a bound method', function() {
const test = new HeartBeat();
expect(HeartBeat.prototype.pollSessionEndPoint).not.toEqual(test.pollSessionEndPoint);
});
it('should call the setUserInactive method', function() {
const spy = jest.spyOn(HeartBeat.prototype, 'setUserInactive');
new HeartBeat();
expect(spy).toHaveBeenCalledTimes(1);
spy.mockRestore();
});
it('should not call the startPolling method', function() {
const spy = jest.spyOn(HeartBeat.prototype, 'startPolling');
new HeartBeat();
expect(spy).not.toHaveBeenCalled();
spy.mockRestore();
});
});
// startPolling contract: kicks off pollSessionEndPoint only when not already
// enabled, and reuses the in-flight _activePromise when one exists.
describe('startPolling method', function() {
let heartBeat;
let pollSessionEndPointStub;
beforeEach(function() {
heartBeat = new HeartBeat();
// Stub the poll so no real session request is made.
pollSessionEndPointStub = jest
.spyOn(heartBeat, 'pollSessionEndPoint')
.mockReturnValue(Promise.resolve());
});
it('should call pollSessionEndPoint if not currently enabled', function() {
heartBeat._enabled = false;
heartBeat.startPolling();
expect(pollSessionEndPointStub).toHaveBeenCalledTimes(1);
});
it('should not call pollSessionEndPoint if currently enabled', function() {
heartBeat._enabled = true;
heartBeat.startPolling();
expect(pollSessionEndPointStub).toHaveBeenCalledTimes(0);
});
it('should return _activePromise if currently defined and _enabled true', function() {
heartBeat._enabled = true;
heartBeat._activePromise = 'test';
expect(heartBeat.startPolling()).toEqual('test');
});
it('should return a Promise if _activePromise is not defined and _enabled is true', function() {
heartBeat._enabled = true;
delete heartBeat._activePromise;
expect(heartBeat.startPolling()).toBeInstanceOf(Promise);
});
});
// pollSessionEndPoint contract: runs a session check (deduplicated via
// _activePromise), then — while enabled — marks the user inactive and
// schedules the next poll with _wait/setTimeout.
describe('pollSessionEndPoint method', function() {
let heartBeat;
let _checkSessionStub;
beforeEach(function() {
heartBeat = new HeartBeat();
heartBeat.active = false;
heartBeat._enabled = true;
// Stub the session check so no XHR is issued.
_checkSessionStub = jest.spyOn(heartBeat, '_checkSession').mockReturnValue(Promise.resolve());
});
it('should call setUserInactive', function() {
const spy = jest.spyOn(heartBeat, 'setUserInactive');
return heartBeat.pollSessionEndPoint().then(() => {
expect(spy).toHaveBeenCalledTimes(1);
});
});
it('should call _wait', function() {
const spy = jest.spyOn(heartBeat, '_wait');
return heartBeat.pollSessionEndPoint().then(() => {
expect(spy).toHaveBeenCalledTimes(1);
});
});
it('should set _timerId to a setTimeout identifier', function() {
return heartBeat.pollSessionEndPoint().then(() => {
expect(typeof heartBeat._timerId).toEqual('number');
});
});
it('should call _checkSession if no _activePromise property', function() {
heartBeat.pollSessionEndPoint();
expect(_checkSessionStub).toHaveBeenCalledTimes(1);
});
it('should call remove _activePromise property once the session check is complete', function() {
return heartBeat.pollSessionEndPoint().then(() => {
expect(heartBeat._activePromise).toBeUndefined();
});
});
it('should call setUserInactive once the session check is complete if enabled', function() {
const setUserInactiveStub = jest.spyOn(heartBeat, 'setUserInactive');
heartBeat._enabled = true;
return heartBeat.pollSessionEndPoint().then(() => {
expect(setUserInactiveStub).toHaveBeenCalledTimes(1);
});
});
it('should not call setUserInactive once the session check is complete if not enabled', function() {
const setUserInactiveStub = jest.spyOn(heartBeat, 'setUserInactive');
heartBeat._enabled = false;
return heartBeat.pollSessionEndPoint().then(() => {
expect(setUserInactiveStub).toHaveBeenCalledTimes(0);
});
});
it('should call _wait once the session check is complete if enabled', function() {
const _waitStub = jest.spyOn(heartBeat, '_wait');
heartBeat._enabled = true;
return heartBeat.pollSessionEndPoint().then(() => {
expect(_waitStub).toHaveBeenCalledTimes(1);
});
});
it('should not call _wait once the session check is complete if not enabled', function() {
const _waitStub = jest.spyOn(heartBeat, '_wait');
heartBeat._enabled = false;
return heartBeat.pollSessionEndPoint().then(() => {
expect(_waitStub).toHaveBeenCalledTimes(0);
});
});
it('should not call _checkSession if there is an _activePromise property', function() {
heartBeat._activePromise = Promise.resolve();
heartBeat.pollSessionEndPoint();
expect(_checkSessionStub).toHaveBeenCalledTimes(0);
});
describe('and activity is detected', function() {
beforeEach(function() {
heartBeat._active = true;
});
it('should call _setActivityListeners', function() {
const spy = jest.spyOn(heartBeat, '_setActivityListeners');
heartBeat.pollSessionEndPoint();
expect(spy).toHaveBeenCalledTimes(1);
});
});
});
// monitorDisconnect contract: flips the store to disconnected state, sets a
// reconnect time, shows the "Disconnected..." snackbar, and is a no-op when
// a reconnect time is already recorded.
describe('monitorDisconnect method', function() {
let heartBeat;
beforeEach(function() {
heartBeat = new HeartBeat();
// Prevent the retry timer from actually being scheduled.
jest.spyOn(heartBeat, '_wait').mockImplementation(() => {});
heartBeat.monitorDisconnect();
});
it('should set connected to false', function() {
expect(coreStore.getters.connected).toEqual(false);
});
it('should set reconnectTime to not null', function() {
expect(coreStore.getters.reconnectTime).not.toEqual(null);
});
it('should set current snackbar to disconnected', function() {
expect(coreStore.getters.snackbarIsVisible).toEqual(true);
expect(
coreStore.getters.snackbarOptions.text.startsWith(
'Disconnected from server. Will try to reconnect in'
)
).toEqual(true);
});
it('should not do anything if it already knows it is disconnected', function() {
coreStore.commit('CORE_SET_RECONNECT_TIME', 'fork');
heartBeat.monitorDisconnect();
expect(coreStore.getters.reconnectTime).toEqual('fork');
});
});
// _checkSession contract: polls the session endpoint (mocked with xhr-mock),
// signs out on server-side logout, redirects on user change, syncs the
// server clock, and drives the disconnect/reconnect snackbar state.
describe('_checkSession method', function() {
let heartBeat;
beforeEach(function() {
heartBeat = new HeartBeat();
jest.spyOn(heartBeat, '_sessionUrl').mockReturnValue('url');
});
it('should sign out if an auto logout is detected', function() {
coreStore.commit('CORE_SET_SESSION', { user_id: 'test', id: 'current' });
mock.get(/.*/, {
status: 200,
body: JSON.stringify({ user_id: null, id: 'current' }),
headers: { 'Content-Type': 'application/json' },
});
const stub = jest.spyOn(heartBeat, 'signOutDueToInactivity');
return heartBeat._checkSession().finally(() => {
expect(stub).toHaveBeenCalledTimes(1);
});
});
it('should redirect if a change in user is detected', function() {
coreStore.commit('CORE_SET_SESSION', { user_id: 'test', id: 'current' });
mock.get(/.*/, {
status: 200,
body: JSON.stringify({ user_id: 'nottest', id: 'current' }),
headers: { 'Content-Type': 'application/json' },
});
const redirectStub = jest.spyOn(redirectBrowser, 'redirectBrowser');
return heartBeat._checkSession().finally(() => {
expect(redirectStub).toHaveBeenCalledTimes(1);
});
});
it('should not sign out if user_id changes but session is being set for first time', function() {
coreStore.commit('CORE_SET_SESSION', { user_id: undefined, id: undefined });
mock.get(/.*/, {
status: 200,
body: JSON.stringify({ user_id: null, id: 'current' }),
headers: { 'Content-Type': 'application/json' },
});
const stub = jest.spyOn(heartBeat, 'signOutDueToInactivity');
return heartBeat._checkSession().finally(() => {
expect(stub).toHaveBeenCalledTimes(0);
});
});
it('should call setServerTime with a clientNow value that is between the start and finish of the poll', function() {
coreStore.commit('CORE_SET_SESSION', { user_id: 'test', id: 'current' });
const serverTime = new Date().toJSON();
mock.get(/.*/, {
status: 200,
body: JSON.stringify({ user_id: 'test', id: 'current', server_time: serverTime }),
headers: { 'Content-Type': 'application/json' },
});
const stub = jest.spyOn(serverClock, 'setServerTime');
const start = new Date();
return heartBeat._checkSession().finally(() => {
const end = new Date();
expect(stub.mock.calls[0][0]).toEqual(serverTime);
expect(stub.mock.calls[0][1].getTime()).toBeGreaterThanOrEqual(start.getTime());
expect(stub.mock.calls[0][1].getTime()).toBeLessThan(end.getTime());
});
});
describe('when is connected', function() {
// Don't test for 0, as it is not a real error code.
// Rather it is the status code that our request client library returns
// when the connection is refused by the host, or is otherwise unable to connect.
// What happens for a zero code is tested later in this file.
disconnectionErrorCodes
.filter(code => code !== 0)
.forEach(errorCode => {
it('should call monitorDisconnect if it receives error code ' + errorCode, function() {
const monitorStub = jest.spyOn(heartBeat, 'monitorDisconnect');
mock.get(/.*/, {
status: errorCode,
headers: { 'Content-Type': 'application/json' },
});
return heartBeat._checkSession().finally(() => {
expect(monitorStub).toHaveBeenCalledTimes(1);
});
});
});
});
describe('when not connected', function() {
beforeEach(function() {
heartBeat.monitorDisconnect();
});
it('should set snackbar to trying to reconnect', function() {
heartBeat._checkSession();
expect(coreStore.getters.snackbarIsVisible).toEqual(true);
expect(coreStore.getters.snackbarOptions.text).toEqual(trs.$tr('tryingToReconnect'));
});
disconnectionErrorCodes
.filter(code => code !== 0)
.forEach(errorCode => {
it('should set snackbar to disconnected for error code ' + errorCode, function() {
jest.spyOn(heartBeat, 'monitorDisconnect');
mock.get(/.*/, {
status: errorCode,
headers: { 'Content-Type': 'application/json' },
});
heartBeat._wait = jest.fn();
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.snackbarIsVisible).toEqual(true);
expect(
coreStore.getters.snackbarOptions.text.startsWith(
'Disconnected from server. Will try to reconnect in'
)
).toEqual(true);
});
});
});
// Error code 0 is simulated by rejecting the mocked request outright.
it('should set snackbar to disconnected for error code 0', function() {
jest.spyOn(heartBeat, 'monitorDisconnect');
mock.get(/.*/, () => Promise.reject(new Error()));
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.snackbarIsVisible).toEqual(true);
expect(
coreStore.getters.snackbarOptions.text.startsWith(
'Disconnected from server. Will try to reconnect in'
)
).toEqual(true);
});
});
it('should increase the reconnect time when it fails to connect', function() {
mock.get(/.*/, () => Promise.reject(new Error()));
coreStore.commit('CORE_SET_RECONNECT_TIME', 5);
return heartBeat._checkSession().finally(() => {
const oldReconnectTime = coreStore.getters.reconnectTime;
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.reconnectTime).toBeGreaterThan(oldReconnectTime);
});
});
});
describe('and then gets reconnected', function() {
beforeEach(function() {
mock.get(/.*/, {
status: 200,
headers: { 'Content-Type': 'application/json' },
});
});
it('should set snackbar to reconnected', function() {
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.snackbarIsVisible).toEqual(true);
expect(coreStore.getters.snackbarOptions.text).toEqual(
trs.$tr('successfullyReconnected')
);
});
});
it('should set connected to true', function() {
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.connected).toEqual(true);
});
});
it('should set reconnect time to null', function() {
return heartBeat._checkSession().finally(() => {
expect(coreStore.getters.reconnectTime).toEqual(null);
});
});
});
});
});
});
| {
"pile_set_name": "Github"
} |
<component name="libraryTable">
<library name="Maven: org.springframework.cloud:spring-cloud-netflix-eureka-client:2.1.0.RELEASE">
<CLASSES>
<root url="jar://$MAVEN_REPOSITORY$/org/springframework/cloud/spring-cloud-netflix-eureka-client/2.1.0.RELEASE/spring-cloud-netflix-eureka-client-2.1.0.RELEASE.jar!/" />
</CLASSES>
<JAVADOC>
<root url="jar://$MAVEN_REPOSITORY$/org/springframework/cloud/spring-cloud-netflix-eureka-client/2.1.0.RELEASE/spring-cloud-netflix-eureka-client-2.1.0.RELEASE-javadoc.jar!/" />
</JAVADOC>
<SOURCES>
<root url="jar://$MAVEN_REPOSITORY$/org/springframework/cloud/spring-cloud-netflix-eureka-client/2.1.0.RELEASE/spring-cloud-netflix-eureka-client-2.1.0.RELEASE-sources.jar!/" />
</SOURCES>
</library>
</component> | {
"pile_set_name": "Github"
} |
/*
* Synaptics RMI4 touchscreen driver
*
* Copyright (C) 2012 Synaptics Incorporated
*
* Copyright (C) 2012 Alexandra Chin <[email protected]>
* Copyright (C) 2012 Scott Lin <[email protected]>
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/input/synaptics_dsx.h>
#include <linux/of_gpio.h>
#include "synaptics_i2c_rmi4.h"
#include <linux/input/mt.h>
/* Driver and input-device identification strings. */
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0"
#define DEBUGFS_DIR_NAME "ts_debug"
/* Delay (ms) after issuing a device reset. */
#define RESET_DELAY 100
/* Use the slot-based (type B) multi-touch input protocol. */
#define TYPE_B_PROTOCOL
/* Suppress 0D (capacitive button) reports while 2D fingers are down. */
#define NO_0D_WHILE_2D
/*
#define REPORT_2D_Z
*/
/* Report finger width (wx/wy) along with position. */
#define REPORT_2D_W
/* Bit flags selecting which fields are packed into each finger report. */
#define RPT_TYPE (1 << 0)
#define RPT_X_LSB (1 << 1)
#define RPT_X_MSB (1 << 2)
#define RPT_Y_LSB (1 << 3)
#define RPT_Y_MSB (1 << 4)
#define RPT_Z (1 << 5)
#define RPT_WX (1 << 6)
#define RPT_WY (1 << 7)
#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
#define EXP_FN_DET_INTERVAL 1000 /* ms */
#define POLLING_PERIOD 1 /* ms */
/* Number of attempts before an I2C transfer is reported as failed. */
#define SYN_I2C_RETRY_TIMES 10
#define MAX_ABS_MT_TOUCH_MAJOR 15
/* F01/F11 standard register block lengths and offsets (bytes). */
#define F01_STD_QUERY_LEN 21
#define F01_BUID_ID_OFFSET 18
#define F11_STD_QUERY_LEN 9
#define F11_STD_CTRL_LEN 10
#define F11_STD_DATA_LEN 12
/* F01 Ctrl0 sleep-mode / nosleep field values. */
#define NORMAL_OPERATION 0
#define SENSOR_SLEEP 1
#define NO_SLEEP_OFF 0
#define NO_SLEEP_ON 1
/*
 * Device status codes reported by the controller (cf. the 4-bit
 * status_code field in struct synaptics_rmi4_f01_device_status).
 */
enum device_status {
	STATUS_NO_ERROR = 0x00,
	STATUS_RESET_OCCURED = 0x01,
	STATUS_INVALID_CONFIG = 0x02,
	STATUS_DEVICE_FAILURE = 0x03,
	STATUS_CONFIG_CRC_FAILURE = 0x04,
	STATUS_FIRMWARE_CRC_FAILURE = 0x05,
	STATUS_CRC_IN_PROGRESS = 0x06,
	STATUS_UNCONFIGURED = 0x80
};
#define DEVICE_CONFIGURED 0x1
#define RMI4_VTG_MIN_UV 2700000
#define RMI4_VTG_MAX_UV 3300000
#define RMI4_ACTIVE_LOAD_UA 15000
#define RMI4_LPM_LOAD_UA 10
#define RMI4_I2C_VTG_MIN_UV 1800000
#define RMI4_I2C_VTG_MAX_UV 1800000
#define RMI4_I2C_LOAD_UA 10000
#define RMI4_I2C_LPM_LOAD_UA 10
#define RMI4_GPIO_SLEEP_LOW_US 10000
#define F12_FINGERS_TO_SUPPORT 10
#define MAX_F11_TOUCH_WIDTH 15
#define RMI4_COORDS_ARR_SIZE 4
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
unsigned short length);
static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
unsigned short length);
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data);
static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data);
static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data);
static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data
*rmi4_data);
#ifdef CONFIG_PM
static int synaptics_rmi4_suspend(struct device *dev);
static int synaptics_rmi4_resume(struct device *dev);
static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data);
#elif defined(CONFIG_HAS_EARLYSUSPEND)
static void synaptics_rmi4_early_suspend(struct early_suspend *h);
static void synaptics_rmi4_late_resume(struct early_suspend *h);
#endif
#endif
static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
static ssize_t synaptics_rmi4_flipx_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_flipx_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
static ssize_t synaptics_rmi4_flipy_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t synaptics_rmi4_flipy_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
/*
 * F01 device status register.  The union overlays named bit-fields on
 * the raw register byte so it can be read over I2C via .data and then
 * inspected field-by-field.
 */
struct synaptics_rmi4_f01_device_status {
	union {
		struct {
			unsigned char status_code:4;
			unsigned char reserved:2;
			unsigned char flash_prog:1;
			unsigned char unconfigured:1;
		} __packed;
		unsigned char data[1];
	};
};
/*
 * F01 device control register 0: sleep mode, nosleep override, charger
 * input, report rate and the "configured" flag.
 */
struct synaptics_rmi4_f01_device_control_0 {
	union {
		struct {
			unsigned char sleep_mode:2;
			unsigned char nosleep:1;
			unsigned char reserved:2;
			unsigned char charger_input:1;
			unsigned char report_rate:1;
			unsigned char configured:1;
		} __packed;
		unsigned char data[1];
	};
};
/*
 * F12 Query 5: presence map for control registers 0-31 (one bit per
 * register), preceded by the size of Query 6.  Used to locate the
 * offsets of the control registers that are actually implemented.
 */
struct synaptics_rmi4_f12_query_5 {
	union {
		struct {
			unsigned char size_of_query6;
			struct {
				unsigned char ctrl0_is_present:1;
				unsigned char ctrl1_is_present:1;
				unsigned char ctrl2_is_present:1;
				unsigned char ctrl3_is_present:1;
				unsigned char ctrl4_is_present:1;
				unsigned char ctrl5_is_present:1;
				unsigned char ctrl6_is_present:1;
				unsigned char ctrl7_is_present:1;
			} __packed;
			struct {
				unsigned char ctrl8_is_present:1;
				unsigned char ctrl9_is_present:1;
				unsigned char ctrl10_is_present:1;
				unsigned char ctrl11_is_present:1;
				unsigned char ctrl12_is_present:1;
				unsigned char ctrl13_is_present:1;
				unsigned char ctrl14_is_present:1;
				unsigned char ctrl15_is_present:1;
			} __packed;
			struct {
				unsigned char ctrl16_is_present:1;
				unsigned char ctrl17_is_present:1;
				unsigned char ctrl18_is_present:1;
				unsigned char ctrl19_is_present:1;
				unsigned char ctrl20_is_present:1;
				unsigned char ctrl21_is_present:1;
				unsigned char ctrl22_is_present:1;
				unsigned char ctrl23_is_present:1;
			} __packed;
			struct {
				unsigned char ctrl24_is_present:1;
				unsigned char ctrl25_is_present:1;
				unsigned char ctrl26_is_present:1;
				unsigned char ctrl27_is_present:1;
				unsigned char ctrl28_is_present:1;
				unsigned char ctrl29_is_present:1;
				unsigned char ctrl30_is_present:1;
				unsigned char ctrl31_is_present:1;
			} __packed;
		};
		unsigned char data[5];
	};
};
/*
 * F12 Query 8: presence map for data registers 0-15 (one bit per
 * register), preceded by the size of Query 9.
 */
struct synaptics_rmi4_f12_query_8 {
	union {
		struct {
			unsigned char size_of_query9;
			struct {
				unsigned char data0_is_present:1;
				unsigned char data1_is_present:1;
				unsigned char data2_is_present:1;
				unsigned char data3_is_present:1;
				unsigned char data4_is_present:1;
				unsigned char data5_is_present:1;
				unsigned char data6_is_present:1;
				unsigned char data7_is_present:1;
			} __packed;
			struct {
				unsigned char data8_is_present:1;
				unsigned char data9_is_present:1;
				unsigned char data10_is_present:1;
				unsigned char data11_is_present:1;
				unsigned char data12_is_present:1;
				unsigned char data13_is_present:1;
				unsigned char data14_is_present:1;
				unsigned char data15_is_present:1;
			} __packed;
		};
		unsigned char data[3];
	};
};
/*
 * F12 Ctrl 8: sensor geometry - maximum X/Y coordinates (16-bit LE),
 * RX/TX pitch, clip margins and electrode counts.
 */
struct synaptics_rmi4_f12_ctrl_8 {
	union {
		struct {
			unsigned char max_x_coord_lsb;
			unsigned char max_x_coord_msb;
			unsigned char max_y_coord_lsb;
			unsigned char max_y_coord_msb;
			unsigned char rx_pitch_lsb;
			unsigned char rx_pitch_msb;
			unsigned char tx_pitch_lsb;
			unsigned char tx_pitch_msb;
			unsigned char low_rx_clip;
			unsigned char high_rx_clip;
			unsigned char low_tx_clip;
			unsigned char high_tx_clip;
			unsigned char num_of_rx;
			unsigned char num_of_tx;
		};
		unsigned char data[14];
	};
};
/* F12 Ctrl 23: reported-object type mask and maximum object count. */
struct synaptics_rmi4_f12_ctrl_23 {
	union {
		struct {
			unsigned char obj_type_enable;
			unsigned char max_reported_objects;
		};
		unsigned char data[2];
	};
};
/*
 * Per-finger F12 data record as laid out in the device's data
 * registers; the optional z/wx/wy fields must match the REPORT_2D_*
 * build configuration or the record size will be wrong.
 */
struct synaptics_rmi4_f12_finger_data {
	unsigned char object_type_and_status;
	unsigned char x_lsb;
	unsigned char x_msb;
	unsigned char y_lsb;
	unsigned char y_msb;
#ifdef REPORT_2D_Z
	unsigned char z;
#endif
#ifdef REPORT_2D_W
	unsigned char wx;
	unsigned char wy;
#endif
};
/* F1A (0D buttons) query registers: button count and capability bits. */
struct synaptics_rmi4_f1a_query {
	union {
		struct {
			unsigned char max_button_count:3;
			unsigned char reserved:5;
			unsigned char has_general_control:1;
			unsigned char has_interrupt_enable:1;
			unsigned char has_multibutton_select:1;
			unsigned char has_tx_rx_map:1;
			unsigned char has_perbutton_threshold:1;
			unsigned char has_release_threshold:1;
			unsigned char has_strongestbtn_hysteresis:1;
			unsigned char has_filter_strength:1;
		} __packed;
		unsigned char data[2];
	};
};
/* F1A control register 0: multi-button report and filter modes. */
struct synaptics_rmi4_f1a_control_0 {
	union {
		struct {
			unsigned char multibutton_report:2;
			unsigned char filter_mode:2;
			unsigned char reserved:4;
		} __packed;
		unsigned char data[1];
	};
};
/* F1A electrode-map entry: TX/RX electrode pair for one button. */
struct synaptics_rmi4_f1a_control_3_4 {
	unsigned char transmitterbutton;
	unsigned char receiverbutton;
};
/* Aggregated F1A control data; pointer members are per-button arrays. */
struct synaptics_rmi4_f1a_control {
	struct synaptics_rmi4_f1a_control_0 general_control;
	unsigned char *button_int_enable;
	unsigned char *multi_button;
	struct synaptics_rmi4_f1a_control_3_4 *electrode_map;
	unsigned char *button_threshold;
	unsigned char button_release_threshold;
	unsigned char strongest_button_hysteresis;
	unsigned char filter_strength;
};
/* Runtime state for the F1A button function handler. */
struct synaptics_rmi4_f1a_handle {
	int button_bitmask_size;	/* bytes needed for the button bitmap */
	unsigned char button_count;
	unsigned char valid_button_count;
	unsigned char *button_data_buffer;	/* raw data register copy */
	unsigned char *button_map;	/* button index -> input key code */
	struct synaptics_rmi4_f1a_query button_query;
	struct synaptics_rmi4_f1a_control button_control;
};
/* Cached F12 data-register offsets discovered from the query regs. */
struct synaptics_rmi4_f12_extra_data {
	unsigned char data1_offset;
	unsigned char data15_offset;
	unsigned char data15_size;
	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
};
/* Registration record for an expansion function module (e.g. rmi_dev). */
struct synaptics_rmi4_exp_fn {
	enum exp_fn fn_type;
	bool inserted;		/* true once func_init has succeeded */
	int (*func_init)(struct synaptics_rmi4_data *rmi4_data);
	void (*func_remove)(struct synaptics_rmi4_data *rmi4_data);
	void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
			unsigned char intr_mask);
	struct list_head link;
};
/* sysfs attributes exposed by the driver. */
static struct device_attribute attrs[] = {
#ifdef CONFIG_PM
	__ATTR(full_pm_cycle, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_full_pm_cycle_show,
			synaptics_rmi4_full_pm_cycle_store),
#endif
	/* write-only: writing "1" resets the controller */
	__ATTR(reset, S_IWUSR | S_IWGRP,
			NULL,
			synaptics_rmi4_f01_reset_store),
	__ATTR(productinfo, S_IRUGO,
			synaptics_rmi4_f01_productinfo_show,
			synaptics_rmi4_store_error),
	__ATTR(buildid, S_IRUGO,
			synaptics_rmi4_f01_buildid_show,
			synaptics_rmi4_store_error),
	__ATTR(flashprog, S_IRUGO,
			synaptics_rmi4_f01_flashprog_show,
			synaptics_rmi4_store_error),
	/* enable/disable 0D capacitive button reporting */
	__ATTR(0dbutton, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_0dbutton_show,
			synaptics_rmi4_0dbutton_store),
	/* mirror reported coordinates along the X/Y axes */
	__ATTR(flipx, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_flipx_show,
			synaptics_rmi4_flipx_store),
	__ATTR(flipy, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_flipy_show,
			synaptics_rmi4_flipy_store),
};
/* True once expansion-function bookkeeping has been initialized. */
static bool exp_fn_inited;
/* Protects exp_fn_list. */
static struct mutex exp_fn_list_mutex;
/* Registered expansion function modules (see synaptics_rmi4_exp_fn). */
static struct list_head exp_fn_list;
#ifdef CONFIG_PM
/* sysfs read: current full power-management cycle setting (0 or 1). */
static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", data->full_pm_cycle);
}
/*
 * sysfs write: enable (non-zero) or disable (0) full power-management
 * cycling.  Returns the number of bytes consumed, or -EINVAL if the
 * buffer does not parse as an unsigned decimal integer.
 */
static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	/* kstrtouint rejects trailing garbage, unlike single-value sscanf */
	if (kstrtouint(buf, 10, &input))
		return -EINVAL;
	rmi4_data->full_pm_cycle = input > 0 ? 1 : 0;
	return count;
}
/*
 * debugfs "suspend" setter: any non-zero value forces the controller
 * into suspend, zero resumes it.  Errors from the suspend/resume paths
 * are intentionally not propagated here.
 */
static int synaptics_rmi4_debug_suspend_set(void *_data, u64 val)
{
	struct synaptics_rmi4_data *rmi4_data = _data;
	if (val)
		synaptics_rmi4_suspend(&rmi4_data->input_dev->dev);
	else
		synaptics_rmi4_resume(&rmi4_data->input_dev->dev);
	return 0;
}
/*
 * debugfs "suspend" getter: reports whether the controller is
 * currently suspended (0 or 1).
 *
 * Declared as returning int (not ssize_t) to match the
 * int (*get)(void *, u64 *) prototype that DEFINE_SIMPLE_ATTRIBUTE
 * expects; the mismatched width creates an incompatible
 * function-pointer type on 64-bit builds.
 */
static int synaptics_rmi4_debug_suspend_get(void *_data, u64 *val)
{
	struct synaptics_rmi4_data *rmi4_data = _data;
	*val = rmi4_data->suspended;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, synaptics_rmi4_debug_suspend_get,
			synaptics_rmi4_debug_suspend_set, "%lld\n");
#ifdef CONFIG_FB
/*
 * Hook suspend/resume to framebuffer blank/unblank events via an fb
 * notifier.  Registration failure is logged but not fatal.
 */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	int retval = 0;
	rmi4_data->fb_notif.notifier_call = fb_notifier_callback;
	retval = fb_register_client(&rmi4_data->fb_notif);
	if (retval)
		dev_err(&rmi4_data->i2c_client->dev,
			"Unable to register fb_notifier: %d\n", retval);
	return;
}
#elif defined CONFIG_HAS_EARLYSUSPEND
/* Hook suspend/resume to the legacy Android early-suspend framework. */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
	register_early_suspend(&rmi4_data->early_suspend);
	return;
}
#else
/* No display-driven sleep support configured: nothing to hook up. */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	return;
}
#endif
#endif
/*
 * sysfs write: issue a device reset.  Only the exact value "1" is
 * accepted; returns bytes consumed on success, -EINVAL on malformed
 * input, or the reset error code on failure.
 */
static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned int reset;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	/* kstrtouint rejects trailing garbage, unlike single-value sscanf */
	if (kstrtouint(buf, 10, &reset))
		return -EINVAL;
	if (reset != 1)
		return -EINVAL;
	retval = synaptics_rmi4_reset_device(rmi4_data);
	if (retval < 0) {
		dev_err(dev,
			"%s: Failed to issue reset command, error = %d\n",
			__func__, retval);
		return retval;
	}
	return count;
}
static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
(rmi4_data->rmi4_mod_info.product_info[0]),
(rmi4_data->rmi4_mod_info.product_info[1]));
}
/*
 * sysfs read: firmware build id, assembled from the three build-id
 * bytes stored little-endian in the module info.
 */
static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);
	struct synaptics_rmi4_device_info *rmi = &data->rmi4_mod_info;
	unsigned int build_id;

	build_id = (unsigned int)rmi->build_id[0] |
			((unsigned int)rmi->build_id[1] << 8) |
			((unsigned int)rmi->build_id[2] << 16);

	return snprintf(buf, PAGE_SIZE, "%u\n", build_id);
}
static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int retval;
struct synaptics_rmi4_f01_device_status device_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
retval = synaptics_rmi4_i2c_read(rmi4_data,
rmi4_data->f01_data_base_addr,
device_status.data,
sizeof(device_status.data));
if (retval < 0) {
dev_err(dev,
"%s: Failed to read device status, error = %d\n",
__func__, retval);
return retval;
}
return snprintf(buf, PAGE_SIZE, "%u\n",
device_status.flash_prog);
}
/* sysfs read: whether 0D (capacitive) button reporting is enabled. */
static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", data->button_0d_enabled);
}
/*
 * sysfs write: enable/disable 0D button reporting by toggling the F1A
 * bit in the matching F01 interrupt-enable register.  Takes the
 * support-function list lock while walking the handler list; returns
 * bytes consumed, -EINVAL on bad input, or an I2C error code.
 */
static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned int input;
	unsigned char ii;
	unsigned char intr_enable;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	struct synaptics_rmi4_device_info *rmi;
	rmi = &(rmi4_data->rmi4_mod_info);
	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;
	/* normalize to a boolean 0/1 */
	input = input > 0 ? 1 : 0;
	/* already in the requested state: nothing to write */
	if (rmi4_data->button_0d_enabled == input)
		return count;
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
				ii = fhandler->intr_reg_num;
				/* read-modify-write the interrupt enable
				 * register that covers this function */
				retval = synaptics_rmi4_i2c_read(rmi4_data,
					rmi4_data->f01_ctrl_base_addr +
					1 + ii,
					&intr_enable,
					sizeof(intr_enable));
				if (retval < 0)
					goto exit;
				if (input == 1)
					intr_enable |= fhandler->intr_mask;
				else
					intr_enable &= ~fhandler->intr_mask;
				retval = synaptics_rmi4_i2c_write(rmi4_data,
					rmi4_data->f01_ctrl_base_addr +
					1 + ii,
					&intr_enable,
					sizeof(intr_enable));
				if (retval < 0)
					goto exit;
			}
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	rmi4_data->button_0d_enabled = input;
	return count;
exit:
	mutex_unlock(&rmi->support_fn_list_mutex);
	return retval;
}
/* sysfs read: whether reported X coordinates are mirrored. */
static ssize_t synaptics_rmi4_flipx_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", data->flip_x);
}
/*
 * sysfs write: enable (non-zero) or disable (0) mirroring of reported
 * X coordinates.  Returns bytes consumed or -EINVAL on bad input.
 */
static ssize_t synaptics_rmi4_flipx_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	/* kstrtouint rejects trailing garbage, unlike single-value sscanf */
	if (kstrtouint(buf, 10, &input))
		return -EINVAL;
	rmi4_data->flip_x = input > 0 ? 1 : 0;
	return count;
}
/* sysfs read: whether reported Y coordinates are mirrored. */
static ssize_t synaptics_rmi4_flipy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", data->flip_y);
}
/*
 * sysfs write: enable (non-zero) or disable (0) mirroring of reported
 * Y coordinates.  Returns bytes consumed or -EINVAL on bad input.
 */
static ssize_t synaptics_rmi4_flipy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	/* kstrtouint rejects trailing garbage, unlike single-value sscanf */
	if (kstrtouint(buf, 10, &input))
		return -EINVAL;
	rmi4_data->flip_y = input > 0 ? 1 : 0;
	return count;
}
/**
 * synaptics_rmi4_set_page()
 *
 * Called by synaptics_rmi4_i2c_read() and synaptics_rmi4_i2c_write().
 *
 * This function writes to the page select register to switch to the
 * assigned page.
 *
 * Returns PAGE_SELECT_LEN on success (including when no page change
 * was needed), or -EIO once the I2C retries are exhausted.
 */
static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *rmi4_data,
		unsigned int address)
{
	int retval = 0;
	unsigned char retry;
	unsigned char buf[PAGE_SELECT_LEN];
	unsigned char page;
	struct i2c_client *i2c = rmi4_data->i2c_client;
	/* the page number is the upper byte of the 16-bit RMI address */
	page = ((address >> 8) & MASK_8BIT);
	if (page != rmi4_data->current_page) {
		/* first byte addresses the page select register */
		buf[0] = MASK_8BIT;
		buf[1] = page;
		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
			retval = i2c_master_send(i2c, buf, PAGE_SELECT_LEN);
			if (retval != PAGE_SELECT_LEN) {
				dev_err(&i2c->dev,
						"%s: I2C retry %d\n",
						__func__, retry + 1);
				msleep(20);
			} else {
				/* cache the active page to skip redundant
				 * writes on subsequent accesses */
				rmi4_data->current_page = page;
				break;
			}
		}
	} else
		return PAGE_SELECT_LEN;
	return (retval == PAGE_SELECT_LEN) ? retval : -EIO;
}
/**
 * synaptics_rmi4_i2c_read()
 *
 * Called by various functions in this driver, and also exported to
 * other expansion Function modules such as rmi_dev.
 *
 * This function reads data of an arbitrary length from the sensor,
 * starting from an assigned register address of the sensor, via I2C
 * with a retry mechanism.
 *
 * Returns the number of bytes read on success or a negative error
 * code.  Serialized with writes via rmi4_io_ctrl_mutex.
 */
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
		unsigned short addr, unsigned char *data, unsigned short length)
{
	int retval;
	unsigned char retry;
	unsigned char buf;
	/* write the register address, then read back `length` bytes */
	struct i2c_msg msg[] = {
		{
			.addr = rmi4_data->i2c_client->addr,
			.flags = 0,
			.len = 1,
			.buf = &buf,
		},
		{
			.addr = rmi4_data->i2c_client->addr,
			.flags = I2C_M_RD,
			.len = length,
			.buf = data,
		},
	};
	/* only the low byte goes on the wire; the high byte was handled
	 * by the page select in synaptics_rmi4_set_page() */
	buf = addr & MASK_8BIT;
	mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex));
	retval = synaptics_rmi4_set_page(rmi4_data, addr);
	if (retval != PAGE_SELECT_LEN)
		goto exit;
	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
		if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 2) == 2) {
			retval = length;
			break;
		}
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: I2C retry %d\n",
				__func__, retry + 1);
		msleep(20);
	}
	/* loop ran to completion without a successful transfer */
	if (retry == SYN_I2C_RETRY_TIMES) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: I2C read over retry limit\n",
				__func__);
		retval = -EIO;
	}
exit:
	mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex));
	return retval;
}
/**
 * synaptics_rmi4_i2c_write()
 *
 * Called by various functions in this driver, and also exported to
 * other expansion Function modules such as rmi_dev.
 *
 * This function writes data of an arbitrary length to the sensor,
 * starting from an assigned register address of the sensor, via I2C with
 * a retry mechanism.
 *
 * Returns the number of bytes written on success or a negative error
 * code.  Serialized with reads via rmi4_io_ctrl_mutex.
 */
static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
		unsigned short addr, unsigned char *data, unsigned short length)
{
	int retval;
	unsigned char retry;
	/* NOTE(review): variable-length array on the kernel stack; large
	 * `length` values could overflow the stack and newer kernels
	 * build with -Wvla -- consider a bounded/allocated buffer */
	unsigned char buf[length + 1];
	struct i2c_msg msg[] = {
		{
			.addr = rmi4_data->i2c_client->addr,
			.flags = 0,
			.len = length + 1,
			.buf = buf,
		}
	};
	mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex));
	retval = synaptics_rmi4_set_page(rmi4_data, addr);
	if (retval != PAGE_SELECT_LEN)
		goto exit;
	/* first byte is the register address, followed by the payload */
	buf[0] = addr & MASK_8BIT;
	memcpy(&buf[1], &data[0], length);
	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
		if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 1) == 1) {
			retval = length;
			break;
		}
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: I2C retry %d\n",
				__func__, retry + 1);
		msleep(20);
	}
	/* loop ran to completion without a successful transfer */
	if (retry == SYN_I2C_RETRY_TIMES) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: I2C write over retry limit\n",
				__func__);
		retval = -EIO;
	}
exit:
	mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex));
	return retval;
}
/**
 * synaptics_rmi4_release_all()
 *
 * Called by synaptics_rmi4_suspend()
 *
 * Lift every multi-touch slot and clear the touch/tool keys so that
 * userspace sees all contacts released when the device suspends.
 */
static void synaptics_rmi4_release_all(struct synaptics_rmi4_data *rmi4_data)
{
	struct input_dev *input = rmi4_data->input_dev;
	int slot;
	int num_slots = rmi4_data->num_of_fingers;

	for (slot = 0; slot < num_slots; slot++) {
		input_mt_slot(input, slot);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, 0);
	}

	input_report_key(input, BTN_TOUCH, 0);
	input_report_key(input, BTN_TOOL_FINGER, 0);
	input_sync(input);
}
/**
 * synaptics_rmi4_f11_abs_report()
 *
 * Called by synaptics_rmi4_report_touch() when valid Function $11
 * finger data has been detected.
 *
 * This function reads the Function $11 data registers, determines the
 * status of each finger supported by the Function, processes any
 * necessary coordinate manipulation, reports the finger data to
 * the input subsystem, and returns the number of fingers detected.
 * Returns 0 if reading the data registers fails.
 */
static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char reg_index;
	unsigned char finger;
	unsigned char fingers_supported;
	unsigned char num_of_finger_status_regs;
	unsigned char finger_shift;
	unsigned char finger_status;
	unsigned char data_reg_blk_size;
	unsigned char finger_status_reg[3];
	unsigned char data[F11_STD_DATA_LEN];
	unsigned short data_addr;
	unsigned short data_offset;
	int x;
	int y;
	int wx;
	int wy;
	int z;
	/*
	 * The number of finger status registers is determined by the
	 * maximum number of fingers supported - 2 bits per finger. So
	 * the number of finger status registers to read is:
	 * register_count = ceil(max_num_of_fingers / 4)
	 */
	fingers_supported = fhandler->num_of_data_points;
	num_of_finger_status_regs = (fingers_supported + 3) / 4;
	data_addr = fhandler->full_addr.data_base;
	data_reg_blk_size = fhandler->size_of_data_register_block;
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			finger_status_reg,
			num_of_finger_status_regs);
	if (retval < 0)
		return 0;
	for (finger = 0; finger < fingers_supported; finger++) {
		/* locate this finger's 2-bit status field */
		reg_index = finger / 4;
		finger_shift = (finger % 4) * 2;
		finger_status = (finger_status_reg[reg_index] >> finger_shift)
				& MASK_2BIT;
		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif
		if (finger_status) {
			/* per-finger data follows the status registers */
			data_offset = data_addr +
					num_of_finger_status_regs +
					(finger * data_reg_blk_size);
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					data_offset,
					data,
					data_reg_blk_size);
			if (retval < 0)
				return 0;
			/* 12-bit coordinates: 8 MSBs plus a shared
			 * nibble register holding the 4 LSBs of each */
			x = (data[0] << 4) | (data[2] & MASK_4BIT);
			y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
			wx = (data[3] & MASK_4BIT);
			wy = (data[3] >> 4) & MASK_4BIT;
			z = data[4];
			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;
			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Finger %d:\n"
					"status = 0x%02x\n"
					"x = %d\n"
					"y = %d\n"
					"wx = %d\n"
					"wy = %d\n",
					__func__, finger,
					finger_status,
					x, y, wx, wy);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_PRESSURE, z);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}
	input_report_key(rmi4_data->input_dev, BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);
#ifndef TYPE_B_PROTOCOL
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#else
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);
#endif
	input_sync(rmi4_data->input_dev);
	return touch_count;
}
/**
 * synaptics_rmi4_f12_abs_report()
 *
 * Called by synaptics_rmi4_report_touch() when valid Function $12
 * finger data has been detected.
 *
 * This function reads the Function $12 data registers, determines the
 * status of each finger supported by the Function, processes any
 * necessary coordinate manipulation, reports the finger data to
 * the input subsystem, and returns the number of fingers detected.
 * Returns 0 if reading the data registers fails.
 */
static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char finger;
	unsigned char fingers_to_process;
	unsigned char finger_status;
	unsigned char size_of_2d_data;
	unsigned short data_addr;
	int x;
	int y;
	int wx;
	int wy;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_finger_data *data;
	struct synaptics_rmi4_f12_finger_data *finger_data;
	fingers_to_process = fhandler->num_of_data_points;
	data_addr = fhandler->full_addr.data_base;
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
	/* fetch all finger records from data register 1 in one transfer */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr + extra_data->data1_offset,
			(unsigned char *)fhandler->data,
			fingers_to_process * size_of_2d_data);
	if (retval < 0)
		return 0;
	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
	for (finger = 0; finger < fingers_to_process; finger++) {
		finger_data = data + finger;
		finger_status = finger_data->object_type_and_status & MASK_2BIT;
		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif
		if (finger_status) {
			x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
			y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
#ifdef REPORT_2D_W
			wx = finger_data->wx;
			wy = finger_data->wy;
#endif
			/* NOTE(review): wx/wy are used by dev_dbg below even
			 * when REPORT_2D_W is not defined (uninitialized) */
			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;
			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Finger %d:\n"
					"status = 0x%02x\n"
					"x = %d\n"
					"y = %d\n"
					"wx = %d\n"
					"wy = %d\n",
					__func__, finger,
					finger_status,
					x, y, wx, wy);
			input_report_key(rmi4_data->input_dev,
					BTN_TOUCH, 1);
			input_report_key(rmi4_data->input_dev,
					BTN_TOOL_FINGER, 1);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}
	input_report_key(rmi4_data->input_dev,
			BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);
#ifndef TYPE_B_PROTOCOL
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#endif
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);
	input_sync(rmi4_data->input_dev);
	return touch_count;
}
/*
 * Read the F1A button data registers and report 0D button key events.
 * With NO_0D_WHILE_2D, button presses that begin while 2D fingers are
 * on the panel are suppressed (tracked in while_2d_status) so that
 * accidental palm/edge presses during touch gestures are ignored.
 * State is kept in function-local statics, so this assumes a single
 * device instance.
 */
static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char button;
	unsigned char index;
	unsigned char shift;
	unsigned char status;
	unsigned char *data;
	unsigned short data_addr = fhandler->full_addr.data_base;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	static unsigned char do_once = 1;
	static bool current_status[MAX_NUMBER_OF_BUTTONS];
#ifdef NO_0D_WHILE_2D
	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
#endif
	/* one-time zeroing of the static tracking arrays */
	if (do_once) {
		memset(current_status, 0, sizeof(current_status));
#ifdef NO_0D_WHILE_2D
		memset(before_2d_status, 0, sizeof(before_2d_status));
		memset(while_2d_status, 0, sizeof(while_2d_status));
#endif
		do_once = 0;
	}
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			f1a->button_data_buffer,
			f1a->button_bitmask_size);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read button data registers\n",
				__func__);
		return;
	}
	data = f1a->button_data_buffer;
	for (button = 0; button < f1a->valid_button_count; button++) {
		/* one status bit per button, packed 8 per byte */
		index = button / 8;
		shift = button % 8;
		status = ((data[index] >> shift) & MASK_1BIT);
		/* skip buttons whose state has not changed */
		if (current_status[button] == status)
			continue;
		else
			current_status[button] = status;
		dev_dbg(&rmi4_data->i2c_client->dev,
				"%s: Button %d (code %d) ->%d\n",
				__func__, button,
				f1a->button_map[button],
				status);
#ifdef NO_0D_WHILE_2D
		if (rmi4_data->fingers_on_2d == false) {
			if (status == 1) {
				before_2d_status[button] = 1;
			} else {
				/* suppress the release of a press that was
				 * swallowed while fingers were on 2D */
				if (while_2d_status[button] == 1) {
					while_2d_status[button] = 0;
					continue;
				} else {
					before_2d_status[button] = 0;
				}
			}
			input_report_key(rmi4_data->input_dev,
					f1a->button_map[button],
					status);
		} else {
			/* fingers on 2D: only finish presses that started
			 * before the 2D contact; swallow new ones */
			if (before_2d_status[button] == 1) {
				before_2d_status[button] = 0;
				input_report_key(rmi4_data->input_dev,
						f1a->button_map[button],
						status);
			} else {
				if (status == 1)
					while_2d_status[button] = 1;
				else
					while_2d_status[button] = 0;
			}
		}
#else
		input_report_key(rmi4_data->input_dev,
				f1a->button_map[button],
				status);
#endif
	}
	input_sync(rmi4_data->input_dev);
	return;
}
/**
 * synaptics_rmi4_report_touch()
 *
 * Called by synaptics_rmi4_sensor_report().
 *
 * This function calls the appropriate finger data reporting function
 * based on the function handler it receives and returns the number of
 * fingers detected.
 */
static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		unsigned char *touch_count)
{
	unsigned char touch_count_2d;
	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Function %02x reporting\n",
			__func__, fhandler->fn_number);
	switch (fhandler->fn_number) {
	case SYNAPTICS_RMI4_F11:
		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
				fhandler);
		*touch_count += touch_count_2d;
		if (touch_count_2d)
			rmi4_data->fingers_on_2d = true;
		else
			rmi4_data->fingers_on_2d = false;
		break;
	case SYNAPTICS_RMI4_F12:
		/* NOTE(review): unlike the F11 path, this does not add
		 * touch_count_2d into *touch_count - confirm whether the
		 * asymmetry is intentional */
		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
				fhandler);
		if (touch_count_2d)
			rmi4_data->fingers_on_2d = true;
		else
			rmi4_data->fingers_on_2d = false;
		break;
	case SYNAPTICS_RMI4_F1A:
		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
		break;
	default:
		break;
	}
	return;
}
/**
 * synaptics_rmi4_sensor_report()
 *
 * Called by synaptics_rmi4_irq().
 *
 * This function determines the interrupt source(s) from the sensor
 * and calls synaptics_rmi4_report_touch() with the appropriate
 * function handler for each function with valid data inputs.
 *
 * Returns the accumulated touch count, or a negative error code if
 * the interrupt status registers could not be read.
 */
static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	unsigned char touch_count = 0;
	unsigned char intr[MAX_INTR_REGISTERS];
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_exp_fn *exp_fhandler;
	struct synaptics_rmi4_device_info *rmi;
	rmi = &(rmi4_data->rmi4_mod_info);
	/*
	 * Get interrupt status information from F01 Data1 register to
	 * determine the source(s) that are flagging the interrupt.
	 */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_data_base_addr + 1,
			intr,
			rmi4_data->num_of_intr_regs);
	if (retval < 0)
		return retval;
	/*
	 * Traverse the function handler list and service the source(s)
	 * of the interrupt accordingly.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->num_of_data_sources) {
				if (fhandler->intr_mask &
						intr[fhandler->intr_reg_num]) {
					synaptics_rmi4_report_touch(rmi4_data,
							fhandler, &touch_count);
				}
			}
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	/* forward the attention to any registered expansion modules */
	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
			if (exp_fhandler->inserted &&
					(exp_fhandler->func_attn != NULL))
				exp_fhandler->func_attn(rmi4_data, intr[0]);
		}
	}
	mutex_unlock(&exp_fn_list_mutex);
	return touch_count;
}
/**
 * synaptics_rmi4_irq()
 *
 * Threaded interrupt handler, run when the sensor asserts the
 * attention line.  All data acquisition and input reporting is
 * delegated to synaptics_rmi4_sensor_report().
 */
static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
{
	synaptics_rmi4_sensor_report((struct synaptics_rmi4_data *)data);

	return IRQ_HANDLED;
}
#ifdef CONFIG_OF
/*
 * Read a 4-element coordinate property (min-x, min-y, max-x, max-y)
 * from the device tree node and store it in the platform data.
 *
 * @name must be either "synaptics,panel-coords" or
 * "synaptics,display-coords"; anything else is rejected with -EINVAL.
 *
 * Returns 0 on success, -EINVAL if the property is missing, malformed
 * or unsupported, -ENODATA if the property has no value, or the error
 * from of_property_read_u32_array().
 */
static int synaptics_rmi4_get_dt_coords(struct device *dev, char *name,
				struct synaptics_rmi4_platform_data *pdata)
{
	u32 coords[RMI4_COORDS_ARR_SIZE];
	struct property *prop;
	struct device_node *np = dev->of_node;
	int coords_size, rc;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -EINVAL;
	if (!prop->value)
		return -ENODATA;

	/* The property must hold exactly RMI4_COORDS_ARR_SIZE u32 cells */
	coords_size = prop->length / sizeof(u32);
	if (coords_size != RMI4_COORDS_ARR_SIZE) {
		dev_err(dev, "invalid %s\n", name);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(np, name, coords, coords_size);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read %s\n", name);
		return rc;
	}

	/* Dispatch on property name: panel vs display coordinate window */
	if (strcmp(name, "synaptics,panel-coords") == 0) {
		pdata->panel_minx = coords[0];
		pdata->panel_miny = coords[1];
		pdata->panel_maxx = coords[2];
		pdata->panel_maxy = coords[3];
	} else if (strcmp(name, "synaptics,display-coords") == 0) {
		pdata->disp_minx = coords[0];
		pdata->disp_miny = coords[1];
		pdata->disp_maxx = coords[2];
		pdata->disp_maxy = coords[3];
	} else {
		dev_err(dev, "unsupported property %s\n", name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Populate @rmi4_pdata from the device tree node of @dev.
 *
 * Parses boolean flags, panel/display coordinate windows, reset delay,
 * firmware image name, reset/irq GPIOs and the optional capacitance
 * button map.  Optional properties that are absent (-EINVAL from the
 * OF helpers) are silently skipped; any other read failure is fatal.
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_parse_dt(struct device *dev,
				struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	struct device_node *np = dev->of_node;
	struct property *prop;
	u32 temp_val, num_buttons;
	u32 button_map[MAX_NUMBER_OF_BUTTONS];
	int rc, i;

	/* Simple boolean flags: absent property means false */
	rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
			"synaptics,i2c-pull-up");
	rmi4_pdata->power_down_enable = of_property_read_bool(np,
			"synaptics,power-down");
	rmi4_pdata->disable_gpios = of_property_read_bool(np,
			"synaptics,disable-gpios");
	rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
	rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
	rmi4_pdata->do_lockdown = of_property_read_bool(np,
			"synaptics,do-lockdown");

	/* Both coordinate windows are optional (-EINVAL == not present) */
	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,display-coords",
			rmi4_pdata);
	if (rc && (rc != -EINVAL))
		return rc;

	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,panel-coords",
			rmi4_pdata);
	if (rc && (rc != -EINVAL))
		return rc;

	/* Fall back to the driver default delay if the DT omits it */
	rmi4_pdata->reset_delay = RESET_DELAY;
	rc = of_property_read_u32(np, "synaptics,reset-delay", &temp_val);
	if (!rc)
		rmi4_pdata->reset_delay = temp_val;
	else if (rc != -EINVAL) {
		dev_err(dev, "Unable to read reset delay\n");
		return rc;
	}

	rc = of_property_read_string(np, "synaptics,fw-image-name",
		&rmi4_pdata->fw_image_name);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read fw image name\n");
		return rc;
	}

	/* reset, irq gpio info */
	rmi4_pdata->reset_gpio = of_get_named_gpio_flags(np,
			"synaptics,reset-gpio", 0, &rmi4_pdata->reset_flags);
	rmi4_pdata->irq_gpio = of_get_named_gpio_flags(np,
			"synaptics,irq-gpio", 0, &rmi4_pdata->irq_flags);

	/* Optional capacitive button key-code map */
	prop = of_find_property(np, "synaptics,button-map", NULL);
	if (prop) {
		num_buttons = prop->length / sizeof(temp_val);

		/* devm allocations: freed automatically on driver detach */
		rmi4_pdata->capacitance_button_map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map),
			GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map)
			return -ENOMEM;

		/*
		 * The map array is sized for the maximum supported button
		 * count regardless of how many the DT actually declares.
		 */
		rmi4_pdata->capacitance_button_map->map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map->map) *
			MAX_NUMBER_OF_BUTTONS, GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map->map)
			return -ENOMEM;

		if (num_buttons <= MAX_NUMBER_OF_BUTTONS) {
			rc = of_property_read_u32_array(np,
				"synaptics,button-map", button_map,
				num_buttons);
			if (rc) {
				dev_err(dev, "Unable to read key codes\n");
				return rc;
			}
			for (i = 0; i < num_buttons; i++)
				rmi4_pdata->capacitance_button_map->map[i] =
					button_map[i];
			rmi4_pdata->capacitance_button_map->nbuttons =
				num_buttons;
		} else {
			/* DT declares more buttons than the driver supports */
			return -EINVAL;
		}
	}
	return 0;
}
#else
/* No-op stub used when the kernel is built without device tree support */
static inline int synaptics_rmi4_parse_dt(struct device *dev,
				struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	return 0;
}
#endif
/**
 * synaptics_rmi4_irq_enable()
 *
 * Called by synaptics_rmi4_probe() and the power management functions
 * in this driver and also exported to other expansion Function modules
 * such as rmi_dev.
 *
 * This function handles the enabling and disabling of the attention
 * irq including the setting up of the ISR thread.
 *
 * Returns 0 on success, a negative errno if clearing the pending
 * interrupt status fails, or -ENOMEM on allocation failure.
 */
static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
		bool enable)
{
	int retval = 0;
	unsigned char *intr_status;

	if (enable) {
		/* Already enabled: nothing to do */
		if (rmi4_data->irq_enabled)
			return retval;

		intr_status = kzalloc(rmi4_data->num_of_intr_regs, GFP_KERNEL);
		if (!intr_status) {
			dev_err(&rmi4_data->i2c_client->dev,
					"%s: Failed to alloc memory\n",
					__func__);
			return -ENOMEM;
		}
		/*
		 * Clear interrupts first: reading F01 Data1 deasserts any
		 * pending attention so the line is quiet before enable_irq().
		 */
		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr + 1,
				intr_status,
				rmi4_data->num_of_intr_regs);
		kfree(intr_status);
		if (retval < 0)
			return retval;

		enable_irq(rmi4_data->irq);

		rmi4_data->irq_enabled = true;
	} else {
		if (rmi4_data->irq_enabled) {
			disable_irq(rmi4_data->irq);
			rmi4_data->irq_enabled = false;
		}
	}

	return retval;
}
/**
 * synaptics_rmi4_f11_init()
 *
 * Called by synaptics_rmi4_query_device().
 *
 * This function parses information from the Function 11 registers
 * and determines the number of fingers supported, x and y data ranges,
 * offset to the associated interrupt status register, interrupt bit
 * mask, and gathers finger data acquisition capabilities from the query
 * registers.
 *
 * Returns 0 on success or a negative errno from the I2C reads.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned char intr_offset;
	unsigned char abs_data_size;
	unsigned char abs_data_blk_size;
	unsigned char query[F11_STD_QUERY_LEN];
	unsigned char control[F11_STD_CTRL_LEN];

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			query,
			sizeof(query));
	if (retval < 0)
		return retval;

	/*
	 * Maximum number of fingers supported: encoded in the low 3 bits
	 * of query 1 (0..4 means n+1 fingers, 5 means 10 fingers).
	 */
	if ((query[1] & MASK_3BIT) <= 4)
		fhandler->num_of_data_points = (query[1] & MASK_3BIT) + 1;
	else if ((query[1] & MASK_3BIT) == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.ctrl_base,
			control,
			sizeof(control));
	if (retval < 0)
		return retval;

	/* Maximum x and y: 12-bit values split across two control bytes */
	rmi4_data->sensor_max_x = ((control[6] & MASK_8BIT) << 0) |
			((control[7] & MASK_4BIT) << 8);
	rmi4_data->sensor_max_y = ((control[8] & MASK_8BIT) << 0) |
			((control[9] & MASK_4BIT) << 8);
	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	/* Index of the interrupt status register covering this function */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) +
			intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;

	/* 5 bytes per finger for abs format 0, otherwise 3 */
	abs_data_size = query[5] & MASK_2BIT;
	abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
	fhandler->size_of_data_register_block = abs_data_blk_size;

	return retval;
}
/*
 * Write the current report-enable bits to the F12_CTRL28 register.
 *
 * A non-zero @ctrl28 updates the cached register address; passing 0
 * reuses the address remembered from a previous call, allowing the
 * enables to be re-applied without re-deriving the offset.
 */
static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
		unsigned short ctrl28)
{
	static unsigned short ctrl_28_address;

	if (ctrl28)
		ctrl_28_address = ctrl28;

	return synaptics_rmi4_i2c_write(rmi4_data,
			ctrl_28_address,
			&rmi4_data->report_enable,
			sizeof(rmi4_data->report_enable));
}
/**
* synaptics_rmi4_f12_init()
*
* Called by synaptics_rmi4_query_device().
*
* This funtion parses information from the Function 12 registers and
* determines the number of fingers supported, offset to the data1
* register, x and y data ranges, offset to the associated interrupt
* status register, interrupt bit mask, and allocates memory resources
* for finger data acquisition.
*/
static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
struct synaptics_rmi4_fn *fhandler,
struct synaptics_rmi4_fn_desc *fd,
unsigned int intr_count)
{
int retval;
unsigned char ii;
unsigned char intr_offset;
unsigned char size_of_2d_data;
unsigned char size_of_query8;
unsigned char ctrl_8_offset;
unsigned char ctrl_23_offset;
unsigned char ctrl_28_offset;
unsigned char num_of_fingers;
struct synaptics_rmi4_f12_extra_data *extra_data;
struct synaptics_rmi4_f12_query_5 query_5;
struct synaptics_rmi4_f12_query_8 query_8;
struct synaptics_rmi4_f12_ctrl_8 ctrl_8;
struct synaptics_rmi4_f12_ctrl_23 ctrl_23;
fhandler->fn_number = fd->fn_number;
fhandler->num_of_data_sources = fd->intr_src_count;
fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
retval = synaptics_rmi4_i2c_read(rmi4_data,
fhandler->full_addr.query_base + 5,
query_5.data,
sizeof(query_5.data));
if (retval < 0)
return retval;
ctrl_8_offset = query_5.ctrl0_is_present +
query_5.ctrl1_is_present +
query_5.ctrl2_is_present +
query_5.ctrl3_is_present +
query_5.ctrl4_is_present +
query_5.ctrl5_is_present +
query_5.ctrl6_is_present +
query_5.ctrl7_is_present;
ctrl_23_offset = ctrl_8_offset +
query_5.ctrl8_is_present +
query_5.ctrl9_is_present +
query_5.ctrl10_is_present +
query_5.ctrl11_is_present +
query_5.ctrl12_is_present +
query_5.ctrl13_is_present +
query_5.ctrl14_is_present +
query_5.ctrl15_is_present +
query_5.ctrl16_is_present +
query_5.ctrl17_is_present +
query_5.ctrl18_is_present +
query_5.ctrl19_is_present +
query_5.ctrl20_is_present +
query_5.ctrl21_is_present +
query_5.ctrl22_is_present;
ctrl_28_offset = ctrl_23_offset +
query_5.ctrl23_is_present +
query_5.ctrl24_is_present +
query_5.ctrl25_is_present +
query_5.ctrl26_is_present +
query_5.ctrl27_is_present;
retval = synaptics_rmi4_i2c_read(rmi4_data,
fhandler->full_addr.ctrl_base + ctrl_23_offset,
ctrl_23.data,
sizeof(ctrl_23.data));
if (retval < 0)
return retval;
/* Maximum number of fingers supported */
fhandler->num_of_data_points = min(ctrl_23.max_reported_objects,
(unsigned char)F12_FINGERS_TO_SUPPORT);
num_of_fingers = fhandler->num_of_data_points;
rmi4_data->num_of_fingers = num_of_fingers;
retval = synaptics_rmi4_i2c_read(rmi4_data,
fhandler->full_addr.query_base + 7,
&size_of_query8,
sizeof(size_of_query8));
if (retval < 0)
return retval;
retval = synaptics_rmi4_i2c_read(rmi4_data,
fhandler->full_addr.query_base + 8,
query_8.data,
size_of_query8);
if (retval < 0)
return retval;
/* Determine the presence of the Data0 register */
extra_data->data1_offset = query_8.data0_is_present;
if ((size_of_query8 >= 3) && (query_8.data15_is_present)) {
extra_data->data15_offset = query_8.data0_is_present +
query_8.data1_is_present +
query_8.data2_is_present +
query_8.data3_is_present +
query_8.data4_is_present +
query_8.data5_is_present +
query_8.data6_is_present +
query_8.data7_is_present +
query_8.data8_is_present +
query_8.data9_is_present +
query_8.data10_is_present +
query_8.data11_is_present +
query_8.data12_is_present +
query_8.data13_is_present +
query_8.data14_is_present;
extra_data->data15_size = (num_of_fingers + 7) / 8;
} else {
extra_data->data15_size = 0;
}
rmi4_data->report_enable = RPT_DEFAULT;
#ifdef REPORT_2D_Z
rmi4_data->report_enable |= RPT_Z;
#endif
#ifdef REPORT_2D_W
rmi4_data->report_enable |= (RPT_WX | RPT_WY);
#endif
retval = synaptics_rmi4_f12_set_enables(rmi4_data,
fhandler->full_addr.ctrl_base + ctrl_28_offset);
if (retval < 0)
return retval;
retval = synaptics_rmi4_i2c_read(rmi4_data,
fhandler->full_addr.ctrl_base + ctrl_8_offset,
ctrl_8.data,
sizeof(ctrl_8.data));
if (retval < 0)
return retval;
/* Maximum x and y */
rmi4_data->sensor_max_x =
((unsigned short)ctrl_8.max_x_coord_lsb << 0) |
((unsigned short)ctrl_8.max_x_coord_msb << 8);
rmi4_data->sensor_max_y =
((unsigned short)ctrl_8.max_y_coord_lsb << 0) |
((unsigned short)ctrl_8.max_y_coord_msb << 8);
dev_dbg(&rmi4_data->i2c_client->dev,
"%s: Function %02x max x = %d max y = %d\n",
__func__, fhandler->fn_number,
rmi4_data->sensor_max_x,
rmi4_data->sensor_max_y);
rmi4_data->num_of_rx = ctrl_8.num_of_rx;
rmi4_data->num_of_tx = ctrl_8.num_of_tx;
rmi4_data->max_touch_width = max(rmi4_data->num_of_rx,
rmi4_data->num_of_tx);
fhandler->intr_reg_num = (intr_count + 7) / 8;
if (fhandler->intr_reg_num != 0)
fhandler->intr_reg_num -= 1;
/* Set an enable bit for each data source */
intr_offset = intr_count % 8;
fhandler->intr_mask = 0;
for (ii = intr_offset;
ii < ((fd->intr_src_count & MASK_3BIT) +
intr_offset);
ii++)
fhandler->intr_mask |= 1 << ii;
/* Allocate memory for finger data storage space */
fhandler->data_size = num_of_fingers * size_of_2d_data;
fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
return retval;
}
/*
 * Allocate and initialize the Function $1A (0D capacitive buttons)
 * handle and its button data buffer / key-code map.
 *
 * On success fhandler->data points at the new handle.  On failure the
 * partially built handle is left attached to fhandler; the caller is
 * expected to release it via synaptics_rmi4_f1a_kfree().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * errno from the query register read.
 */
static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	struct synaptics_rmi4_f1a_handle *f1a;

	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
	if (!f1a) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for function handle\n",
				__func__);
		return -ENOMEM;
	}

	fhandler->data = (void *)f1a;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			f1a->button_query.data,
			sizeof(f1a->button_query.data));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read query registers\n",
				__func__);
		return retval;
	}

	/* Query reports the highest button index, hence the +1 */
	f1a->button_count = f1a->button_query.max_button_count + 1;
	/* One bit per button in the status bitmask, rounded up to bytes */
	f1a->button_bitmask_size = (f1a->button_count + 7) / 8;
	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
	if (!f1a->button_data_buffer) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for data buffer\n",
				__func__);
		return -ENOMEM;
	}

	f1a->button_map = kcalloc(f1a->button_count,
			sizeof(*(f1a->button_map)), GFP_KERNEL);
	if (!f1a->button_map) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for button map\n",
				__func__);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Copy the board-supplied capacitive button key codes into the F1A
 * handle's button map.
 *
 * If the platform data provides no map at all this is a no-op; a map
 * structure without an actual key-code array is treated as a board
 * file error.  When the board map and the firmware disagree on the
 * button count, only the smaller number of entries is used.
 *
 * Returns 0 on success or -ENODEV when the key-code array is missing.
 */
static int synaptics_rmi4_capacitance_button_map(
		struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	unsigned char ii;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	if (!pdata->capacitance_button_map) {
		dev_info(&rmi4_data->i2c_client->dev,
				"%s: capacitance_button_map not in use\n",
				__func__);
		return 0;
	}

	if (!pdata->capacitance_button_map->map) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Button map is missing in board file\n",
				__func__);
		return -ENODEV;
	}

	if (pdata->capacitance_button_map->nbuttons != f1a->button_count)
		f1a->valid_button_count = min(f1a->button_count,
				pdata->capacitance_button_map->nbuttons);
	else
		f1a->valid_button_count = f1a->button_count;

	for (ii = 0; ii < f1a->valid_button_count; ii++)
		f1a->button_map[ii] = pdata->capacitance_button_map->map[ii];

	return 0;
}
/*
 * Release the F1A handle attached to @fhandler, including its button
 * data buffer and key-code map, and clear the data pointer.  Safe to
 * call when no handle was ever allocated.
 */
static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
{
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;

	if (!f1a)
		return;

	kfree(f1a->button_data_buffer);
	kfree(f1a->button_map);
	kfree(f1a);
	fhandler->data = NULL;
}
/*
 * Set up the Function $1A (0D capacitive buttons) handler: record the
 * interrupt register index and mask, allocate the button handle and
 * apply the board button map.  On any failure the partially built
 * handle is released before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned short intr_offset;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	/* Index of the interrupt status register covering this function */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) +
			intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;

	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
	if (retval < 0)
		goto error_exit;

	retval = synaptics_rmi4_capacitance_button_map(rmi4_data, fhandler);
	if (retval < 0)
		goto error_exit;

	rmi4_data->button_0d_enabled = 1;

	return 0;

error_exit:
	synaptics_rmi4_f1a_kfree(fhandler);

	return retval;
}
/*
 * Allocate a function handler and fill in its page-qualified register
 * base addresses from the PDT entry @rmi_fd found on @page_number.
 *
 * Returns 0 and stores the new handler through @fhandler, or -ENOMEM.
 */
static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
{
	struct synaptics_rmi4_fn *fn;
	unsigned short page_base = page_number << 8;

	fn = kzalloc(sizeof(*fn), GFP_KERNEL);
	if (!fn)
		return -ENOMEM;

	/* Fold the page select bits into each base address */
	fn->full_addr.data_base = rmi_fd->data_base_addr | page_base;
	fn->full_addr.ctrl_base = rmi_fd->ctrl_base_addr | page_base;
	fn->full_addr.cmd_base = rmi_fd->cmd_base_addr | page_base;
	fn->full_addr.query_base = rmi_fd->query_base_addr | page_base;
	fn->fn_number = rmi_fd->fn_number;

	*fhandler = fn;

	return 0;
}
/**
 * synaptics_rmi4_query_device_info()
 *
 * Called by synaptics_rmi4_query_device().
 *
 * Reads the F01 query registers and fills in the device info block
 * (manufacturer, product info, date code, tester/serial numbers,
 * product ID string and firmware build id).
 *
 * Returns 0 on success or a negative errno from the I2C reads.
 */
static int synaptics_rmi4_query_device_info(
					struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	unsigned char f01_query[F01_STD_QUERY_LEN];
	struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_query_base_addr,
			f01_query,
			sizeof(f01_query));
	if (retval < 0)
		return retval;

	/* RMI Version 4.0 currently supported */
	rmi->version_major = 4;
	rmi->version_minor = 0;

	/* Unpack the fixed-layout F01 query block */
	rmi->manufacturer_id = f01_query[0];
	rmi->product_props = f01_query[1];
	rmi->product_info[0] = f01_query[2] & MASK_7BIT;
	rmi->product_info[1] = f01_query[3] & MASK_7BIT;
	rmi->date_code[0] = f01_query[4] & MASK_5BIT;
	rmi->date_code[1] = f01_query[5] & MASK_4BIT;
	rmi->date_code[2] = f01_query[6] & MASK_5BIT;
	rmi->tester_id = ((f01_query[7] & MASK_7BIT) << 8) |
			(f01_query[8] & MASK_7BIT);
	rmi->serial_number = ((f01_query[9] & MASK_7BIT) << 8) |
			(f01_query[10] & MASK_7BIT);
	/* Product ID string is 10 bytes starting at query byte 11 */
	memcpy(rmi->product_id_string, &f01_query[11], 10);

	/* Synaptics parts report manufacturer ID 1; warn but continue */
	if (rmi->manufacturer_id != 1) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Non-Synaptics device found, manufacturer ID = %d\n",
			__func__, rmi->manufacturer_id);
	}

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
			rmi->build_id,
			sizeof(rmi->build_id));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to read firmware build id (code %d)\n",
			__func__, retval);
		return retval;
	}
	return 0;
}
/*
* This function checks whether the fhandler already existis in the
* support_fn_list or not.
* If it exists then return 1 as found or return 0 as not found.
*
* Called by synaptics_rmi4_query_device().
*/
static int synaptics_rmi4_check_fn_list(struct synaptics_rmi4_data *rmi4_data,
struct synaptics_rmi4_fn *fhandler)
{
int found = 0;
struct synaptics_rmi4_fn *new_fhandler;
struct synaptics_rmi4_device_info *rmi;
rmi = &(rmi4_data->rmi4_mod_info);
mutex_lock(&rmi->support_fn_list_mutex);
if (!list_empty(&rmi->support_fn_list))
list_for_each_entry(new_fhandler, &rmi->support_fn_list, link)
if (new_fhandler->fn_number == fhandler->fn_number)
found = 1;
mutex_unlock(&rmi->support_fn_list_mutex);
return found;
}
/**
 * synaptics_rmi4_query_device()
 *
 * Called by synaptics_rmi4_probe().
 *
 * This function scans the page description table, records the offsets
 * to the register types of Function $01, sets up the function handlers
 * for Function $11 and Function $12, determines the number of interrupt
 * sources from the sensor, adds valid Functions with data inputs to the
 * Function linked list, parses information from the query registers of
 * Function $01, and enables the interrupt sources from the valid Functions
 * with data inputs.
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval, found;
	unsigned char ii;
	unsigned char page_number;
	unsigned char intr_count = 0;
	unsigned char data_sources = 0;
	unsigned short pdt_entry_addr;
	unsigned short intr_addr;
	struct synaptics_rmi4_f01_device_status status;
	struct synaptics_rmi4_fn_desc rmi_fd;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		/* PDT entries are laid out top-down from PDT_START */
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			/* Fold the page select bits into the entry address */
			pdt_entry_addr |= (page_number << 8);

			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr,
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			fhandler = NULL;
			found = 0;

			/* Function number 0 marks the end of the PDT */
			if (rmi_fd.fn_number == 0) {
				dev_dbg(&rmi4_data->i2c_client->dev,
						"%s: Reached end of PDT\n",
						__func__);
				break;
			}

			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: F%02x found (page %d)\n",
					__func__, rmi_fd.fn_number,
					page_number);

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				/* Record F01 register bases for later use */
				rmi4_data->f01_query_base_addr =
						rmi_fd.query_base_addr;
				rmi4_data->f01_ctrl_base_addr =
						rmi_fd.ctrl_base_addr;
				rmi4_data->f01_data_base_addr =
						rmi_fd.data_base_addr;
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr;

				retval =
				synaptics_rmi4_query_device_info(rmi4_data);
				if (retval < 0)
					return retval;

				retval = synaptics_rmi4_i2c_read(rmi4_data,
						rmi4_data->f01_data_base_addr,
						status.data,
						sizeof(status.data));
				if (retval < 0)
					return retval;

				/*
				 * Poll until the firmware CRC check
				 * completes before trusting the status.
				 * NOTE(review): msleep(1) may actually
				 * sleep much longer than 1 ms; harmless
				 * here since this is a one-off poll loop.
				 */
				while (status.status_code == STATUS_CRC_IN_PROGRESS) {
					msleep(1);
					retval = synaptics_rmi4_i2c_read(rmi4_data,
						rmi4_data->f01_data_base_addr,
						status.data,
						sizeof(status.data));
					if (retval < 0)
						return retval;
				}

				/*
				 * In flash programming (bootloader) mode
				 * only F01 is valid; skip the rest of the
				 * PDT scan.
				 */
				if (status.flash_prog == 1) {
					pr_notice("%s: In flash prog mode, status = 0x%02x\n",
							__func__,
							status.status_code);
					goto flash_prog_mode;
				}
				break;

			case SYNAPTICS_RMI4_F11:
				/* 2D touch (legacy register map) */
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f11_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;

			case SYNAPTICS_RMI4_F12:
				/* 2D touch (newer register map) */
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f12_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;

			case SYNAPTICS_RMI4_F1A:
				/* 0D capacitive buttons */
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f1a_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;
			}

			/* Accumulate the interrupt count */
			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);

			if (fhandler && rmi_fd.intr_src_count) {
				/* Want to check whether the fhandler already
				exists in the support_fn_list or not.
				If not found then add it to the list, otherwise
				free the memory allocated to it.
				*/
				found = synaptics_rmi4_check_fn_list(rmi4_data,
						fhandler);

				if (!found) {
					mutex_lock(&rmi->support_fn_list_mutex);
					list_add_tail(&fhandler->link,
							&rmi->support_fn_list);
					mutex_unlock(
						&rmi->support_fn_list_mutex);
				} else {
					if (fhandler->fn_number ==
							SYNAPTICS_RMI4_F1A) {
						synaptics_rmi4_f1a_kfree(
							fhandler);
					} else {
						kfree(fhandler->data);
						kfree(fhandler->extra);
					}
					kfree(fhandler);
				}
			}
		}
	}

flash_prog_mode:
	/* One status bit per interrupt source, packed into bytes */
	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Number of interrupt registers = %d\n",
			__func__, rmi4_data->num_of_intr_regs);

	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));

	/*
	 * Map out the interrupt bit masks for the interrupt sources
	 * from the registered function handlers.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link)
			data_sources += fhandler->num_of_data_sources;
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	if (data_sources) {
		mutex_lock(&rmi->support_fn_list_mutex);
		if (!list_empty(&rmi->support_fn_list)) {
			list_for_each_entry(fhandler,
						&rmi->support_fn_list, link) {
				if (fhandler->num_of_data_sources) {
					rmi4_data->intr_mask[fhandler->intr_reg_num] |=
							fhandler->intr_mask;
				}
			}
		}
		mutex_unlock(&rmi->support_fn_list_mutex);
	}

	/* Enable the interrupt sources */
	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
		if (rmi4_data->intr_mask[ii] != 0x00) {
			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Interrupt enable mask %d = 0x%02x\n",
					__func__, ii, rmi4_data->intr_mask[ii]);
			intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
			retval = synaptics_rmi4_i2c_write(rmi4_data,
					intr_addr,
					&(rmi4_data->intr_mask[ii]),
					sizeof(rmi4_data->intr_mask[ii]));
			if (retval < 0)
				return retval;
		}
	}

	return 0;
}
/*
 * Locate Function $01 in the page description table and issue a
 * software reset through its command register, then wait out the
 * board-specified reset delay.
 *
 * Returns 0 on success, -EINVAL if F01 cannot be found, or a negative
 * errno from the I2C accesses.
 */
static int synaptics_rmi4_reset_command(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	int page_number;
	unsigned char command = 0x01;
	unsigned short pdt_entry_addr;
	struct synaptics_rmi4_fn_desc rmi_fd;
	bool done = false;

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			/*
			 * Apply the page select bits to the entry address;
			 * previously every iteration of the outer loop
			 * re-read page 0, making the page loop a no-op.
			 */
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr | (page_number << 8),
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			/* Function number 0 marks the end of the PDT */
			if (rmi_fd.fn_number == 0)
				break;

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				/* Keep the page-qualified command address */
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr |
						(page_number << 8);
				done = true;
				break;
			}
		}
		if (done) {
			dev_info(&rmi4_data->i2c_client->dev,
				"%s: Find F01 in page description table 0x%x\n",
				__func__, rmi4_data->f01_cmd_base_addr);
			break;
		}
	}

	if (!done) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Cannot find F01 in page description table\n",
			__func__);
		return -EINVAL;
	}

	/* Writing 0x01 to the F01 command register triggers a reset */
	retval = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_cmd_base_addr,
			&command,
			sizeof(command));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to issue reset command, error = %d\n",
			__func__, retval);
		return retval;
	}

	msleep(rmi4_data->board->reset_delay);
	return retval;
}
/*
 * Reset the sensor and rebuild the driver state: issue the F01 reset
 * command, tear down all registered function handlers, then re-scan
 * the device via synaptics_rmi4_query_device().
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_fn *next_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	retval = synaptics_rmi4_reset_command(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to send command reset\n",
			__func__);
		return retval;
	}

	/*
	 * Hold support_fn_list_mutex while tearing the list down, as
	 * every other accessor of this list in the driver does (e.g.
	 * synaptics_rmi4_sensor_report()); previously the list was
	 * freed unlocked, racing with the ISR thread.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry_safe(fhandler, next_fhandler,
					&rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				synaptics_rmi4_f1a_kfree(fhandler);
			else {
				kfree(fhandler->data);
				kfree(fhandler->extra);
			}
			kfree(fhandler);
		}
	}

	INIT_LIST_HEAD(&rmi->support_fn_list);
	mutex_unlock(&rmi->support_fn_list_mutex);

	retval = synaptics_rmi4_query_device(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to query device\n",
			__func__);
		return retval;
	}

	return 0;
}
/**
 * synaptics_rmi4_detection_work()
 *
 * Called by the kernel at the scheduled time.
 *
 * This function is a self-rearming work thread that checks for the
 * insertion and removal of other expansion Function modules such as
 * rmi_dev and calls their initialization and removal callback functions
 * accordingly.
 */
static void synaptics_rmi4_detection_work(struct work_struct *work)
{
	struct synaptics_rmi4_exp_fn *exp_fhandler, *next_list_entry;
	struct synaptics_rmi4_data *rmi4_data =
			container_of(work, struct synaptics_rmi4_data,
			det_work.work);

	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		/* _safe variant: entries may be unlinked while iterating */
		list_for_each_entry_safe(exp_fhandler,
				next_list_entry,
				&exp_fn_list,
				link) {
			if ((exp_fhandler->func_init != NULL) &&
					(exp_fhandler->inserted == false)) {
				/*
				 * Newly registered module: run its init;
				 * drop the entry if init fails.
				 */
				if (exp_fhandler->func_init(rmi4_data) < 0) {
					list_del(&exp_fhandler->link);
					kfree(exp_fhandler);
				} else {
					exp_fhandler->inserted = true;
				}
			} else if ((exp_fhandler->func_init == NULL) &&
					(exp_fhandler->inserted == true)) {
				/*
				 * func_init cleared by
				 * synaptics_rmi4_new_function() marks the
				 * module for removal.
				 */
				exp_fhandler->func_remove(rmi4_data);
				list_del(&exp_fhandler->link);
				kfree(exp_fhandler);
			}
		}
	}
	mutex_unlock(&exp_fn_list_mutex);

	return;
}
/**
 * synaptics_rmi4_new_function()
 *
 * Called by other expansion Function modules in their module init and
 * module exit functions.
 *
 * This function is used by other expansion Function modules such as
 * rmi_dev to register themselves with the driver by providing their
 * initialization and removal callback function pointers so that they
 * can be inserted or removed dynamically at module init and exit times,
 * respectively.
 */
void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert,
		int (*func_init)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_remove)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
				unsigned char intr_mask))
{
	struct synaptics_rmi4_exp_fn *exp_fhandler;

	/*
	 * NOTE(review): lazy init of the list/mutex guarded by a plain
	 * flag is racy if two modules register concurrently; a
	 * file-scope DEFINE_MUTEX/LIST_HEAD would avoid this — confirm
	 * whether concurrent registration is possible here.
	 */
	if (!exp_fn_inited) {
		mutex_init(&exp_fn_list_mutex);
		INIT_LIST_HEAD(&exp_fn_list);
		exp_fn_inited = 1;
	}

	mutex_lock(&exp_fn_list_mutex);
	if (insert) {
		/* Registration: queue the module for the detection work */
		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
		if (!exp_fhandler) {
			pr_err("%s: Failed to alloc mem for expansion function\n",
					__func__);
			goto exit;
		}
		exp_fhandler->fn_type = fn_type;
		exp_fhandler->func_init = func_init;
		exp_fhandler->func_attn = func_attn;
		exp_fhandler->func_remove = func_remove;
		exp_fhandler->inserted = false;
		list_add_tail(&exp_fhandler->link, &exp_fn_list);
	} else {
		/*
		 * Deregistration: clear the callbacks so the detection
		 * work removes and frees the entry on its next pass.
		 */
		if (!list_empty(&exp_fn_list)) {
			list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
				if (exp_fhandler->func_init == func_init) {
					exp_fhandler->inserted = false;
					exp_fhandler->func_init = NULL;
					exp_fhandler->func_attn = NULL;
					goto exit;
				}
			}
		}
	}

exit:
	mutex_unlock(&exp_fn_list_mutex);

	return;
}
EXPORT_SYMBOL(synaptics_rmi4_new_function);
/*
 * Apply a load hint to @reg, but only when the regulator supports
 * voltage control; otherwise report success without touching it.
 */
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
	if (regulator_count_voltages(reg) <= 0)
		return 0;

	return regulator_set_optimum_mode(reg, load_uA);
}
/*
 * Acquire and configure (on == true) or release (on == false) the
 * vdd and optional vcc_i2c regulators.
 *
 * The vcc_i2c supply is only handled when the board declares
 * i2c_pull_up.  The shutdown path assumes the configure path
 * succeeded earlier, so both regulators are valid.
 *
 * Returns 0 on success or a negative errno; error paths unwind any
 * partially acquired regulators.
 */
static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data
						*rmi4_data, bool on)
{
	int retval;

	if (on == false)
		goto hw_shutdown;

	rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev,
					"vdd");
	if (IS_ERR(rmi4_data->vdd)) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to get vdd regulator\n",
				__func__);
		return PTR_ERR(rmi4_data->vdd);
	}

	/* Only voltage-controllable regulators accept set_voltage() */
	if (regulator_count_voltages(rmi4_data->vdd) > 0) {
		retval = regulator_set_voltage(rmi4_data->vdd,
			RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"regulator set_vtg failed retval =%d\n",
				retval);
			goto err_set_vtg_vdd;
		}
	}

	if (rmi4_data->board->i2c_pull_up) {
		rmi4_data->vcc_i2c = regulator_get(&rmi4_data->i2c_client->dev,
						"vcc_i2c");
		if (IS_ERR(rmi4_data->vcc_i2c)) {
			dev_err(&rmi4_data->i2c_client->dev,
					"%s: Failed to get i2c regulator\n",
					__func__);
			retval = PTR_ERR(rmi4_data->vcc_i2c);
			goto err_get_vtg_i2c;
		}

		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) {
			retval = regulator_set_voltage(rmi4_data->vcc_i2c,
				RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"reg set i2c vtg failed retval =%d\n",
					retval);
				goto err_set_vtg_i2c;
			}
		}
	}
	return 0;

err_set_vtg_i2c:
	if (rmi4_data->board->i2c_pull_up)
		regulator_put(rmi4_data->vcc_i2c);
err_get_vtg_i2c:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0,
			RMI4_VTG_MAX_UV);
err_set_vtg_vdd:
	regulator_put(rmi4_data->vdd);
	return retval;

hw_shutdown:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0,
			RMI4_VTG_MAX_UV);
	regulator_put(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0)
			regulator_set_voltage(rmi4_data->vcc_i2c, 0,
					RMI4_I2C_VTG_MAX_UV);
		regulator_put(rmi4_data->vcc_i2c);
	}
	return 0;
}
/*
 * Enable (on == true) or disable (on == false) power to the sensor:
 * sets the regulator load hints and enables/disables vdd and, when
 * the board uses an i2c pull-up supply, vcc_i2c.
 *
 * Assumes synaptics_rmi4_regulator_configure() has already acquired
 * the regulators.
 *
 * Returns 0 on success or a negative errno; error paths unwind any
 * partially applied settings.
 */
static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data,
					bool on) {
	int retval;

	if (on == false)
		goto power_off;

	/* Request the active-load operating mode before enabling */
	retval = reg_set_optimum_mode_check(rmi4_data->vdd,
		RMI4_ACTIVE_LOAD_UA);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd set_opt failed rc=%d\n",
			retval);
		return retval;
	}

	retval = regulator_enable(rmi4_data->vdd);
	if (retval) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd enable failed rc=%d\n",
			retval);
		goto error_reg_en_vdd;
	}

	if (rmi4_data->board->i2c_pull_up) {
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			RMI4_I2C_LOAD_UA);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed rc=%d\n",
				retval);
			goto error_reg_opt_i2c;
		}

		retval = regulator_enable(rmi4_data->vcc_i2c);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c enable failed rc=%d\n",
				retval);
			goto error_reg_en_vcc_i2c;
		}
	}
	return 0;

error_reg_en_vcc_i2c:
	if (rmi4_data->board->i2c_pull_up)
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
error_reg_opt_i2c:
	regulator_disable(rmi4_data->vdd);
error_reg_en_vdd:
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	return retval;

power_off:
	/* Drop load hints and cut both supplies */
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	regulator_disable(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
		regulator_disable(rmi4_data->vcc_i2c);
	}
	return 0;
}
/*
 * synaptics_rmi4_gpio_configure()
 *
 * on == true: requests and configures the interrupt gpio (input) and, if
 * provided, the reset gpio (output, driven high after reset_delay).
 * Boards without a reset gpio get a soft reset command instead.
 * on == false: releases the gpios when the board allows it
 * (disable_gpios), putting the reset line into input mode first to
 * minimize leakage current.
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_gpio_configure(struct synaptics_rmi4_data *rmi4_data,
					bool on)
{
	int retval = 0;
	if (on) {
		if (gpio_is_valid(rmi4_data->board->irq_gpio)) {
			/* configure touchscreen irq gpio */
			retval = gpio_request(rmi4_data->board->irq_gpio,
						"rmi4_irq_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->irq_gpio);
				goto err_irq_gpio_req;
			}
			retval = gpio_direction_input(
					rmi4_data->board->irq_gpio);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n", rmi4_data->board->irq_gpio);
				goto err_irq_gpio_dir;
			}
		} else {
			dev_err(&rmi4_data->i2c_client->dev,
				"irq gpio not provided\n");
			/* Without an irq gpio the controller cannot signal
			 * attention; report failure instead of silently
			 * returning 0 (retval was still zero here). */
			retval = -EINVAL;
			goto err_irq_gpio_req;
		}
		if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
			/* configure touchscreen reset out gpio */
			retval = gpio_request(rmi4_data->board->reset_gpio,
						"rmi4_reset_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_irq_gpio_dir;
			}
			retval = gpio_direction_output(
					rmi4_data->board->reset_gpio, 1);
			if (retval) {
				/* Report the gpio that actually failed
				 * (reset), not the irq gpio. */
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_reset_gpio_dir;
			}
			gpio_set_value(rmi4_data->board->reset_gpio, 1);
			msleep(rmi4_data->board->reset_delay);
		} else
			synaptics_rmi4_reset_command(rmi4_data);
		return 0;
	} else {
		if (rmi4_data->board->disable_gpios) {
			if (gpio_is_valid(rmi4_data->board->irq_gpio))
				gpio_free(rmi4_data->board->irq_gpio);
			if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
				/*
				 * This is intended to save leakage current
				 * only. Even if the call(gpio_direction_input)
				 * fails, only leakage current will be more but
				 * functionality will not be affected.
				 */
				retval = gpio_direction_input(rmi4_data->
							board->reset_gpio);
				if (retval) {
					dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n", rmi4_data->board->irq_gpio);
				}
				gpio_free(rmi4_data->board->reset_gpio);
			}
		}
		return 0;
	}

err_reset_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->reset_gpio))
		gpio_free(rmi4_data->board->reset_gpio);
err_irq_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->irq_gpio))
		gpio_free(rmi4_data->board->irq_gpio);
err_irq_gpio_req:
	return retval;
}
/**
* synaptics_rmi4_probe()
*
* Called by the kernel when an association with an I2C device of the
* same name is made (after doing i2c_add_driver).
*
 * This function allocates and initializes the resources for the driver
* as an input driver, turns on the power to the sensor, queries the
* sensor for its supported Functions and characteristics, registers
* the driver to the input subsystem, sets up the interrupt, handles
* the registration of the early_suspend and late_resume functions,
* and creates a work queue for detection of other expansion Function
* modules.
*/
static int __devinit synaptics_rmi4_probe(struct i2c_client *client,
		const struct i2c_device_id *dev_id)
{
	int retval = 0;
	unsigned char ii;
	unsigned char attr_count;
	struct synaptics_rmi4_f1a_handle *f1a;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_fn *next_fhandler;
	struct synaptics_rmi4_data *rmi4_data;
	struct synaptics_rmi4_device_info *rmi;
	struct synaptics_rmi4_platform_data *platform_data =
			client->dev.platform_data;
	struct dentry *temp;

	if (!i2c_check_functionality(client->adapter,
			I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev,
				"%s: SMBus byte data not supported\n",
				__func__);
		return -EIO;
	}

	/* Prefer device-tree data when present; fall back to board file. */
	if (client->dev.of_node) {
		platform_data = devm_kzalloc(&client->dev,
			sizeof(*platform_data),
			GFP_KERNEL);
		if (!platform_data) {
			dev_err(&client->dev, "Failed to allocate memory\n");
			return -ENOMEM;
		}
		retval = synaptics_rmi4_parse_dt(&client->dev, platform_data);
		if (retval)
			return retval;
	} else {
		platform_data = client->dev.platform_data;
	}

	if (!platform_data) {
		dev_err(&client->dev,
				"%s: No platform data found\n",
				__func__);
		return -EINVAL;
	}

	/* One instance is enough; the previous "sizeof(*rmi4_data) * 2"
	 * allocated twice the needed memory for no reason. */
	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
	if (!rmi4_data) {
		dev_err(&client->dev,
				"%s: Failed to alloc mem for rmi4_data\n",
				__func__);
		return -ENOMEM;
	}

	rmi = &(rmi4_data->rmi4_mod_info);

	rmi4_data->input_dev = input_allocate_device();
	if (rmi4_data->input_dev == NULL) {
		dev_err(&client->dev,
				"%s: Failed to allocate input device\n",
				__func__);
		retval = -ENOMEM;
		goto err_input_device;
	}

	rmi4_data->i2c_client = client;
	rmi4_data->current_page = MASK_8BIT;
	rmi4_data->board = platform_data;
	rmi4_data->touch_stopped = false;
	rmi4_data->sensor_sleep = false;
	rmi4_data->irq_enabled = false;
	rmi4_data->fw_updating = false;
	rmi4_data->suspended = false;

	rmi4_data->i2c_read = synaptics_rmi4_i2c_read;
	rmi4_data->i2c_write = synaptics_rmi4_i2c_write;
	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
	rmi4_data->reset_device = synaptics_rmi4_reset_device;

	rmi4_data->flip_x = rmi4_data->board->x_flip;
	rmi4_data->flip_y = rmi4_data->board->y_flip;

	if (rmi4_data->board->fw_image_name)
		snprintf(rmi4_data->fw_image_name, NAME_BUFFER_SIZE, "%s",
			rmi4_data->board->fw_image_name);

	rmi4_data->input_dev->name = DRIVER_NAME;
	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
	rmi4_data->input_dev->id.bustype = BUS_I2C;
	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
	rmi4_data->input_dev->dev.parent = &client->dev;
	input_set_drvdata(rmi4_data->input_dev, rmi4_data);

	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);

#ifdef INPUT_PROP_DIRECT
	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
#endif

	retval = synaptics_rmi4_regulator_configure(rmi4_data, true);
	if (retval < 0) {
		dev_err(&client->dev, "Failed to configure regulators\n");
		goto err_reg_configure;
	}

	retval = synaptics_rmi4_power_on(rmi4_data, true);
	if (retval < 0) {
		dev_err(&client->dev, "Failed to power on\n");
		goto err_power_device;
	}

	retval = synaptics_rmi4_gpio_configure(rmi4_data, true);
	if (retval < 0) {
		dev_err(&client->dev, "Failed to configure gpios\n");
		goto err_gpio_config;
	}

	init_waitqueue_head(&rmi4_data->wait);
	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));

	INIT_LIST_HEAD(&rmi->support_fn_list);
	mutex_init(&rmi->support_fn_list_mutex);

	retval = synaptics_rmi4_query_device(rmi4_data);
	if (retval < 0) {
		dev_err(&client->dev,
				"%s: Failed to query device\n",
				__func__);
		goto err_free_gpios;
	}

	/* Display limits default to the sensor's reported extents when the
	 * board does not override them. */
	if (rmi4_data->board->disp_maxx)
		rmi4_data->disp_maxx = rmi4_data->board->disp_maxx;
	else
		rmi4_data->disp_maxx = rmi4_data->sensor_max_x;

	if (rmi4_data->board->disp_maxy)
		rmi4_data->disp_maxy = rmi4_data->board->disp_maxy;
	else
		rmi4_data->disp_maxy = rmi4_data->sensor_max_y;

	if (rmi4_data->board->disp_minx)
		rmi4_data->disp_minx = rmi4_data->board->disp_minx;
	else
		rmi4_data->disp_minx = 0;

	if (rmi4_data->board->disp_miny)
		rmi4_data->disp_miny = rmi4_data->board->disp_miny;
	else
		rmi4_data->disp_miny = 0;

	input_set_abs_params(rmi4_data->input_dev,
			ABS_MT_POSITION_X, rmi4_data->disp_minx,
			rmi4_data->disp_maxx, 0, 0);
	input_set_abs_params(rmi4_data->input_dev,
			ABS_MT_POSITION_Y, rmi4_data->disp_miny,
			rmi4_data->disp_maxy, 0, 0);
	input_set_abs_params(rmi4_data->input_dev,
			ABS_PRESSURE, 0, 255, 0, 0);
#ifdef REPORT_2D_W
	input_set_abs_params(rmi4_data->input_dev,
			ABS_MT_TOUCH_MAJOR, 0,
			rmi4_data->max_touch_width, 0, 0);
	input_set_abs_params(rmi4_data->input_dev,
			ABS_MT_TOUCH_MINOR, 0,
			rmi4_data->max_touch_width, 0, 0);
#endif

#ifdef TYPE_B_PROTOCOL
	input_mt_init_slots(rmi4_data->input_dev,
			rmi4_data->num_of_fingers);
#endif

	i2c_set_clientdata(client, rmi4_data);

	/* Map F1A (capacitive buttons) key codes if the function exists. */
	f1a = NULL;
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				f1a = fhandler->data;
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);

	if (f1a) {
		for (ii = 0; ii < f1a->valid_button_count; ii++) {
			set_bit(f1a->button_map[ii],
					rmi4_data->input_dev->keybit);
			input_set_capability(rmi4_data->input_dev,
					EV_KEY, f1a->button_map[ii]);
		}
	}

	retval = input_register_device(rmi4_data->input_dev);
	if (retval) {
		dev_err(&client->dev,
				"%s: Failed to register input device\n",
				__func__);
		goto err_register_input;
	}

	configure_sleep(rmi4_data);

	if (!exp_fn_inited) {
		mutex_init(&exp_fn_list_mutex);
		INIT_LIST_HEAD(&exp_fn_list);
		exp_fn_inited = 1;
	}

	rmi4_data->det_workqueue =
			create_singlethread_workqueue("rmi_det_workqueue");
	INIT_DELAYED_WORK(&rmi4_data->det_work,
			synaptics_rmi4_detection_work);
	queue_delayed_work(rmi4_data->det_workqueue,
			&rmi4_data->det_work,
			msecs_to_jiffies(EXP_FN_DET_INTERVAL));

	rmi4_data->irq = gpio_to_irq(platform_data->irq_gpio);

	retval = request_threaded_irq(rmi4_data->irq, NULL,
		synaptics_rmi4_irq, platform_data->irq_flags,
		DRIVER_NAME, rmi4_data);
	if (retval < 0) {
		dev_err(&client->dev,
				"%s: Failed to create irq thread\n",
				__func__);
		goto err_enable_irq;
	}
	/* Only mark the interrupt enabled once the request succeeded. */
	rmi4_data->irq_enabled = true;

	rmi4_data->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
	if (rmi4_data->dir == NULL || IS_ERR(rmi4_data->dir)) {
		dev_err(&client->dev,
			"%s: Failed to create debugfs directory, rc = %ld\n",
			__func__, PTR_ERR(rmi4_data->dir));
		/* PTR_ERR(NULL) is 0, which would falsely report success
		 * to the caller after tearing everything down. */
		retval = rmi4_data->dir ? PTR_ERR(rmi4_data->dir) : -ENOMEM;
		goto err_create_debugfs_dir;
	}

	temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, rmi4_data->dir,
				rmi4_data, &debug_suspend_fops);
	if (temp == NULL || IS_ERR(temp)) {
		dev_err(&client->dev,
			"%s: Failed to create suspend debugfs file, rc = %ld\n",
			__func__, PTR_ERR(temp));
		retval = temp ? PTR_ERR(temp) : -ENOMEM;
		goto err_create_debugfs_file;
	}

	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
		retval = sysfs_create_file(&client->dev.kobj,
				&attrs[attr_count].attr);
		if (retval < 0) {
			dev_err(&client->dev,
					"%s: Failed to create sysfs attributes\n",
					__func__);
			goto err_sysfs;
		}
	}

	synaptics_rmi4_sensor_wake(rmi4_data);

	retval = synaptics_rmi4_irq_enable(rmi4_data, true);
	if (retval < 0) {
		dev_err(&client->dev,
			"%s: Failed to enable attention interrupt\n",
			__func__);
		goto err_sysfs;
	}

	retval = synaptics_rmi4_check_configuration(rmi4_data);
	if (retval < 0) {
		dev_err(&client->dev, "Failed to check configuration\n");
		/* Returning directly here leaked the irq, workqueue, input
		 * device, gpios and power; unwind through the error path. */
		goto err_sysfs;
	}
	return retval;

err_sysfs:
	/*
	 * attr_count is unsigned, so the old countdown loop
	 * "for (attr_count--; attr_count >= 0; attr_count--)" wrapped
	 * around and never terminated. Also remove the attributes from
	 * the kobject they were created on (&client->dev.kobj), not the
	 * input device's kobject.
	 */
	while (attr_count > 0) {
		attr_count--;
		sysfs_remove_file(&client->dev.kobj,
				&attrs[attr_count].attr);
	}
err_create_debugfs_file:
	debugfs_remove_recursive(rmi4_data->dir);
err_create_debugfs_dir:
	free_irq(rmi4_data->irq, rmi4_data);
err_enable_irq:
	cancel_delayed_work_sync(&rmi4_data->det_work);
	flush_workqueue(rmi4_data->det_workqueue);
	destroy_workqueue(rmi4_data->det_workqueue);
	input_unregister_device(rmi4_data->input_dev);
err_register_input:
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry_safe(fhandler, next_fhandler,
					&rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				synaptics_rmi4_f1a_kfree(fhandler);
			else {
				kfree(fhandler->data);
				kfree(fhandler->extra);
			}
			kfree(fhandler);
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
err_free_gpios:
	if (gpio_is_valid(rmi4_data->board->reset_gpio))
		gpio_free(rmi4_data->board->reset_gpio);
	if (gpio_is_valid(rmi4_data->board->irq_gpio))
		gpio_free(rmi4_data->board->irq_gpio);
err_gpio_config:
	synaptics_rmi4_power_on(rmi4_data, false);
err_power_device:
	synaptics_rmi4_regulator_configure(rmi4_data, false);
err_reg_configure:
	input_free_device(rmi4_data->input_dev);
	rmi4_data->input_dev = NULL;
err_input_device:
	kfree(rmi4_data);
	return retval;
}
/**
* synaptics_rmi4_remove()
*
* Called by the kernel when the association with an I2C device of the
* same name is broken (when the driver is unloaded).
*
 * This function terminates the work queue, stops sensor data acquisition,
* frees the interrupt, unregisters the driver from the input subsystem,
* turns off the power to the sensor, and frees other allocated resources.
*/
static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
{
	unsigned char attr_count;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_fn *next_fhandler;
	struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	debugfs_remove_recursive(rmi4_data->dir);
	cancel_delayed_work_sync(&rmi4_data->det_work);
	flush_workqueue(rmi4_data->det_workqueue);
	destroy_workqueue(rmi4_data->det_workqueue);

	rmi4_data->touch_stopped = true;
	wake_up(&rmi4_data->wait);

	free_irq(rmi4_data->irq, rmi4_data);

	/* The sysfs attributes were created on the i2c client device in
	 * probe(), so remove them from the same kobject (previously this
	 * removed them from the input device's kobject, leaving the real
	 * files behind). */
	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
		sysfs_remove_file(&client->dev.kobj,
				&attrs[attr_count].attr);
	}

	input_unregister_device(rmi4_data->input_dev);

	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry_safe(fhandler, next_fhandler,
					&rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				synaptics_rmi4_f1a_kfree(fhandler);
			else {
				kfree(fhandler->data);
				kfree(fhandler->extra);
			}
			kfree(fhandler);
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);

	if (gpio_is_valid(rmi4_data->board->reset_gpio))
		gpio_free(rmi4_data->board->reset_gpio);
	if (gpio_is_valid(rmi4_data->board->irq_gpio))
		gpio_free(rmi4_data->board->irq_gpio);

	synaptics_rmi4_power_on(rmi4_data, false);
	synaptics_rmi4_regulator_configure(rmi4_data, false);

	kfree(rmi4_data);
	return 0;
}
#ifdef CONFIG_PM
/**
* synaptics_rmi4_sensor_sleep()
*
* Called by synaptics_rmi4_early_suspend() and synaptics_rmi4_suspend().
*
* This function stops finger data acquisition and puts the sensor to sleep.
*/
static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	struct synaptics_rmi4_f01_device_control_0 device_ctrl;
	int ret;

	/* Read the current F01 device control register contents. */
	ret = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (ret < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
				"%s: Failed to enter sleep mode\n",
				__func__);
		rmi4_data->sensor_sleep = false;
		return;
	}

	/* Request sleep mode with the no-sleep override cleared. */
	device_ctrl.sleep_mode = SENSOR_SLEEP;
	device_ctrl.nosleep = NO_SLEEP_OFF;

	ret = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (ret < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
				"%s: Failed to enter sleep mode\n",
				__func__);
		rmi4_data->sensor_sleep = false;
		return;
	}

	rmi4_data->sensor_sleep = true;
}
/**
* synaptics_rmi4_sensor_wake()
*
* Called by synaptics_rmi4_resume() and synaptics_rmi4_late_resume().
*
* This function wakes the sensor from sleep.
*/
static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data)
{
	struct synaptics_rmi4_f01_device_control_0 device_ctrl;
	int ret;

	/* Read the current F01 device control register contents. */
	ret = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (ret < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
				"%s: Failed to wake from sleep mode\n",
				__func__);
		rmi4_data->sensor_sleep = true;
		return;
	}

	/* Nothing to do if the sensor is already in normal operation. */
	if (device_ctrl.nosleep == NO_SLEEP_OFF &&
		device_ctrl.sleep_mode == NORMAL_OPERATION) {
		rmi4_data->sensor_sleep = false;
		return;
	}

	/* Switch back to normal operation. */
	device_ctrl.sleep_mode = NORMAL_OPERATION;
	device_ctrl.nosleep = NO_SLEEP_OFF;

	ret = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (ret < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
				"%s: Failed to wake from sleep mode\n",
				__func__);
		rmi4_data->sensor_sleep = true;
		return;
	}

	rmi4_data->sensor_sleep = false;
}
#if defined(CONFIG_FB)
/* Framebuffer blank/unblank notifier: drives suspend/resume from display
 * power state transitions. */
static int fb_notifier_callback(struct notifier_block *self,
				unsigned long event, void *data)
{
	struct synaptics_rmi4_data *rmi4_data =
		container_of(self, struct synaptics_rmi4_data, fb_notif);
	struct fb_event *evdata = data;
	int *blank;

	if (!evdata || !evdata->data || event != FB_EVENT_BLANK)
		return 0;
	if (!rmi4_data || !rmi4_data->i2c_client)
		return 0;

	blank = evdata->data;
	if (*blank == FB_BLANK_UNBLANK)
		synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));
	else if (*blank == FB_BLANK_POWERDOWN)
		synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));

	return 0;
}
#elif defined(CONFIG_HAS_EARLYSUSPEND)
/**
* synaptics_rmi4_early_suspend()
*
* Called by the kernel during the early suspend phase when the system
* enters suspend.
*
* This function calls synaptics_rmi4_sensor_sleep() to stop finger
* data acquisition and put the sensor to sleep.
*/
static void synaptics_rmi4_early_suspend(struct early_suspend *h)
{
	struct synaptics_rmi4_data *rmi4_data =
		container_of(h, struct synaptics_rmi4_data,
		early_suspend);

	/* Latch the stay-awake request so late_resume can honor it. */
	rmi4_data->staying_awake = rmi4_data->stay_awake;

	rmi4_data->touch_stopped = true;
	wake_up(&rmi4_data->wait);
	synaptics_rmi4_irq_enable(rmi4_data, false);
	synaptics_rmi4_sensor_sleep(rmi4_data);

	if (rmi4_data->full_pm_cycle)
		synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));
}
/**
* synaptics_rmi4_late_resume()
*
* Called by the kernel during the late resume phase when the system
* wakes up from suspend.
*
* This function goes through the sensor wake process if the system wakes
* up from early suspend (without going into suspend).
*/
static void synaptics_rmi4_late_resume(struct early_suspend *h)
{
	struct synaptics_rmi4_data *rmi4_data =
		container_of(h, struct synaptics_rmi4_data,
		early_suspend);

	if (rmi4_data->staying_awake)
		return;

	if (rmi4_data->full_pm_cycle)
		synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));

	if (!rmi4_data->sensor_sleep)
		return;

	synaptics_rmi4_sensor_wake(rmi4_data);
	rmi4_data->touch_stopped = false;
	synaptics_rmi4_irq_enable(rmi4_data, true);
}
#endif
/*
 * synaptics_rmi4_regulator_lpm()
 *
 * on == true: drops the regulators to low-power mode (and disables them
 * outright when power_down_enable is set).
 * on == false: restores the regulators to high-power (active) mode.
 *
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_regulator_lpm(struct synaptics_rmi4_data *rmi4_data,
						bool on)
{
	int retval;
	int load_ua;
	if (on == false)
		goto regulator_hpm;
	if (rmi4_data->board->i2c_pull_up) {
		/* Request zero load when the supply will be fully disabled
		 * below; otherwise request the low-power load. */
		load_ua = rmi4_data->board->power_down_enable ?
			0 : RMI4_I2C_LPM_LOAD_UA;
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			load_ua);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed " \
				"rc=%d\n", retval);
			goto fail_regulator_lpm;
		}
		if (rmi4_data->board->power_down_enable) {
			retval = regulator_disable(rmi4_data->vcc_i2c);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"Regulator vcc_i2c disable failed " \
					"rc=%d\n", retval);
				goto fail_regulator_lpm;
			}
		}
	}
	load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA;
	retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd_ana set_opt failed rc=%d\n",
			retval);
		goto fail_regulator_lpm;
	}
	if (rmi4_data->board->power_down_enable) {
		retval = regulator_disable(rmi4_data->vdd);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vdd disable failed rc=%d\n",
				retval);
			goto fail_regulator_lpm;
		}
	}
	return 0;

regulator_hpm:
	/* Restore the active-mode load and re-enable any supplies that were
	 * powered down on entry to low-power mode. */
	retval = reg_set_optimum_mode_check(rmi4_data->vdd,
		RMI4_ACTIVE_LOAD_UA);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vcc_ana set_opt failed rc=%d\n",
			retval);
		goto fail_regulator_hpm;
	}
	if (rmi4_data->board->power_down_enable) {
		retval = regulator_enable(rmi4_data->vdd);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vdd enable failed rc=%d\n",
				retval);
			goto fail_regulator_hpm;
		}
	}
	if (rmi4_data->board->i2c_pull_up) {
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			RMI4_I2C_LOAD_UA);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed rc=%d\n",
				retval);
			goto fail_regulator_hpm;
		}
		if (rmi4_data->board->power_down_enable) {
			retval = regulator_enable(rmi4_data->vcc_i2c);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"Regulator vcc_i2c enable failed " \
					"rc=%d\n", retval);
				goto fail_regulator_hpm;
			}
		}
	}
	return 0;

fail_regulator_lpm:
	/* Low-power transition failed: return to active-mode loads. */
	reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA);
	if (rmi4_data->board->i2c_pull_up)
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
						RMI4_I2C_LOAD_UA);
	return retval;

fail_regulator_hpm:
	/* Active-mode transition failed: fall back to low-power loads. */
	load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA;
	reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
	if (rmi4_data->board->i2c_pull_up) {
		load_ua = rmi4_data->board->power_down_enable ?
				0 : RMI4_I2C_LPM_LOAD_UA;
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, load_ua);
	}
	return retval;
}
/*
 * synaptics_rmi4_check_configuration()
 *
 * Reads the F01 device status register and, if the controller reports
 * itself unconfigured (e.g. after an unexpected reset), re-queries the
 * device and sets the "configured" bit in F01 device control.
 *
 * Returns 0 on success or a negative errno from the I2C helpers.
 */
static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data
						*rmi4_data)
{
	int retval;
	struct synaptics_rmi4_f01_device_control_0 device_control;
	struct synaptics_rmi4_f01_device_status device_status;
	retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr,
				device_status.data,
				sizeof(device_status.data));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Failed to read device status, rc=%d\n", retval);
		return retval;
	}
	if (device_status.unconfigured) {
		/* The controller lost its configuration; rebuild the
		 * function list before acknowledging. */
		retval = synaptics_rmi4_query_device(rmi4_data);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Failed to query device, rc=%d\n", retval);
			return retval;
		}
		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_ctrl_base_addr,
				device_control.data,
				sizeof(device_control.data));
		if (retval < 0)
			return retval;
		/* Acknowledge by setting the configured bit. */
		device_control.configured = DEVICE_CONFIGURED;
		retval = synaptics_rmi4_i2c_write(rmi4_data,
				rmi4_data->f01_ctrl_base_addr,
				device_control.data,
				sizeof(device_control.data));
		if (retval < 0)
			return retval;
	}
	return 0;
}
/**
* synaptics_rmi4_suspend()
*
* Called by the kernel during the suspend phase when the system
* enters suspend.
*
* This function stops finger data acquisition and puts the sensor to
* sleep (if not already done so during the early suspend phase),
* disables the interrupt, and turns off the power to the sensor.
*/
static int synaptics_rmi4_suspend(struct device *dev)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	int retval;
	/* Honor a stay-awake request: skip suspend entirely. */
	if (rmi4_data->stay_awake) {
		rmi4_data->staying_awake = true;
		return 0;
	} else
		rmi4_data->staying_awake = false;
	if (rmi4_data->suspended) {
		dev_info(dev, "Already in suspend state\n");
		return 0;
	}
	/* Never suspend while a firmware update is in flight. */
	if (!rmi4_data->fw_updating) {
		/* The sensor may already be asleep if early suspend ran. */
		if (!rmi4_data->sensor_sleep) {
			rmi4_data->touch_stopped = true;
			wake_up(&rmi4_data->wait);
			synaptics_rmi4_irq_enable(rmi4_data, false);
			synaptics_rmi4_sensor_sleep(rmi4_data);
		}
		synaptics_rmi4_release_all(rmi4_data);
		retval = synaptics_rmi4_regulator_lpm(rmi4_data, true);
		if (retval < 0) {
			dev_err(dev, "failed to enter low power mode\n");
			return retval;
		}
	} else {
		dev_err(dev,
			"Firmware updating, cannot go into suspend mode\n");
		return 0;
	}
	if (rmi4_data->board->disable_gpios) {
		retval = synaptics_rmi4_gpio_configure(rmi4_data, false);
		if (retval < 0) {
			dev_err(dev, "failed to put gpios in suspend state\n");
			return retval;
		}
	}
	rmi4_data->suspended = true;
	return 0;
}
/**
* synaptics_rmi4_resume()
*
* Called by the kernel during the resume phase when the system
* wakes up from suspend.
*
* This function turns on the power to the sensor, wakes the sensor
* from sleep, enables the interrupt, and starts finger data
* acquisition.
*/
static int synaptics_rmi4_resume(struct device *dev)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	int retval;
	/* Device never suspended because of a stay-awake request. */
	if (rmi4_data->staying_awake)
		return 0;
	if (!rmi4_data->suspended) {
		dev_info(dev, "Already in awake state\n");
		return 0;
	}
	/* Bring the regulators back to active mode before touching gpios
	 * or the sensor itself. */
	retval = synaptics_rmi4_regulator_lpm(rmi4_data, false);
	if (retval < 0) {
		dev_err(dev, "Failed to enter active power mode\n");
		return retval;
	}
	if (rmi4_data->board->disable_gpios) {
		retval = synaptics_rmi4_gpio_configure(rmi4_data, true);
		if (retval < 0) {
			dev_err(dev, "Failed to put gpios in active state\n");
			return retval;
		}
	}
	synaptics_rmi4_sensor_wake(rmi4_data);
	rmi4_data->touch_stopped = false;
	synaptics_rmi4_irq_enable(rmi4_data, true);
	/* The controller may have reset while powered down; reconfigure
	 * it if it reports itself unconfigured. */
	retval = synaptics_rmi4_check_configuration(rmi4_data);
	if (retval < 0) {
		dev_err(dev, "Failed to check configuration\n");
		return retval;
	}
	rmi4_data->suspended = false;
	return 0;
}
#if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND))
/* Standard PM callbacks are used only when neither the framebuffer
 * notifier nor early-suspend drives suspend/resume. */
static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
	.suspend = synaptics_rmi4_suspend,
	.resume  = synaptics_rmi4_resume,
};
#else
/* Suspend/resume is driven by the FB notifier or early-suspend hooks
 * instead, so the ops table is intentionally empty. */
static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
};
#endif
#else
/* CONFIG_PM disabled: the power-management helpers become no-ops.
 * (Also drops the stray semicolons after the function bodies and the
 * redundant bare returns.) */
static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data)
{
}
static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data)
{
}
static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data
						*rmi4_data)
{
	return 0;
}
#endif
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id synaptics_rmi4_id_table[] = {
	{DRIVER_NAME, 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
#ifdef CONFIG_OF
/* Device-tree compatible strings for OF-based probing. */
static struct of_device_id rmi4_match_table[] = {
	{ .compatible = "synaptics,rmi4",},
	{ },
};
#else
#define rmi4_match_table NULL
#endif
/* I2C driver registration glue tying probe/remove, PM ops and the ID
 * tables together. */
static struct i2c_driver synaptics_rmi4_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = rmi4_match_table,
#ifdef CONFIG_PM
		.pm = &synaptics_rmi4_dev_pm_ops,
#endif
	},
	.probe = synaptics_rmi4_probe,
	.remove = __devexit_p(synaptics_rmi4_remove),
	.id_table = synaptics_rmi4_id_table,
};
/**
 * synaptics_rmi4_init()
 *
 * Called by the kernel during do_initcalls (if built-in)
 * or when the driver is loaded (if a module).
 *
 * This function registers the driver to the I2C subsystem.
 *
 */
static int __init synaptics_rmi4_init(void)
{
	return i2c_add_driver(&synaptics_rmi4_driver);
}
/**
 * synaptics_rmi4_exit()
 *
 * Called by the kernel when the driver is unloaded.
 *
 * This function unregisters the driver from the I2C subsystem.
 *
 */
static void __exit synaptics_rmi4_exit(void)
{
	i2c_del_driver(&synaptics_rmi4_driver);
}
module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);
MODULE_AUTHOR("Synaptics, Inc.");
MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver");
MODULE_LICENSE("GPL v2");
| {
"pile_set_name": "Github"
} |
# Needed for binfmt_misc service: "Arbitrary Executable File Formats File System"
CONFIG_BINFMT_MISC=y
| {
"pile_set_name": "Github"
} |
# Configure paths for libopus
# Gregory Maxwell <[email protected]> 08-30-2012
# Shamelessly stolen from Jack Moffitt (libogg) who
# Shamelessly stole from Owen Taylor and Manish Singh
dnl XIPH_PATH_OPUS([ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND]])
dnl Test for libopus, and define OPUS_CFLAGS and OPUS_LIBS
dnl
AC_DEFUN([XIPH_PATH_OPUS],
[dnl
dnl Get the cflags and libraries
dnl
AC_ARG_WITH(opus,AC_HELP_STRING([--with-opus=PFX],[Prefix where opus is installed (optional)]), opus_prefix="$withval", opus_prefix="")
AC_ARG_WITH(opus-libraries,AC_HELP_STRING([--with-opus-libraries=DIR],[Directory where the opus library is installed (optional)]), opus_libraries="$withval", opus_libraries="")
AC_ARG_WITH(opus-includes,AC_HELP_STRING([--with-opus-includes=DIR],[Directory where the opus header files are installed (optional)]), opus_includes="$withval", opus_includes="")
AC_ARG_ENABLE(opustest,AC_HELP_STRING([--disable-opustest],[Do not try to compile and run a test opus program]),, enable_opustest=yes)
dnl Library search path: explicit --with-opus-libraries wins, then the
dnl --with-opus prefix, then the configure-time $prefix.
  if test "x$opus_libraries" != "x" ; then
    OPUS_LIBS="-L$opus_libraries"
  elif test "x$opus_prefix" = "xno" || test "x$opus_prefix" = "xyes" ; then
    OPUS_LIBS=""
  elif test "x$opus_prefix" != "x" ; then
    OPUS_LIBS="-L$opus_prefix/lib"
  elif test "x$prefix" != "xNONE" ; then
    OPUS_LIBS="-L$prefix/lib"
  fi

  if test "x$opus_prefix" != "xno" ; then
    OPUS_LIBS="$OPUS_LIBS -lopus"
  fi

dnl Header search path follows the same precedence as the library path.
  if test "x$opus_includes" != "x" ; then
    OPUS_CFLAGS="-I$opus_includes"
  elif test "x$opus_prefix" = "xno" || test "x$opus_prefix" = "xyes" ; then
    OPUS_CFLAGS=""
  elif test "x$opus_prefix" != "x" ; then
    OPUS_CFLAGS="-I$opus_prefix/include"
  elif test "x$prefix" != "xNONE"; then
    OPUS_CFLAGS="-I$prefix/include"
  fi

  AC_MSG_CHECKING(for Opus)
  if test "x$opus_prefix" = "xno" ; then
    no_opus="disabled"
    enable_opustest="no"
  else
    no_opus=""
  fi

  if test "x$enable_opustest" = "xyes" ; then
    ac_save_CFLAGS="$CFLAGS"
    ac_save_LIBS="$LIBS"
    CFLAGS="$CFLAGS $OPUS_CFLAGS"
    LIBS="$LIBS $OPUS_LIBS"
dnl
dnl Now check if the installed Opus is sufficiently new.
dnl
      rm -f conf.opustest
      AC_TRY_RUN([
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <opus.h>

int main ()
{
  system("touch conf.opustest");
  return 0;
}

],, no_opus=yes,[echo $ac_n "cross compiling; assumed OK... $ac_c"])
       CFLAGS="$ac_save_CFLAGS"
       LIBS="$ac_save_LIBS"
  fi

dnl Report the result and run the success/failure hooks: $1 on success,
dnl $2 when Opus is disabled or unusable.
  if test "x$no_opus" = "xdisabled" ; then
     AC_MSG_RESULT(no)
     ifelse([$2], , :, [$2])
  elif test "x$no_opus" = "x" ; then
     AC_MSG_RESULT(yes)
     ifelse([$1], , :, [$1])
  else
     AC_MSG_RESULT(no)
     if test -f conf.opustest ; then
       :
     else
       echo "*** Could not run Opus test program, checking why..."
       CFLAGS="$CFLAGS $OPUS_CFLAGS"
       LIBS="$LIBS $OPUS_LIBS"
       AC_TRY_LINK([
#include <stdio.h>
#include <opus.h>
],     [ return 0; ],
       [ echo "*** The test program compiled, but did not run. This usually means"
       echo "*** that the run-time linker is not finding Opus or finding the wrong"
       echo "*** version of Opus. If it is not finding Opus, you'll need to set your"
       echo "*** LD_LIBRARY_PATH environment variable, or edit /etc/ld.so.conf to point"
       echo "*** to the installed location Also, make sure you have run ldconfig if that"
       echo "*** is required on your system"
       echo "***"
       echo "*** If you have an old version installed, it is best to remove it, although"
       echo "*** you may also be able to get things to work by modifying LD_LIBRARY_PATH"],
       [ echo "*** The test program failed to compile or link. See the file config.log for the"
       echo "*** exact error that occurred. This usually means Opus was incorrectly installed"
       echo "*** or that you have moved Opus since it was installed." ])
       CFLAGS="$ac_save_CFLAGS"
       LIBS="$ac_save_LIBS"
     fi
     OPUS_CFLAGS=""
     OPUS_LIBS=""
     ifelse([$2], , :, [$2])
  fi
  AC_SUBST(OPUS_CFLAGS)
  AC_SUBST(OPUS_LIBS)
  rm -f conf.opustest
])
| {
"pile_set_name": "Github"
} |
--TEST--
Test is_subclass_of() function : wrong number of args
--FILE--
<?php
/* Prototype : proto bool is_subclass_of(object object, string class_name)
* Description: Returns true if the object has this class as one of its parents
* Source code: Zend/zend_builtin_functions.c
* Alias to functions:
*/
echo "*** Testing is_subclass_of() : error conditions ***\n";
//Test is_subclass_of with one more than the expected number of arguments
echo "\n-- Testing is_subclass_of() function with more than expected no. of arguments --\n";
$object = new stdclass();
$class_name = 'string_val';
$allow_string = false;
$extra_arg = 10;
var_dump( is_subclass_of($object, $class_name, $allow_string, $extra_arg) );
//Test is_subclass_of with invalid last argument
echo "\n-- Testing is_subclass_of() function with more than typo style invalid 3rd argument --\n";
var_dump( is_subclass_of($object, $class_name, $class_name) );
//Test is_subclass_of with invalid last argument
echo "\n-- Testing is_subclass_of() function with more than invalid 3rd argument --\n";
var_dump( is_subclass_of($object, $class_name, $object) );
// Testing is_subclass_of with one less than the expected number of arguments
echo "\n-- Testing is_subclass_of() function with less than expected no. of arguments --\n";
$object = new stdclass();
var_dump( is_subclass_of($object) );
echo "Done";
?>
--EXPECTF--
*** Testing is_subclass_of() : error conditions ***
-- Testing is_subclass_of() function with more than expected no. of arguments --
Warning: is_subclass_of() expects at most 3 parameters, 4 given in %s on line 17
NULL
-- Testing is_subclass_of() function with more than typo style invalid 3rd argument --
bool(false)
-- Testing is_subclass_of() function with more than invalid 3rd argument --
Warning: is_subclass_of() expects parameter 3 to be boolean, object given in %s on line 26
NULL
-- Testing is_subclass_of() function with less than expected no. of arguments --
Warning: is_subclass_of() expects at least 2 parameters, 1 given in %s on line 31
NULL
Done
| {
"pile_set_name": "Github"
} |
__license__ = 'GPL v3'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
odb.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
import uuid
from lxml import html
class OurDailyBread(BasicNewsRecipe):
    """Calibre news recipe for the 'Our Daily Bread' devotional feed (odb.org)."""
    title = 'Our Daily Bread'
    __author__ = 'Kovid Goyal'
    description = "Our Daily Bread is a daily devotional from RBC Ministries which helps readers spend time each day in God's Word."
    oldest_article = 15
    language = 'en'
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    conversion_options = {
        'comments': description, 'language': language, 'linearize_tables': True
    }
    # Keep the article body plus any psalm <div>s spliced in by
    # preprocess_raw_html() below (tagged 'calibre-inserted-psalm').
    keep_only_tags = [dict(attrs={'class': 'calibre-inserted-psalm'}),
                      {'id': 'content'}]
    # Strip site chrome: audio player, zoom widget, footer, nav, share buttons.
    remove_tags = [
        dict(attrs={'class': ['listen-box', 'entry-zoom',
                              'entry-footer']}),
        {'id': 'nav-single'},
        dict(attrs={'class': lambda x: x and ' sharing ' in x}),
    ]
    extra_css = '''
    .text{font-family:Arial,Helvetica,sans-serif;font-size:x-small;}
    .devotionalTitle{font-family:Arial,Helvetica,sans-serif; font-size:large; font-weight: bold;}
    .devotionalDate{font-family:Arial,Helvetica,sans-serif; font-size:xx-small;}
    .devotionalVerse{font-family:Arial,Helvetica,sans-serif; font-size:xx-small; }
    a{color:#000000;font-family:Arial,Helvetica,sans-serif; font-size:x-small;}
    '''
    feeds = [(u'Our Daily Bread', u'http://odb.org/feed/')]
    def preprocess_raw_html(self, raw, url):
        # Convert links to referenced Psalms to the actual psalms
        # For each Bible Gateway link: fetch the passage page, graft its
        # passage <div> into this document's <body>, and retarget the link
        # at the grafted copy so it works offline in the ebook.
        root = html.fromstring(raw)
        for a in root.xpath(
                '//a[starts-with(@href, "http://www.biblegateway.com")]'):
            # Fresh anchor id for the in-document fragment link.
            uid = type(u'')(uuid.uuid4())
            raw = self.index_to_soup(a.get('href'), raw=True)
            iroot = html.fromstring(raw)
            matches = iroot.xpath(
                '//div[contains(@class, "result-text-style-normal")]')
            if matches:
                div = matches[0]
                div.getparent().remove(div)
                root.xpath('//body')[0].append(div)
                a.set('href', '#' + uid)
                del a.attrib['target']
                div.set('id', uid)
                # Class matches keep_only_tags so the psalm survives cleanup.
                div.set('class', 'calibre-inserted-psalm')
                hr = div.makeelement('hr')
                div.insert(0, hr)
        # print html.tostring(div)
        raw = html.tostring(root, encoding='unicode')
        return raw
    def preprocess_html(self, soup):
        # Hoist the article content to the top of <body>.
        d = soup.find(id='content')
        d.extract()
        soup.find('body').insert(0, d)
        return soup
| {
"pile_set_name": "Github"
} |
/*
* linux/fs/ext3/dir.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card ([email protected])
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/dir.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext3 directory handling functions
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller ([email protected]), 1995
*
* Hash Tree Directory indexing (c) 2001 Daniel Phillips
*
*/
#include "ext3.h"
static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
static int ext3_readdir(struct file *, void *, filldir_t);
static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir);
static int ext3_release_dir (struct inode * inode,
struct file * filp);
const struct file_operations ext3_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = ext3_readdir, /* we take BKL. needed?*/
.unlocked_ioctl = ext3_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext3_compat_ioctl,
#endif
.fsync = ext3_sync_file, /* BKL held */
.release = ext3_release_dir,
};
/*
 * Translate an on-disk file type code to the DT_* constant returned
 * through readdir.  Filesystems without the FILETYPE incompat feature,
 * or codes outside the table, yield DT_UNKNOWN.
 */
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) &&
	    filetype < EXT3_FT_MAX)
		return ext3_filetype_table[filetype];

	return DT_UNKNOWN;
}
/*
 * Validate one on-disk directory entry: minimum record length, 4-byte
 * alignment, room for the name, containment within the block, and an
 * inode number inside the filesystem's range.
 *
 * Returns 1 if the entry is usable, 0 after reporting the first
 * violation via ext3_error() (@function names the caller in the log).
 */
int ext3_check_dir_entry (const char * function, struct inode * dir,
			  struct ext3_dir_entry_2 * de,
			  struct buffer_head * bh,
			  unsigned long offset)
{
	const char * error_msg = NULL;
	const int rlen = ext3_rec_len_from_disk(de->rec_len);
	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
		error_msg = "rec_len is smaller than minimal";
	else if (unlikely(rlen % 4 != 0))
		error_msg = "rec_len % 4 != 0";
	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
		error_msg = "rec_len is too small for name_len";
	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
		error_msg = "directory entry across blocks";
	else if (unlikely(le32_to_cpu(de->inode) >
			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
		error_msg = "inode out of bounds";
	if (unlikely(error_msg != NULL))
		ext3_error (dir->i_sb, function,
			"bad entry in directory #%lu: %s - "
			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
			dir->i_ino, error_msg, offset,
			(unsigned long) le32_to_cpu(de->inode),
			rlen, de->name_len);
	return error_msg == NULL ? 1 : 0;
}
/*
 * ->readdir() for ext3 directories (linear scan).
 *
 * Htree-indexed directories (and single-block directories, which the dx
 * code can also serve) are first delegated to ext3_dx_readdir(); if that
 * returns ERR_BAD_DX_DIR the index flag is cleared and we fall back to
 * the block-by-block scan below.
 */
static int ext3_readdir(struct file * filp,
			 void * dirent, filldir_t filldir)
{
	int error = 0;
	unsigned long offset;
	int i, stored;
	struct ext3_dir_entry_2 *de;
	struct super_block *sb;
	int err;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;
	int dir_has_error = 0;
	sb = inode->i_sb;
	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT3_FEATURE_COMPAT_DIR_INDEX) &&
	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
	     ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
		err = ext3_dx_readdir(filp, dirent, filldir);
		if (err != ERR_BAD_DX_DIR) {
			ret = err;
			goto out;
		}
		/*
		 * We don't set the inode dirty flag since it's not
		 * critical that it get flushed back to the disk.
		 */
		EXT3_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
	}
	stored = 0;
	/* Byte offset of f_pos within its directory block. */
	offset = filp->f_pos & (sb->s_blocksize - 1);
	while (!error && !stored && filp->f_pos < inode->i_size) {
		unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
		struct buffer_head map_bh;
		struct buffer_head *bh = NULL;
		map_bh.b_state = 0;
		err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
		if (err > 0) {
			/* Kick off readahead on the directory's data blocks. */
			pgoff_t index = map_bh.b_blocknr >>
					(PAGE_CACHE_SHIFT - inode->i_blkbits);
			if (!ra_has_index(&filp->f_ra, index))
				page_cache_sync_readahead(
					sb->s_bdev->bd_inode->i_mapping,
					&filp->f_ra, filp,
					index, 1);
			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
			bh = ext3_bread(NULL, inode, blk, 0, &err);
		}
		/*
		 * We ignore I/O errors on directories so users have a chance
		 * of recovering data when there's a bad sector
		 */
		if (!bh) {
			if (!dir_has_error) {
				ext3_error(sb, __func__, "directory #%lu "
					"contains a hole at offset %lld",
					inode->i_ino, filp->f_pos);
				dir_has_error = 1;
			}
			/* corrupt size? Maybe no more blocks to read */
			if (filp->f_pos > inode->i_blocks << 9)
				break;
			filp->f_pos += sb->s_blocksize - offset;
			continue;
		}
revalidate:
		/* If the dir block has changed since the last call to
		 * readdir(2), then we might be pointing to an invalid
		 * dirent right now.  Scan from the start of the block
		 * to make sure. */
		if (filp->f_version != inode->i_version) {
			for (i = 0; i < sb->s_blocksize && i < offset; ) {
				de = (struct ext3_dir_entry_2 *)
					(bh->b_data + i);
				/* It's too expensive to do a full
				 * dirent test each time round this
				 * loop, but we do have to test at
				 * least that it is non-zero.  A
				 * failure will be detected in the
				 * dirent test below. */
				if (ext3_rec_len_from_disk(de->rec_len) <
						EXT3_DIR_REC_LEN(1))
					break;
				i += ext3_rec_len_from_disk(de->rec_len);
			}
			offset = i;
			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
				| offset;
			filp->f_version = inode->i_version;
		}
		while (!error && filp->f_pos < inode->i_size
		       && offset < sb->s_blocksize) {
			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
						   bh, offset)) {
				/* On error, skip the f_pos to the
				   next block. */
				filp->f_pos = (filp->f_pos |
						(sb->s_blocksize - 1)) + 1;
				brelse (bh);
				ret = stored;
				goto out;
			}
			offset += ext3_rec_len_from_disk(de->rec_len);
			/* inode == 0 marks a deleted/unused entry: skip it. */
			if (le32_to_cpu(de->inode)) {
				/* We might block in the next section
				 * if the data destination is
				 * currently swapped out.  So, use a
				 * version stamp to detect whether or
				 * not the directory has been modified
				 * during the copy operation.
				 */
				u64 version = filp->f_version;
				error = filldir(dirent, de->name,
						de->name_len,
						filp->f_pos,
						le32_to_cpu(de->inode),
						get_dtype(sb, de->file_type));
				if (error)
					break;
				if (version != filp->f_version)
					goto revalidate;
				stored ++;
			}
			filp->f_pos += ext3_rec_len_from_disk(de->rec_len);
		}
		offset = 0;
		brelse (bh);
	}
out:
	return ret;
}
/*
* These functions convert from the major/minor hash to an f_pos
* value.
*
* Currently we only use major hash number. This is unfortunate, but
* on 32-bit machines, the same VFS interface is used for lseek and
* llseek, so if we use the 64 bit offset, then the 32-bit versions of
* lseek/telldir/seekdir will blow out spectacularly, and from within
* the ext2 low-level routine, we don't know if we're being called by
* a 64-bit version of the system call or the 32-bit version of the
* system call. Worse yet, NFSv2 only allows for a 32-bit readdir
* cookie. Sigh.
*/
#define hash2pos(major, minor) (major >> 1)
#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
#define pos2min_hash(pos) (0)
/*
* This structure holds the nodes of the red-black tree used to store
* the directory entry in hash order.
*/
/* One cached directory entry, keyed by (hash, minor_hash). */
struct fname {
	__u32 hash;		/* major hash of the entry name */
	__u32 minor_hash;	/* minor hash; breaks major-hash ties */
	struct rb_node rb_hash;	/* node in dir_private_info's rb-tree */
	struct fname *next;	/* chain of entries with identical hashes */
	__u32 inode;
	__u8 name_len;
	__u8 file_type;
	char name[0];		/* NUL-terminated copy of the name follows */
};
/*
* This function implements a non-recursive way of freeing all of the
* nodes in the red-black tree.
*/
static void free_rb_tree_fname(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *parent;
	struct fname *fname;
	/* Post-order walk: descend to a leaf, free it, detach it, retry parent. */
	while (n) {
		/* Do the node's children first */
		if (n->rb_left) {
			n = n->rb_left;
			continue;
		}
		if (n->rb_right) {
			n = n->rb_right;
			continue;
		}
		/*
		 * The node has no children; free it, and then zero
		 * out parent's link to it.  Finally go to the
		 * beginning of the loop and try to free the parent
		 * node.
		 */
		parent = rb_parent(n);
		fname = rb_entry(n, struct fname, rb_hash);
		/* Free the whole hash-collision chain hanging off this node. */
		while (fname) {
			struct fname * old = fname;
			fname = fname->next;
			kfree (old);
		}
		if (!parent)
			*root = RB_ROOT;
		else if (parent->rb_left == n)
			parent->rb_left = NULL;
		else if (parent->rb_right == n)
			parent->rb_right = NULL;
		n = parent;
	}
}
/*
 * Allocate and initialise the per-open-file readdir state, priming the
 * hash cursor from the given directory position.  Returns NULL on OOM.
 */
static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
{
	struct dir_private_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (info) {
		info->curr_hash = pos2maj_hash(pos);
		info->curr_minor_hash = pos2min_hash(pos);
	}
	return info;
}
/*
 * Tear down the private readdir state attached to a directory file:
 * every fname node cached in the rb-tree is released first, then the
 * dir_private_info container itself.
 */
void ext3_htree_free_dir_info(struct dir_private_info *p)
{
	struct rb_root *entries = &p->root;

	free_rb_tree_fname(entries);
	kfree(p);
}
/*
* Given a directory entry, enter it into the fname rb tree.
*/
/* Returns 0 on success or -ENOMEM if the fname node cannot be allocated. */
int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
			     __u32 minor_hash,
			     struct ext3_dir_entry_2 *dirent)
{
	struct rb_node **p, *parent = NULL;
	struct fname * fname, *new_fn;
	struct dir_private_info *info;
	int len;
	info = (struct dir_private_info *) dir_file->private_data;
	p = &info->root.rb_node;
	/* Create and allocate the fname structure */
	len = sizeof(struct fname) + dirent->name_len + 1;
	new_fn = kzalloc(len, GFP_KERNEL);
	if (!new_fn)
		return -ENOMEM;
	new_fn->hash = hash;
	new_fn->minor_hash = minor_hash;
	new_fn->inode = le32_to_cpu(dirent->inode);
	new_fn->name_len = dirent->name_len;
	new_fn->file_type = dirent->file_type;
	memcpy(new_fn->name, dirent->name, dirent->name_len);
	/* NUL-terminate the copied name (space reserved in len above). */
	new_fn->name[dirent->name_len] = 0;
	/* Standard rb-tree insertion, ordered by (hash, minor_hash). */
	while (*p) {
		parent = *p;
		fname = rb_entry(parent, struct fname, rb_hash);
		/*
		 * If the hash and minor hash match up, then we put
		 * them on a linked list.  This rarely happens...
		 */
		if ((new_fn->hash == fname->hash) &&
		    (new_fn->minor_hash == fname->minor_hash)) {
			/* Full collision: chain behind the existing node
			 * instead of adding a new tree node. */
			new_fn->next = fname->next;
			fname->next = new_fn;
			return 0;
		}
		if (new_fn->hash < fname->hash)
			p = &(*p)->rb_left;
		else if (new_fn->hash > fname->hash)
			p = &(*p)->rb_right;
		else if (new_fn->minor_hash < fname->minor_hash)
			p = &(*p)->rb_left;
		else /* if (new_fn->minor_hash > fname->minor_hash) */
			p = &(*p)->rb_right;
	}
	rb_link_node(&new_fn->rb_hash, parent, p);
	rb_insert_color(&new_fn->rb_hash, &info->root);
	return 0;
}
/*
* This is a helper function for ext3_dx_readdir. It calls filldir
* for all entries on the fname linked list.  (Normally there is only
* one entry on the linked list, unless there are 62 bit hash collisions.)
*/
/*
 * Emit every entry on a collision chain to userspace via @filldir.
 * Returns 0 when the whole chain was consumed; otherwise returns
 * filldir's error after recording the resume point (f_pos and
 * info->extra_fname) so the next readdir call can continue.
 */
static int call_filldir(struct file * filp, void * dirent,
			filldir_t filldir, struct fname *fname)
{
	struct dir_private_info *info = filp->private_data;
	loff_t curr_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block * sb;
	int error;
	sb = inode->i_sb;
	if (!fname) {
		printk("call_filldir: called with null fname?!?\n");
		return 0;
	}
	curr_pos = hash2pos(fname->hash, fname->minor_hash);
	while (fname) {
		error = filldir(dirent, fname->name,
				fname->name_len, curr_pos,
				fname->inode,
				get_dtype(sb, fname->file_type));
		if (error) {
			/* Userspace buffer full: remember where to resume. */
			filp->f_pos = curr_pos;
			info->extra_fname = fname;
			return error;
		}
		fname = fname->next;
	}
	return 0;
}
/*
 * readdir for htree-indexed directories: entries come back in hash
 * order, one hash "batch" at a time.  Batches are cached in the rb-tree
 * hanging off filp->private_data and refilled from
 * ext3_htree_fill_tree() as the hash cursor advances.
 */
static int ext3_dx_readdir(struct file * filp,
			 void * dirent, filldir_t filldir)
{
	struct dir_private_info *info = filp->private_data;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct fname *fname;
	int ret;
	if (!info) {
		/* First call on this open file: create the cached-entry state. */
		info = ext3_htree_create_dir_info(filp->f_pos);
		if (!info)
			return -ENOMEM;
		filp->private_data = info;
	}
	if (filp->f_pos == EXT3_HTREE_EOF)
		return 0;	/* EOF */
	/* Some one has messed with f_pos; reset the world */
	if (info->last_pos != filp->f_pos) {
		free_rb_tree_fname(&info->root);
		info->curr_node = NULL;
		info->extra_fname = NULL;
		info->curr_hash = pos2maj_hash(filp->f_pos);
		info->curr_minor_hash = pos2min_hash(filp->f_pos);
	}
	/*
	 * If there are any leftover names on the hash collision
	 * chain, return them first.
	 */
	if (info->extra_fname) {
		if (call_filldir(filp, dirent, filldir, info->extra_fname))
			goto finished;
		info->extra_fname = NULL;
		goto next_node;
	} else if (!info->curr_node)
		info->curr_node = rb_first(&info->root);
	while (1) {
		/*
		 * Fill the rbtree if we have no more entries,
		 * or the inode has changed since we last read in the
		 * cached entries.
		 */
		if ((!info->curr_node) ||
		    (filp->f_version != inode->i_version)) {
			info->curr_node = NULL;
			free_rb_tree_fname(&info->root);
			filp->f_version = inode->i_version;
			ret = ext3_htree_fill_tree(filp, info->curr_hash,
						   info->curr_minor_hash,
						   &info->next_hash);
			if (ret < 0)
				return ret;
			if (ret == 0) {
				filp->f_pos = EXT3_HTREE_EOF;
				break;
			}
			info->curr_node = rb_first(&info->root);
		}
		fname = rb_entry(info->curr_node, struct fname, rb_hash);
		info->curr_hash = fname->hash;
		info->curr_minor_hash = fname->minor_hash;
		if (call_filldir(filp, dirent, filldir, fname))
			break;
	next_node:
		info->curr_node = rb_next(info->curr_node);
		if (info->curr_node) {
			fname = rb_entry(info->curr_node, struct fname,
					 rb_hash);
			info->curr_hash = fname->hash;
			info->curr_minor_hash = fname->minor_hash;
		} else {
			/* Batch exhausted: advance to the next hash range,
			 * or stop if the whole hash space has been covered. */
			if (info->next_hash == ~0) {
				filp->f_pos = EXT3_HTREE_EOF;
				break;
			}
			info->curr_hash = info->next_hash;
			info->curr_minor_hash = 0;
		}
	}
finished:
	info->last_pos = filp->f_pos;
	return 0;
}
static int ext3_release_dir (struct inode * inode, struct file * filp)
{
if (filp->private_data)
ext3_htree_free_dir_info(filp->private_data);
return 0;
}
| {
"pile_set_name": "Github"
} |
<!--
#%L
ADempiere ERP - Desktop Client
%%
Copyright (C) 2015 metas GmbH
%%
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program. If not, see
<http://www.gnu.org/licenses/gpl-2.0.html>.
#L%
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<!--
$Id: package.html,v 1.1 2006/04/21 17:43:12 jjanke Exp $
Copyright 1999-2002 Jorg Janke, Inc.
40 Old Tannery Rd, Monroe, CT 06468, U.S.A.
All Rights Reserved.
-->
</head>
<body>
<h2>Package org.compiere.apps.search</h2>
Implement Search, Info Viewer, Lookup and Resource Schedule Viewer
<h2>Related Documentation</h2>
For overviews, tutorials, examples, guides, and tool documentation, please see:
<ul>
<li><a href="http://www.adempiere.com/wiki/index.php/ADempiere">Adempiere Wiki</a>
</ul>
<!-- Put @see and @since tags down here. -->
</body>
</html>
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @flow strict-local
* @format
*/
import getDisplayName from '../getDisplayName';
test('function declaration name', () => {
  expect(getDisplayName(function Foo() {})).toBe('Foo');
});
test('function expression assigned to a variable', () => {
  const Foo = function() {};
  expect(getDisplayName(Foo)).toBe('Foo');
});
test('anonymous function expression', () => {
  // eslint-disable-next-line prefer-arrow-callback
  expect(getDisplayName(function() {})).toBe('Unknown');
});
test('anonymous arrow function', () => {
  expect(getDisplayName(() => {})).toBe('Unknown');
});
test('function expression with displayName assigned to a variable', () => {
  const Foo = function() {};
  Foo.displayName = 'Bar';
  expect(getDisplayName(Foo)).toBe('Bar');
});
// NOTE: a byte-for-byte duplicate of the previous test (same title, same
// body) was removed here -- it only ran the identical case twice.
test('arrow function with displayName assigned to a variable', () => {
  const Foo = () => {};
  Foo.displayName = 'Bar';
  expect(getDisplayName(Foo)).toBe('Bar');
});
test('class component', () => {
  class Foo {}
  expect(getDisplayName(Foo)).toBe('Foo');
});
test('class component with static displayName', () => {
  class Foo {
    static displayName = 'Bar';
  }
  expect(getDisplayName(Foo)).toBe('Bar');
});
| {
"pile_set_name": "Github"
} |
/** @file
* IPRT - Build Configuration Information
*/
/*
* Copyright (C) 2009 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___iprt_buildconfig_h
#define ___iprt_buildconfig_h
#include <iprt/cdefs.h>
#include <iprt/types.h>
RT_C_DECLS_BEGIN
/** @defgroup grp_rt_buildconfig RTBldCfg - Build Configuration Information
* @ingroup grp_rt
* @{
*/
/**
* Gets the source code management revision of the IPRT build.
* @returns Source code management revision number.
*/
RTDECL(uint32_t) RTBldCfgRevision(void);
/**
* Gets the source code management revision of the IPRT build.
* @returns Read only string containing the revision number.
*/
RTDECL(const char *) RTBldCfgRevisionStr(void);
/**
* Gets the product version string.
*
* This will be a string on the form "x.y.z[_string]".
*
* @returns Read only version string.
*
* @remarks This is a build time configuration thing that the product using IPRT
* will set. It is therefore not any IPRT version, but rather the
* version of that product.
*/
RTDECL(const char *) RTBldCfgVersion(void);
/**
* Gets the major product version number.
* @returns Major product version number.
* @remarks See RTBldCfgVersion.
*/
RTDECL(uint32_t) RTBldCfgVersionMajor(void);
/**
* Gets the minor product version number.
* @returns Minor product version number.
* @remarks See RTBldCfgVersion.
*/
RTDECL(uint32_t) RTBldCfgVersionMinor(void);
/**
* Gets the product build number.
* @returns Product build number.
* @remarks See RTBldCfgVersion.
*/
RTDECL(uint32_t) RTBldCfgVersionBuild(void);
/**
* Gets the build target name.
*
* @returns Read only build target string.
*/
RTDECL(const char *) RTBldCfgTarget(void);
/**
* Gets the build target architecture name.
*
* @returns Read only build target architecture string.
*/
RTDECL(const char *) RTBldCfgTargetArch(void);
/**
* Gets the build target-dot-architecture name.
*
* @returns Read only build target-dot-architecture string.
*/
RTDECL(const char *) RTBldCfgTargetDotArch(void);
/**
* Gets the build type name.
*
* @returns Read only build type string.
*/
RTDECL(const char *) RTBldCfgType(void);
/**
* Gets the name of the compiler used for building IPRT.
*
* @returns Read only compiler name.
*/
RTDECL(const char *) RTBldCfgCompiler(void);
/** @} */
RT_C_DECLS_END
#endif
| {
"pile_set_name": "Github"
} |
#ifndef IxOsalOs_H
#define IxOsalOs_H
#ifndef IX_OSAL_CACHED
#error "Uncached memory not supported in linux environment"
#endif
/*
 * Virtual-to-physical address translation for the IXP42x memory map:
 * addresses below 0x40000000 are mapped views whose physical address is
 * the low 28 bits; everything at or above is identity-mapped.
 */
static inline unsigned long __v2p(unsigned long v)
{
	return (v < 0x40000000) ? (v & 0xfffffff) : v;
}
#define IX_OSAL_OS_MMU_VIRT_TO_PHYS(addr) __v2p((u32)addr)
#define IX_OSAL_OS_MMU_PHYS_TO_VIRT(addr) (addr)
/*
* Data cache not enabled (hopefully)
*/
#define IX_OSAL_OS_CACHE_INVALIDATE(addr, size)
#define IX_OSAL_OS_CACHE_FLUSH(addr, size)
#define HAL_DCACHE_INVALIDATE(addr, size)
#define HAL_DCACHE_FLUSH(addr, size)
#define __ixp42X /* sr: U-Boot needs this define */
#endif /* IxOsalOs_H */
| {
"pile_set_name": "Github"
} |
<configuration>
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>info</level>
</filter>
<encoder>
<pattern>%date{HH:mm:ss} %-5level [%X{akkaSource}] - %msg%n</pattern>
</encoder>
</appender>
<logger name="akka.cluster.pi" level="info" additivity="false">
<appender-ref ref="console"/>
</logger>
<logger name="akka.actor.RepointableActorRef" level="info" additivity="false">
<appender-ref ref="console"/>
</logger>
<root level="info">
<appender-ref ref="console"/>
</root>
</configuration>
| {
"pile_set_name": "Github"
} |
#include "macros.inc"
test_suite s32c1i
/*
 * S32C1I must NOT store when SCOMPARE1 (1) differs from the memory
 * word (3); a1 instead receives the current memory contents (3).
 */
test s32c1i_nowrite
    movi a2, 1f
    movi a3, 1
    wsr a3, scompare1
    movi a1, 2
    s32c1i a1, a2, 0
    assert ne, a1, a3
    l32i a1, a2, 0
    assert eqi, a1, 3
.data
.align 4
1:
.word 3
.text
test_end
/*
 * S32C1I must store when SCOMPARE1 (3) matches the memory word (3);
 * a1 receives the old value (3) and memory gets the new value (2).
 */
test s32c1i_write
    movi a2, 1f
    movi a3, 3
    wsr a3, scompare1
    movi a1, 2
    s32c1i a1, a2, 0
    assert eq, a1, a3
    l32i a1, a2, 0
    assert eqi, a1, 2
.data
.align 4
1:
.word 3
.text
test_end
test_suite_end
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.signin;
import android.accounts.Account;
import android.os.AsyncTask;
import org.chromium.base.Callback;
import org.chromium.base.Log;
import org.chromium.base.ObserverList;
import org.chromium.base.ThreadUtils;
import org.chromium.base.VisibleForTesting;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.components.signin.AccountManagerHelper;
/**
 * Android wrapper of AccountTrackerService which provides access from the java layer.
 * It offers the capability of fetching and seeding system accounts into AccountTrackerService in C++
 * layer, and notifies observers when it is complete.
 */
@JNINamespace("signin::android")
public class AccountTrackerService {
    private static final String TAG = "AccountService";
    // Process-wide singleton; created lazily on the UI thread by get().
    private static AccountTrackerService sAccountTrackerService;
    private SystemAccountsSeedingStatus mSystemAccountsSeedingStatus;
    // True when invalidateAccountSeedStatus() has reported a change that has
    // not yet been re-seeded into the native AccountTrackerService.
    private boolean mSystemAccountsChanged;
    // Set by syncForceRefreshForTest() to make any in-flight async seeding bail out.
    private boolean mSyncForceRefreshedForTest;
    // Seeding state machine: NOT_STARTED -> IN_PROGRESS -> DONE, with DONE
    // temporarily demoted to VALIDATING by validateSystemAccounts().
    private enum SystemAccountsSeedingStatus {
        SEEDING_NOT_STARTED,
        SEEDING_IN_PROGRESS,
        SEEDING_DONE,
        SEEDING_VALIDATING
    }
    /**
     * Classes that want to listen for system accounts fetching and seeding should implement
     * this interface and register with {@link #addSystemAccountsSeededListener}.
     */
    public interface OnSystemAccountsSeededListener {
        // Called at the end of seedSystemAccounts().
        void onSystemAccountsSeedingComplete();
        // Called in invalidateAccountSeedStatus() indicating that accounts have changed.
        void onSystemAccountsChanged();
    }
    private final ObserverList<OnSystemAccountsSeededListener> mSystemAccountsSeedingObservers =
            new ObserverList<>();
    /** Returns the singleton instance, creating it on first use. UI thread only. */
    public static AccountTrackerService get() {
        ThreadUtils.assertOnUiThread();
        if (sAccountTrackerService == null) {
            sAccountTrackerService = new AccountTrackerService();
        }
        return sAccountTrackerService;
    }
    private AccountTrackerService() {
        mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_NOT_STARTED;
        mSystemAccountsChanged = false;
    }
    /**
     * Checks whether the account id <-> email mapping has been seeded into C++ layer.
     * If not, it automatically starts fetching the mapping and seeds it.
     * @return Whether the accounts have been seeded already.
     */
    public boolean checkAndSeedSystemAccounts() {
        ThreadUtils.assertOnUiThread();
        if (mSystemAccountsSeedingStatus == SystemAccountsSeedingStatus.SEEDING_DONE
                && !mSystemAccountsChanged) {
            return true;
        }
        // Start (or restart) seeding unless a seeding pass is already running;
        // the running pass will notice mSystemAccountsChanged and re-run itself.
        if ((mSystemAccountsSeedingStatus == SystemAccountsSeedingStatus.SEEDING_NOT_STARTED
                    || mSystemAccountsChanged)
                && mSystemAccountsSeedingStatus
                        != SystemAccountsSeedingStatus.SEEDING_IN_PROGRESS) {
            seedSystemAccounts();
        }
        return false;
    }
    /**
     * Register an |observer| to observe system accounts seeding status.
     */
    public void addSystemAccountsSeededListener(OnSystemAccountsSeededListener observer) {
        ThreadUtils.assertOnUiThread();
        mSystemAccountsSeedingObservers.addObserver(observer);
        // If seeding already finished, notify the new observer immediately.
        if (mSystemAccountsSeedingStatus == SystemAccountsSeedingStatus.SEEDING_DONE) {
            observer.onSystemAccountsSeedingComplete();
        }
    }
    /**
     * Remove an |observer| from the list of observers.
     */
    public void removeSystemAccountsSeededListener(OnSystemAccountsSeededListener observer) {
        ThreadUtils.assertOnUiThread();
        mSystemAccountsSeedingObservers.removeObserver(observer);
    }
    // Fetches the account id <-> email mapping off the UI thread, then seeds it
    // into the native AccountTrackerService and notifies observers. Restarts
    // itself if the system accounts change mid-flight or any id is missing.
    private void seedSystemAccounts() {
        ThreadUtils.assertOnUiThread();
        mSystemAccountsChanged = false;
        mSyncForceRefreshedForTest = false;
        final AccountIdProvider accountIdProvider = AccountIdProvider.getInstance();
        if (accountIdProvider.canBeUsed()) {
            mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_IN_PROGRESS;
        } else {
            mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_NOT_STARTED;
            return;
        }
        AccountManagerHelper.get().getGoogleAccounts(new Callback<Account[]>() {
            @Override
            public void onResult(final Account[] accounts) {
                new AsyncTask<Void, Void, String[][]>() {
                    @Override
                    public String[][] doInBackground(Void... params) {
                        Log.d(TAG, "Getting id/email mapping");
                        // Row 0: stable gaia ids; row 1: account emails.
                        String[][] accountIdNameMap = new String[2][accounts.length];
                        for (int i = 0; i < accounts.length; ++i) {
                            accountIdNameMap[0][i] =
                                    accountIdProvider.getAccountId(accounts[i].name);
                            accountIdNameMap[1][i] = accounts[i].name;
                        }
                        return accountIdNameMap;
                    }
                    @Override
                    public void onPostExecute(String[][] accountIdNameMap) {
                        if (mSyncForceRefreshedForTest) return;
                        if (mSystemAccountsChanged) {
                            // Accounts changed while we were fetching; retry.
                            seedSystemAccounts();
                            return;
                        }
                        if (areAccountIdsValid(accountIdNameMap[0])) {
                            nativeSeedAccountsInfo(accountIdNameMap[0], accountIdNameMap[1]);
                            mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_DONE;
                            notifyObserversOnSeedingComplete();
                        } else {
                            Log.w(TAG, "Invalid mapping of id/email");
                            seedSystemAccounts();
                        }
                    }
                }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
            }
        });
    }
    // Returns true iff every fetched account id is non-null.
    private boolean areAccountIdsValid(String[] accountIds) {
        for (int i = 0; i < accountIds.length; ++i) {
            if (accountIds[i] == null) return false;
        }
        return true;
    }
    private void notifyObserversOnSeedingComplete() {
        for (OnSystemAccountsSeededListener observer : mSystemAccountsSeedingObservers) {
            observer.onSystemAccountsSeedingComplete();
        }
    }
    /**
     * Seed system accounts into AccountTrackerService synchronously for test purpose.
     */
    @VisibleForTesting
    public void syncForceRefreshForTest(String[] accountIds, String[] accountNames) {
        ThreadUtils.assertOnUiThread();
        mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_IN_PROGRESS;
        mSyncForceRefreshedForTest = true;
        nativeSeedAccountsInfo(accountIds, accountNames);
        mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_DONE;
    }
    /**
     * Notifies the AccountTrackerService that the set of system accounts has
     * changed, and informs registered observers.
     * @param reSeedAccounts Whether to also start seeding the new account information immediately.
     */
    public void invalidateAccountSeedStatus(boolean reSeedAccounts) {
        ThreadUtils.assertOnUiThread();
        mSystemAccountsChanged = true;
        notifyObserversOnAccountsChange();
        if (reSeedAccounts) checkAndSeedSystemAccounts();
    }
    /**
     * Verifies whether seeded accounts in AccountTrackerService are up-to-date with the accounts in
     * Android. It sets seeding status to SEEDING_VALIDATING temporarily to block services depending
     * on it and sets it back to SEEDING_DONE after passing the verification. This function is
     * created because accounts changed notification from Android to Chrome has latency.
     */
    public void validateSystemAccounts() {
        ThreadUtils.assertOnUiThread();
        if (!checkAndSeedSystemAccounts()) {
            // Do nothing if seeding is not done.
            return;
        }
        mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_VALIDATING;
        AccountManagerHelper.get().getGoogleAccounts(new Callback<Account[]>() {
            @Override
            public void onResult(final Account[] accounts) {
                // Bail if something else already restarted or finished seeding.
                if (mSystemAccountsChanged
                        || mSystemAccountsSeedingStatus
                                != SystemAccountsSeedingStatus.SEEDING_VALIDATING) {
                    return;
                }
                String[] accountNames = new String[accounts.length];
                for (int i = 0; i < accounts.length; ++i) {
                    accountNames[i] = accounts[i].name;
                }
                if (nativeAreAccountsSeeded(accountNames)) {
                    mSystemAccountsSeedingStatus = SystemAccountsSeedingStatus.SEEDING_DONE;
                    notifyObserversOnSeedingComplete();
                }
            }
        });
    }
    private void notifyObserversOnAccountsChange() {
        for (OnSystemAccountsSeededListener observer : mSystemAccountsSeedingObservers) {
            observer.onSystemAccountsChanged();
        }
    }
    private static native void nativeSeedAccountsInfo(String[] gaiaIds, String[] accountNames);
    private static native boolean nativeAreAccountsSeeded(String[] accountNames);
}
| {
"pile_set_name": "Github"
} |
'use strict';
require('../../setup');
import { accounts } from '@openzeppelin/test-environment';
import Contracts from '../../../src/artifacts/Contracts';
import ProxyAdmin from '../../../src/proxy/ProxyAdmin';
import AppProject from '../../../src/project/AppProject';
import SimpleProject from '../../../src/project/SimpleProject';
import shouldManageProxies from './ProxyProject.behaviour';
import shouldManageDependencies from './DependenciesProject.behaviour';
import shouldBehaveLikePackageProject from './PackageProject.behavior';
import shouldManageAdminProxy from './AdminProxy.behaviour';
import assertRevert from '../../../src/test/helpers/assertRevert';
import { toAddress } from '../../../src/utils/Addresses';
import { Package } from '../../../src';
import ProxyFactory from '../../../src/proxy/ProxyFactory';
const ImplV1 = Contracts.getFromLocal('DummyImplementation');
const ImplV2 = Contracts.getFromLocal('DummyImplementationV2');
// Registers both dummy implementation versions on the project under test
// (bound as `this.project` by the mocha context).
async function setImplementations() {
  const registrations = [
    [ImplV1, 'DummyImplementation'],
    [ImplV2, 'DummyImplementationV2'],
  ];
  for (const [contractClass, contractName] of registrations) {
    await this.project.setImplementation(contractClass, contractName);
  }
}
// Test suite for AppProject: covers deployment, proxy management,
// dependency management, and conversion from a SimpleProject.
describe('AppProject', function() {
  const [owner, another] = accounts;
  const name = 'MyProject';
  const version = '0.2.0';
  const newVersion = '0.3.0';
  describe('new AppProject', function() {
    beforeEach('deploying', async function() {
      // Deploy the admin and factory first so fetchOrDeploy reuses them
      // instead of deploying fresh ones.
      this.proxyAdmin = await ProxyAdmin.deploy({ from: owner });
      this.proxyFactory = await ProxyFactory.deploy({ from: owner });
      this.project = await AppProject.fetchOrDeploy(
        name,
        version,
        { from: owner },
        {
          proxyAdminAddress: this.proxyAdmin.address,
          proxyFactoryAddress: this.proxyFactory.address,
        },
      );
      this.adminAddress = this.project.proxyAdmin.address;
    });
    it('should have a proxyAdmin initialized', function() {
      this.project.proxyAdmin.should.be.an.instanceof(ProxyAdmin);
      this.project.proxyAdmin.address.should.equalIgnoreCase(this.proxyAdmin.address);
    });
    it('should have a proxyFactory initialized', function() {
      this.project.proxyFactory.should.be.an.instanceof(ProxyFactory);
      this.project.proxyFactory.address.should.equalIgnoreCase(this.proxyFactory.address);
    });
    describe('instance methods', function() {
      beforeEach('deploy implementations', async function() {
        this.implementation = await this.project.setImplementation(ImplV1, 'DummyImplementation');
        this.proxy = await this.project.createProxy(ImplV1);
      });
      describe('#upgradeProxy', function() {
        it('fails to upgrade a proxy for unregistered package', async function() {
          await assertRevert(
            this.project.upgradeProxy(this.proxy.address, ImplV1, {
              contractName: 'NOTEXISTS',
            }),
          );
        });
        it('fails to upgrade a proxy for unregistered contract', async function() {
          await assertRevert(
            this.project.upgradeProxy(this.proxy.address, ImplV1, {
              packageName: 'NOTEXISTS',
            }),
          );
        });
        it('fails to upgrade a non-proxy contract', async function() {
          await assertRevert(this.project.upgradeProxy(this.implementation.address, ImplV1));
        });
      });
    });
    // Shared behaviours: re-fetch the project by app address, then verify
    // versioning and naming invariants hold.
    shouldBehaveLikePackageProject({
      fetch: async function() {
        this.appAddress = this.project.getApp().address;
        this.project = await AppProject.fetchOrDeploy(name, version, { from: owner }, { appAddress: this.appAddress });
      },
      onNewVersion: function() {
        it('registers the new package version in the app', async function() {
          const app = this.project.getApp();
          const thepackage = await this.project.getProjectPackage();
          const packageInfo = await app.getPackage(name);
          packageInfo.version.should.be.semverEqual(newVersion);
          packageInfo.package.address.should.eq(thepackage.address);
        });
      },
      onInitialize: function() {
        it('has a name', async function() {
          this.project.name.should.eq(name);
        });
      },
    });
    shouldManageProxies({
      supportsNames: true,
      otherAdmin: another,
      setImplementations,
    });
    shouldManageDependencies();
    shouldManageAdminProxy({
      otherAdmin: another,
      setImplementations,
    });
  });
  describe('fromSimpleProject', function() {
    const name = 'myProject';
    const version = '1.4.0';
    const dependencyVersion = '1.6.0';
    const dependencyName = 'myDependency';
    const contractName = 'DummyImplementation';
    beforeEach('setting up dependency', async function() {
      this.dependency = await Package.deploy();
      await this.dependency.newVersion(dependencyVersion);
    });
    beforeEach('setting up simple project', async function() {
      this.simple = new SimpleProject(name, null, { from: owner });
      this.implementation = await this.simple.setImplementation(ImplV1, contractName);
      await this.simple.setDependency(dependencyName, this.dependency.address, dependencyVersion);
    });
    it('creates a new app project from a simple project', async function() {
      // The converted project must preserve implementations and dependencies.
      this.project = await AppProject.fromSimpleProject(this.simple);
      (await this.project.getImplementation({ contractName })).should.eq(toAddress(this.implementation));
      (await this.project.getDependencyVersion(dependencyName)).should.be.semverEqual(dependencyVersion);
      (await this.project.getDependencyPackage(dependencyName)).address.should.be.eq(this.dependency.address);
    });
  });
});
| {
"pile_set_name": "Github"
} |
/* Set data type implemented by a hash table with a linked list.
Copyright (C) 2006, 2009-2020 Free Software Foundation, Inc.
Written by Bruno Haible <[email protected]>, 2018.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>. */
#ifndef _GL_LINKEDHASH_SET_H
#define _GL_LINKEDHASH_SET_H
#include "gl_set.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Implementation vtable for a set backed by a hash table whose elements are
   additionally chained in a linked list (per the file header; presumably this
   gives a stable iteration order — see gl_set.h for the interface).  */
extern const struct gl_set_implementation gl_linkedhash_set_implementation;
/* Convenience macro naming this implementation, for passing as the
   'implementation' argument of the gl_set creation functions.  */
#define GL_LINKEDHASH_SET &gl_linkedhash_set_implementation
#ifdef __cplusplus
}
#endif
#endif /* _GL_LINKEDHASH_SET_H */
| {
"pile_set_name": "Github"
} |
/**
* Bug 1621677 - A test for making sure getting the correct (higher) precision
* when it's cross-origin-isolated.
*/
// ================================================================================================
// ================================================================================================
// This test case is mostly copied from the window test case in
// test_reduce_time_precision.html. The main difference is that this test case
// verifies the DOM API has more precision when the page is cross-origin
// isolated, and that cross-origin isolation doesn't affect RFP.
// Verifies that DOM timestamp APIs are clamped to the expected precision under
// various combinations of RFP/RTP prefs and cross-origin isolation.
add_task(async function runRTPTestDOM() {
  let runTests = async function(data) {
    let expectedPrecision = data.precision;
    // eslint believes that isRounded is available in this scope, but if you
    // remove the assignment, you will see it is not
    // eslint-disable-next-line
    let isRounded = eval(data.isRoundedFunc);
    // Prepare for test of AudioContext.currentTime
    // eslint-disable-next-line
    let audioContext = new content.AudioContext();
    // Known ways to generate time stamps, in milliseconds
    const timeStampCodes = [
      "content.performance.now()",
      "new content.Date().getTime()",
      'new content.Event("").timeStamp',
      'new content.File([], "").lastModified',
    ];
    // These are measured in seconds, so we need to scale them up
    var timeStampCodesDOM = timeStampCodes.concat([
      "audioContext.currentTime * 1000",
    ]);
    // Loop through each timeStampCode, evaluate it,
    // and check if it is rounded
    for (let timeStampCode of timeStampCodesDOM) {
      // eslint-disable-next-line
      let timeStamp = eval(timeStampCode);
      // Audio Contexts increment in intervals of (minimum) 5.4ms, so we don't
      // clamp/jitter if the timer precision is less than that.
      // (Technically on MBPs they increment in intervals of 2.6 but this is
      // non-standard and will eventually be changed. We don't cover this situation
      // because we don't really support arbitrary Timer Precision, especially in
      // the 2.6 - 5.4ms interval.)
      if (timeStampCode.includes("audioContext") && expectedPrecision < 5.4) {
        continue;
      }
      ok(
        isRounded(timeStamp, expectedPrecision),
        `Should be rounded to nearest ${expectedPrecision} ms; saw ${timeStamp}`
      );
    }
  };
  // RFP: precision is fixed regardless of the cross-origin-isolated state.
  await setupAndRunCrossOriginIsolatedTest(true, true, true, 100, runTests);
  await setupAndRunCrossOriginIsolatedTest(true, false, true, 13, runTests);
  await setupAndRunCrossOriginIsolatedTest(true, false, true, 0.13, runTests);
  // RTP: cross-origin isolation grants the higher (0.005 ms) precision.
  await setupAndRunCrossOriginIsolatedTest(false, true, false, 0.13, runTests);
  await setupAndRunCrossOriginIsolatedTest(false, true, true, 0.005, runTests);
});
// ================================================================================================
// ================================================================================================
// This test case is mostly copied from the worker test case in
// test_reduce_time_precision.html. The main difference is that this test case
// verifies the DOM API has more precision when the page is cross-origin
// isolated, and that cross-origin isolation doesn't affect RFP.
// Spawns a cross-origin-isolated worker and checks that every known timestamp
// source inside it is clamped to `data.precision` ms, using the predicate
// stringified in `data.isRoundedFunc`.
let runWorkerTest = async function(data) {
  let expectedPrecision = data.precision;
  await new Promise(resolve => {
    // eslint believes that isRounded is available in this scope, but if you
    // remove the assignment, you will see it is not
    // eslint-disable-next-line
    let isRounded = eval(data.isRoundedFunc);
    let worker = new content.Worker(
      "coop_header.sjs?crossOriginIsolated=true&worker=true"
    );
    // Known ways to generate time stamps, in milliseconds
    const timeStampCodes = [
      "performance.now()",
      "new Date().getTime()",
      'new Event("").timeStamp',
      'new File([], "").lastModified',
    ];
    let promises = [];
    for (let timeStampCode of timeStampCodes) {
      promises.push(
        new Promise(res => {
          worker.postMessage({
            type: "runCmdAndGetResult",
            cmd: timeStampCode,
          });
          // Use a named function expression: the previous code passed `this`
          // to removeEventListener, but inside a non-arrow listener `this` is
          // the event target (the worker), so the handler was never removed.
          worker.addEventListener("message", function listener(e) {
            if (e.data.type == "result") {
              if (e.data.resultOf == timeStampCode) {
                ok(
                  isRounded(e.data.result, expectedPrecision),
                  `The result of ${e.data.resultOf} should be rounded to ` +
                    ` nearest ${expectedPrecision} ms in workers; saw ` +
                    `${e.data.result}`
                );
                worker.removeEventListener("message", listener);
                res();
              }
              return;
            }
            ok(false, `Unknown message type. Got ${e.data.type}`);
            res();
          });
        })
      );
    }
    // Tear the worker down only after every timestamp has been checked.
    Promise.all(promises).then(_ => {
      worker.terminate();
      resolve();
    });
  });
};
// Runs the worker timestamp-precision checks across pref combinations.
// Each row: [resistFingerprinting, reduceTimerPrecision, crossOriginIsolated,
// expected precision in ms] — names presumed from usage; confirm against
// setupAndRunCrossOriginIsolatedTest.
add_task(async function runRTPTestsForWorker() {
  const testConfigs = [
    // RFP: precision is fixed regardless of cross-origin isolation.
    [true, true, true, 100],
    [true, false, true, 13],
    [true, false, true, 0.13],
    // RTP: cross-origin isolation grants the higher (0.005 ms) precision.
    [false, true, false, 0.13],
    [false, true, true, 0.005],
  ];
  for (const [rfp, rtp, coi, precision] of testConfigs) {
    await setupAndRunCrossOriginIsolatedTest(
      rfp,
      rtp,
      coi,
      precision,
      runWorkerTest
    );
  }
});
| {
"pile_set_name": "Github"
} |
package ecs
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// CreateVpc invokes the ecs.CreateVpc API synchronously
// api document: https://help.aliyun.com/api/ecs/createvpc.html
//
// NOTE: generated code (see file header) — do not hand-edit the body; the call
// blocks until DoAction completes and err is non-nil on failure.
func (client *Client) CreateVpc(request *CreateVpcRequest) (response *CreateVpcResponse, err error) {
	response = CreateCreateVpcResponse()
	err = client.DoAction(request, response)
	return
}
// CreateVpcWithChan invokes the ecs.CreateVpc API asynchronously
// api document: https://help.aliyun.com/api/ecs/createvpc.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
//
// Exactly one value is sent on one of the returned (buffered, capacity-1)
// channels, after which both channels are closed. If the task cannot even be
// scheduled, the scheduling error is sent and both channels close immediately.
func (client *Client) CreateVpcWithChan(request *CreateVpcRequest) (<-chan *CreateVpcResponse, <-chan error) {
	responseChan := make(chan *CreateVpcResponse, 1)
	errChan := make(chan error, 1)
	err := client.AddAsyncTask(func() {
		// Close both channels when the task finishes, whatever the outcome.
		defer close(responseChan)
		defer close(errChan)
		response, err := client.CreateVpc(request)
		if err != nil {
			errChan <- err
		} else {
			responseChan <- response
		}
	})
	if err != nil {
		// Task was never queued: report the scheduling error directly.
		errChan <- err
		close(responseChan)
		close(errChan)
	}
	return responseChan, errChan
}
// CreateVpcWithCallback invokes the ecs.CreateVpc API asynchronously
// api document: https://help.aliyun.com/api/ecs/createvpc.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
//
// The callback receives either the response or an error. The returned channel
// yields 1 when the async task ran, or 0 when it could not be scheduled, and
// is closed afterwards.
func (client *Client) CreateVpcWithCallback(request *CreateVpcRequest, callback func(response *CreateVpcResponse, err error)) <-chan int {
	result := make(chan int, 1)
	err := client.AddAsyncTask(func() {
		var response *CreateVpcResponse
		var err error
		defer close(result)
		response, err = client.CreateVpc(request)
		callback(response, err)
		result <- 1
	})
	if err != nil {
		// Scheduling failed: invoke the callback synchronously with the error.
		defer close(result)
		callback(nil, err)
		result <- 0
	}
	return result
}
// CreateVpcRequest is the request struct for api CreateVpc
// Fields are serialized as query parameters per their struct tags
// (generated code — regenerating the SDK will overwrite edits).
type CreateVpcRequest struct {
	*requests.RpcRequest
	VpcName              string           `position:"Query" name:"VpcName"`
	ResourceOwnerId      requests.Integer `position:"Query" name:"ResourceOwnerId"`
	ResourceOwnerAccount string           `position:"Query" name:"ResourceOwnerAccount"`
	ClientToken          string           `position:"Query" name:"ClientToken"`
	OwnerAccount         string           `position:"Query" name:"OwnerAccount"`
	CidrBlock            string           `position:"Query" name:"CidrBlock"`
	Description          string           `position:"Query" name:"Description"`
	UserCidr             string           `position:"Query" name:"UserCidr"`
	OwnerId              requests.Integer `position:"Query" name:"OwnerId"`
}
// CreateVpcResponse is the response struct for api CreateVpc
// Fields are populated from the JSON/XML body per their struct tags.
type CreateVpcResponse struct {
	*responses.BaseResponse
	RequestId    string `json:"RequestId" xml:"RequestId"`
	VpcId        string `json:"VpcId" xml:"VpcId"`
	VRouterId    string `json:"VRouterId" xml:"VRouterId"`
	RouteTableId string `json:"RouteTableId" xml:"RouteTableId"`
}
// CreateCreateVpcRequest creates a request to invoke CreateVpc API
// with the Ecs product, API version 2014-05-26, pre-filled.
func CreateCreateVpcRequest() (request *CreateVpcRequest) {
	request = &CreateVpcRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	request.InitWithApiInfo("Ecs", "2014-05-26", "CreateVpc", "ecs", "openAPI")
	return
}
// CreateCreateVpcResponse creates a response to parse from CreateVpc response
func CreateCreateVpcResponse() (response *CreateVpcResponse) {
	response = &CreateVpcResponse{
		BaseResponse: &responses.BaseResponse{},
	}
	return
}
| {
"pile_set_name": "Github"
} |
# Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision$"
import sys, os, imp, re, optparse
from glob import glob
from platform import machine as platform_machine
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.spawn import find_executable
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# (sys.gettotalrefcount only exists in debug builds of the interpreter.)
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
    """Prepend 'dir' to 'dirlist' in place, unless it is None, already
    present in the list, or not an existing directory."""
    if dir is None:
        return
    if dir in dirlist or not os.path.isdir(dir):
        return
    dirlist.insert(0, dir)
def macosx_sdk_root():
    """Return the root directory of the currently selected OSX SDK,
    as given by the -isysroot flag in CFLAGS, or '/' when no SDK
    was specified.
    """
    match = re.search(r'-isysroot\s+(\S+)',
                      sysconfig.get_config_var('CFLAGS'))
    return match.group(1) if match else '/'
def is_macosx_sdk_path(path):
    """Return True when 'path' is one that can be relocated under an
    OSX SDK root: anything under /System/, or under /usr/ except
    /usr/local."""
    if path.startswith('/System/'):
        return True
    return path.startswith('/usr/') and not path.startswith('/usr/local')
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
        file is found in one of them, no additional directives are needed
        (an empty list is returned).
    'paths' is a list of additional locations to check; if the file is
        found in one of them, the resulting list will contain the directory.
    """
    if sys.platform == 'darwin':
        # Honor the MacOSX SDK setting when one was specified.
        # An SDK is a directory with the same structure as a real
        # system, but with only header files and libraries.
        sysroot = macosx_sdk_root()
    # Check the standard locations
    for dir in std_dirs:
        f = os.path.join(dir, filename)
        if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
            # Look inside the SDK copy of the directory; dir[1:] drops the
            # leading '/' so os.path.join keeps the sysroot prefix.
            f = os.path.join(sysroot, dir[1:], filename)
        if os.path.exists(f): return []
    # Check the additional directories
    for dir in paths:
        f = os.path.join(dir, filename)
        if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
            f = os.path.join(sysroot, dir[1:], filename)
        if os.path.exists(f):
            return [dir]
    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate 'libname' via the compiler and report which extra -L
    directory (if any) is needed: [] when the library lives in a standard
    directory, [p] when it lives in one of 'paths', or None when the
    compiler cannot find it at all."""
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None
    if sys.platform == 'darwin':
        sysroot = macosx_sdk_root()
    # Check whether the found file is in one of the standard directories
    dirname = os.path.dirname(result)
    for p in std_dirs:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)
        if sys.platform == 'darwin' and is_macosx_sdk_path(p):
            # Compare against the SDK-relocated copy of the directory.
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ ]
        if p == dirname:
            return [ ]
    # Otherwise, it must have been in one of the additional directories,
    # so we have to figure out which one.
    for p in paths:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)
        if sys.platform == 'darwin' and is_macosx_sdk_path(p):
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ p ]
        if p == dirname:
            return [p]
    else:
        # for-else: reached only when no directory matched, which would mean
        # the compiler found the library somewhere we never asked it to look.
        assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Return whether the module 'modname' is present in the list
    of extensions 'extlist'.

    Returns a bool (previously the match count); bool is an int subclass
    and truthiness is unchanged, so existing callers behave identically,
    while any() stops scanning at the first match.
    """
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Look for 'module' in the folders of 'dirlist'; return its joined
    path when found, otherwise the unadorned filename unchanged."""
    found = find_file(module, [], dirlist)
    if not found:
        return module
    if len(found) > 1:
        # More than one candidate directory: warn and use the first.
        log.info("WARNING: multiple copies of %s found" % module)
    return os.path.join(found[0], module)
class PyBuildExt(build_ext):
    def __init__(self, dist):
        build_ext.__init__(self, dist)
        # Names of extensions whose build or import check failed; reported
        # at the end of build_extensions().
        self.failed = []
    def build_extensions(self):
        """Autodetect buildable extension modules, fix up their paths,
        build them, and print a three-column report of modules that were
        missing prerequisites or failed to build."""
        # Detect which modules should be compiled
        missing = self.detect_modules()
        # Remove modules that are present on the disabled list
        extensions = [ext for ext in self.extensions
                      if ext.name not in disabled_module_list]
        # move ctypes to the end, it depends on other modules
        ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
        if "_ctypes" in ext_map:
            ctypes = extensions.pop(ext_map["_ctypes"])
            extensions.append(ctypes)
        self.extensions = extensions
        # Fix up the autodetected modules, prefixing all the source files
        # with Modules/ and adding Python's include directory to the path.
        (srcdir,) = sysconfig.get_config_vars('srcdir')
        if not srcdir:
            # Maybe running on Windows but not using CYGWIN?
            raise ValueError("No source directory; cannot proceed.")
        srcdir = os.path.abspath(srcdir)
        moddirlist = [os.path.join(srcdir, 'Modules')]
        # Platform-dependent module source and include directories
        incdirlist = []
        platform = self.get_platform()
        if platform == 'darwin' and ("--disable-toolbox-glue" not in
            sysconfig.get_config_var("CONFIG_ARGS")):
            # Mac OS X also includes some mac-specific modules
            macmoddir = os.path.join(srcdir, 'Mac/Modules')
            moddirlist.append(macmoddir)
            incdirlist.append(os.path.join(srcdir, 'Mac/Include'))
        # Fix up the paths for scripts, too
        self.distribution.scripts = [os.path.join(srcdir, filename)
                                     for filename in self.distribution.scripts]
        # Python header files; any change to these forces a recompile below.
        headers = [sysconfig.get_config_h_filename()]
        headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))
        # Iterate over a copy since we may remove entries while iterating.
        for ext in self.extensions[:]:
            ext.sources = [ find_module_file(filename, moddirlist)
                            for filename in ext.sources ]
            if ext.depends is not None:
                ext.depends = [find_module_file(filename, moddirlist)
                               for filename in ext.depends]
            else:
                ext.depends = []
            # re-compile extensions if a header file has been changed
            ext.depends.extend(headers)
            # platform specific include directories
            ext.include_dirs.extend(incdirlist)
            # If a module has already been built statically,
            # don't build it here
            if ext.name in sys.builtin_module_names:
                self.extensions.remove(ext)
        # Parse Modules/Setup and Modules/Setup.local to figure out which
        # modules are turned on in the file.
        remove_modules = []
        for filename in ('Modules/Setup', 'Modules/Setup.local'):
            input = text_file.TextFile(filename, join_lines=1)
            while 1:
                line = input.readline()
                if not line: break
                line = line.split()
                remove_modules.append(line[0])
            input.close()
        for ext in self.extensions[:]:
            if ext.name in remove_modules:
                self.extensions.remove(ext)
        # When you run "make CC=altcc" or something similar, you really want
        # those environment variables passed into the setup.py phase. Here's
        # a small set of useful ones.
        compiler = os.environ.get('CC')
        args = {}
        # unfortunately, distutils doesn't let us provide separate C and C++
        # compilers
        if compiler is not None:
            (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
            args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
        self.compiler.set_executables(**args)
        build_ext.build_extensions(self)
        # Column width for the report: widest extension or failed-module name.
        longest = max([len(e.name) for e in self.extensions])
        if self.failed:
            longest = max(longest, max([len(name) for name in self.failed]))
        def print_three_column(lst):
            # Print 'lst' sorted case-insensitively, three names per row.
            lst.sort(key=str.lower)
            # guarantee zip() doesn't drop anything
            while len(lst) % 3:
                lst.append("")
            for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
                print "%-*s %-*s %-*s" % (longest, e, longest, f,
                                          longest, g)
        if missing:
            print
            print ("Python build finished, but the necessary bits to build "
                   "these modules were not found:")
            print_three_column(missing)
            print ("To find the necessary bits, look in setup.py in"
                   " detect_modules() for the module's name.")
            print
        if self.failed:
            failed = self.failed[:]
            print
            print "Failed to build these modules:"
            print_three_column(failed)
            print
    def build_extension(self, ext):
        """Build a single extension, then sanity-check it by importing it;
        on failure, record the name in self.failed and (for import
        failures) rename the built file so it cannot be picked up."""
        if ext.name == '_ctypes':
            # ctypes needs extra configuration (libffi) before it can build.
            if not self.configure_ctypes(ext):
                return
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsError), why:
            self.announce('WARNING: building of extension "%s" failed: %s' %
                          (ext.name, sys.exc_info()[1]))
            self.failed.append(ext.name)
            return
        # Workaround for Mac OS X: The Carbon-based modules cannot be
        # reliably imported into a command-line Python
        if 'Carbon' in ext.extra_link_args:
            self.announce(
                'WARNING: skipping import check for Carbon-based "%s"' %
                ext.name)
            return
        if self.get_platform() == 'darwin' and (
                sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
            # Don't bother doing an import check when an extension was
            # build with an explicit '-arch' flag on OSX. That's currently
            # only used to build 32-bit only extensions in a 4-way
            # universal build and loading 32-bit code into a 64-bit
            # process will fail.
            self.announce(
                'WARNING: skipping import check for "%s"' %
                ext.name)
            return
        # Workaround for Cygwin: Cygwin currently has fork issues when many
        # modules have been imported
        if self.get_platform() == 'cygwin':
            self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                % ext.name)
            return
        ext_filename = os.path.join(
            self.build_lib,
            self.get_ext_filename(self.get_ext_fullname(ext.name)))
        try:
            imp.load_dynamic(ext.name, ext_filename)
        except ImportError, why:
            self.failed.append(ext.name)
            self.announce('*** WARNING: renaming "%s" since importing it'
                          ' failed: %s' % (ext.name, why), level=3)
            assert not self.inplace
            # Rename the broken .so to <name>_failed<ext> so a later import
            # of the package cannot accidentally load it.
            basename, tail = os.path.splitext(ext_filename)
            newname = basename + "_failed" + tail
            if os.path.exists(newname):
                os.remove(newname)
            os.rename(ext_filename, newname)
            # XXX -- This relies on a Vile HACK in
            # distutils.command.build_ext.build_extension(). The
            # _built_objects attribute is stored there strictly for
            # use here.
            # If there is a failure, _built_objects may not be there,
            # so catch the AttributeError and move on.
            try:
                for filename in self._built_objects:
                    os.remove(filename)
            except AttributeError:
                self.announce('unable to remove files (ignored)')
        except:
            # Any other exception during import is reported but does not
            # trigger the rename/cleanup above.
            exc_type, why, tb = sys.exc_info()
            self.announce('*** WARNING: importing extension "%s" '
                          'failed with %s: %s' % (ext.name, exc_type, why),
                          level=3)
            self.failed.append(ext.name)
def get_platform(self):
# Get value of sys.platform
for platform in ['cygwin', 'beos', 'darwin', 'atheos', 'osf1']:
if sys.platform.startswith(platform):
return platform
return sys.platform
    def add_multiarch_paths(self):
        """Add the Debian/Ubuntu multiarch library and include directories
        (e.g. /usr/lib/x86_64-linux-gnu) to the compiler search paths,
        when dpkg-architecture is available."""
        # Debian/Ubuntu multiarch support.
        # https://wiki.ubuntu.com/MultiarchSpec
        if not find_executable('dpkg-architecture'):
            return
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # Ask dpkg for the multiarch triplet, capturing stdout via a file.
        ret = os.system(
            'dpkg-architecture -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
            tmpfile)
        try:
            # os.system returns the wait status; >> 8 extracts the exit code.
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
                add_dir_to_list(self.compiler.library_dirs,
                                '/usr/lib/' + multiarch_path_component)
                add_dir_to_list(self.compiler.include_dirs,
                                '/usr/include/' + multiarch_path_component)
        finally:
            os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
platform = self.get_platform()
srcdir = sysconfig.get_config_var('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['darwin', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
#exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# code that will be builtins in the future, but conflict with the
# current builtins
exts.append( Extension('future_builtins', ['future_builtins.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# high-performance collections
exts.append( Extension("_collections", ["_collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# Python 3.1 _io library
exts.append( Extension("_io",
["_io/bufferedio.c", "_io/bytesio.c", "_io/fileio.c",
"_io/iobase.c", "_io/_iomodule.c", "_io/stringio.c", "_io/textio.c"],
depends=["_io/_iomodule.h"], include_dirs=["Modules/_io"]))
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
else:
missing.append('unicodedata')
# access to ISO C locale support
data = open('pyconfig.h').read()
m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if platform not in ['atheos']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if sys.maxint != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
else:
missing.extend(['imageop'])
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
fp = os.popen("ldd %s" % do_readline)
ldd_output = fp.readlines()
ret = fp.close()
if ret is None or ret >> 8 == 0:
for ln in ldd_output:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
if 'tinfo' in ln: # termcap interface split out from ncurses
readline_termcap_library = 'tinfo'
break
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entiry path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if sys.platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError, msg:
print "IOError while reading opensshv.h:", msg
pass
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (4, 8)
min_db_ver = (4, 1)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
    """Decide whether a detected BerkeleyDB version may be used.

    Args:
      db_ver: version tuple (major, minor) to check.

    Returns True when the version falls inside the supported
    [min_db_ver, max_db_ver] window and is not a known-bad build.
    """
    if min_db_ver <= db_ver <= max_db_ver:
        # Filter out configurations known to be broken.
        if db_ver[:2] == (4, 6):
            # BerkeleyDB 4.6.x is only trusted on mainstream x86/ia64.
            good_arches = ('i386', 'i486', 'i586', 'i686', 'x86_64', 'ia64')
            return platform_machine() in good_arches
        return True
    return False
def gen_db_minor_ver_nums(major):
    """Yield each acceptable minor version number for a BerkeleyDB major.

    Raises ValueError (on first iteration, as this is a generator) for an
    unsupported major version.
    """
    if major == 4:
        candidates = range(max_db_ver[1] + 1)
    elif major == 3:
        candidates = (3,)
    else:
        raise ValueError("unknown major BerkeleyDB version", major)
    for minor in candidates:
        if allow_db_ver((major, minor)):
            yield minor
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception):
    """Control-flow sentinel: raised to escape the nested directory-search
    loops as soon as a usable BerkeleyDB library file has been located."""
    pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print "db.h: no version number version in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if sys.platform != 'darwin':
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
if db_setup_debug:
print "bsddb using BerkeleyDB lib:", db_ver, dblib
print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
depends = ['bsddb.h'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
missing.append('_bsddb')
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
for d in inc_dirs + sqlite_inc_paths:
f = os.path.join(d, "sqlite3.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Comment this out if you want the sqlite3 module to be able to load extensions.
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
# underlying db library. May BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if sys.platform == 'darwin':
if is_macosx_sdk_path(f):
sysroot = macosx_sdk_root()
f = os.path.join(sysroot, f[1:])
if os.path.exists(f) and not db_incs:
data = open(f).read()
m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
else:
missing.append('bsddb185')
else:
missing.append('bsddb185')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others don't
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
else:
ndbm_libs = []
print "building dbm using ndbm"
dbmext = Extension('dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print "building dbm using bdb"
dbmext = Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('gdbm')
# Unix-only modules
if platform not in ['win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
if (platform not in ['cygwin', 'atheos', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif curses_library == 'curses' and platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
else:
missing.append('_multibytecodec')
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
missing.append('_codecs_%s' % loc)
# Dynamic loading module
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
else:
missing.append('dl')
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif platform.startswith('openbsd'):
macros = dict()
libraries = []
elif platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
if (platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
if platform == 'darwin':
# _scproxy
exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")],
extra_link_args= [
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation'
]))
if platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if int(os.uname()[2].split('.')[0]) >= 8:
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
# surpress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
def macSrcExists(name1, name2=''):
# Return whether Mac/Modules/<name1>[/<name2>] exists under srcdir.
# Returns None (falsy) when name1 is empty; callers test only truthiness.
if not name1:
return None
names = (name1,)
if name2:
names = (name1, name2)
path = os.path.join(srcdir, 'Mac', 'Modules', *names)
return os.path.exists(path)
def addMacExtension(name, kwds, extra_srcs=[]):
dirname = ''
if name[0] == '_':
dirname = name[1:].lower()
cname = name + '.c'
cmodulename = name + 'module.c'
# Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c
if macSrcExists(cname):
srcs = [cname]
elif macSrcExists(cmodulename):
srcs = [cmodulename]
elif macSrcExists(dirname, cname):
# XXX(nnorwitz): If all the names ended with module, we
# wouldn't need this condition. ibcarbon is the only one.
srcs = [os.path.join(dirname, cname)]
elif macSrcExists(dirname, cmodulename):
srcs = [os.path.join(dirname, cmodulename)]
else:
raise RuntimeError("%s not found" % name)
# Here's the whole point: add the extension with sources
exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Workaround for a bug in the version of gcc shipped with Xcode 3.
# The _Win extension should build just like the other Carbon extensions, but
# this actually results in a hard crash of the linker.
#
# When building universal ppc64+ppc, restrict _Win to i386/ppc only.
if '-arch ppc64' in cflags and '-arch ppc' in cflags:
win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'],
'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'],
}
addMacExtension('_Win', win_kwds)
else:
addMacExtension('_Win', carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
# _Qt lives in a subdir and links two frameworks, so it is added directly
# rather than through addMacExtension().
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# Try to configure _tkinter against the Aqua Tcl/Tk frameworks.
# Returns 1 (and appends the Extension) when both frameworks are found,
# or 0 so the normal unix-style Tk search can proceed.
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
# NOTE: nested for/else — the inner "break" means a framework was
# missing in F; the outer else runs only when no candidate had both.
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now directory with both frameworks. Continure
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in 'Tcl', 'Tk'
for H in 'Headers', 'Versions/Current/PrivateHeaders'
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall('-arch\s+(\w+)', cflags)
# Ask file(1) which architectures the Tk binary actually supports and
# restrict the build to the intersection with the requested -arch flags.
if is_macosx_sdk_path(F):
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(os.path.join(sysroot, F[1:]),))
else:
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(F,))
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
fp.close()
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
# frameworks[2:] skips the '-framework Tcl' pair, which is link-only.
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
# Detect Tcl/Tk and, if found, register the _tkinter extension.
# On Darwin the Aqua framework path is tried first; otherwise the
# classic unix search over libraries and headers runs. Returns None;
# success is indicated by _tkinter appearing in self.extensions.
# The _tkinter module.
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
platform = self.get_platform()
if (platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
# Assume we haven't found any of the libraries or include files
# The versions with dots are used on Unix, and the versions without
# dots on Windows, for detection by cygwin.
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
'8.2', '82', '8.1', '81', '8.0', '80']:
tklib = self.compiler.find_library_file(lib_dirs,
'tk' + version)
tcllib = self.compiler.find_library_file(lib_dirs,
'tcl' + version)
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
if '.' not in dotversion and "bsd" in sys.platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
tcl_include_sub = []
tk_include_sub = []
for dir in inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
if (tcllib is None or tklib is None or
tcl_includes is None or tk_includes is None):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
if platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
if platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
# AIX needs the loader library for Tk.
if platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
if platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
# Configure the bundled libffi for OS X by appending the preconfigured
# libffi_osx sources and include dirs to the _ctypes Extension `ext`.
# Always returns True.
# Darwin (OS X) uses preconfigured files, in
# the Modules/_ctypes/libffi_osx directory.
srcdir = sysconfig.get_config_var('srcdir')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi_osx'))
sources = [os.path.join(ffi_srcdir, p)
for p in ['ffi.c',
'x86/darwin64.S',
'x86/x86-darwin.S',
'x86/x86-ffi_darwin.c',
'x86/x86-ffi64.c',
'powerpc/ppc-darwin.S',
'powerpc/ppc-darwin_closure.S',
'powerpc/ppc-ffi_darwin.c',
'powerpc/ppc64-darwin_closure.S',
]]
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_srcdir, 'include'),
os.path.join(ffi_srcdir, 'powerpc')]
ext.include_dirs.extend(include_dirs)
ext.sources.extend(sources)
return True
def configure_ctypes(self, ext):
# Configure libffi for the _ctypes Extension `ext`.
# When not using the system libffi, runs the bundled libffi's configure
# script (cached via fficonfig.py) and folds its sources, include dirs
# and cflags into `ext`. Returns True on success, False when the
# configure step fails. (NOTE: this file is Python 2 — it uses the
# `print` and `exec ... in ...` statements below.)
if not self.use_system_libffi:
if sys.platform == 'darwin':
return self.configure_ctypes_darwin(ext)
srcdir = sysconfig.get_config_var('srcdir')
ffi_builddir = os.path.join(self.build_temp, 'libffi')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi'))
ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
from distutils.dep_util import newer_group
config_sources = [os.path.join(ffi_srcdir, fname)
for fname in os.listdir(ffi_srcdir)
if os.path.isfile(os.path.join(ffi_srcdir, fname))]
# Re-run configure only when forced or when any libffi source is
# newer than the cached fficonfig.py.
if self.force or newer_group(config_sources,
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
config_args = []
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
% (ffi_builddir, ffi_srcdir, " ".join(config_args))
res = os.system(cmd)
if res or not os.path.exists(ffi_configfile):
print "Failed to configure _ctypes module"
return False
# fficonfig.py defines ffi_cflags and ffi_sources; exec it into a
# private namespace rather than importing it.
fficonfig = {}
with open(ffi_configfile) as f:
exec f in fficonfig
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_builddir, 'include'),
ffi_builddir,
os.path.join(ffi_srcdir, 'src')]
extra_compile_args = fficonfig['ffi_cflags'].split()
ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
fficonfig['ffi_sources'])
ext.include_dirs.extend(include_dirs)
ext.extra_compile_args.extend(extra_compile_args)
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
# Register the _ctypes and _ctypes_test extensions, adding platform
# specific sources/flags. If configure was run with --with-system-ffi
# and a usable system libffi is found, switch to it by setting
# self.use_system_libffi and amending the Extension in place.
self.use_system_libffi = False
include_dirs = []
extra_compile_args = []
extra_link_args = []
sources = ['_ctypes/_ctypes.c',
'_ctypes/callbacks.c',
'_ctypes/callproc.c',
'_ctypes/stgdict.c',
'_ctypes/cfield.c']
depends = ['_ctypes/ctypes.h']
if sys.platform == 'darwin':
sources.append('_ctypes/malloc_closure.c')
sources.append('_ctypes/darwin/dlfcn_simple.c')
extra_compile_args.append('-DMACOSX')
include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
elif sys.platform == 'sunos5':
# XXX This shouldn't be necessary; it appears that some
# of the assembler code is non-PIC (i.e. it has relocations
# when it shouldn't. The proper fix would be to rewrite
# the assembler code to be PIC.
# This only works with GCC; the Sun compiler likely refuses
# this option. If you want to compile ctypes with the Sun
# compiler, please research a proper solution, instead of
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
elif sys.platform.startswith('hp-ux'):
extra_link_args.append('-fPIC')
ext = Extension('_ctypes',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
sources=sources,
depends=depends)
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'])
self.extensions.extend([ext, ext_test])
# Everything below only applies when --with-system-ffi was requested.
if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
if sys.platform == 'darwin':
# OS X 10.5 comes with libffi.dylib; the include files are
# in /usr/include/ffi
inc_dirs.append('/usr/include/ffi')
ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
if not ffi_inc or ffi_inc[0] == '':
ffi_inc = find_file('ffi.h', [], inc_dirs)
if ffi_inc is not None:
ffi_h = ffi_inc[0] + '/ffi.h'
# Sanity-check that the header really is libffi's by scanning for
# its include guard; reject the directory otherwise.
fp = open(ffi_h)
while 1:
line = fp.readline()
if not line:
ffi_inc = None
break
if line.startswith('#define LIBFFI_H'):
break
ffi_lib = None
if ffi_inc is not None:
for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
class PyBuildInstall(install):
# install command subclass used while building Python itself.
# Suppress the warning about installation into the lib_dynload
# directory, which is not in sys.path when running Python during
# installation:
def initialize_options (self):
install.initialize_options(self)
# warn_dir=0 disables distutils' "not on sys.path" install warning.
self.warn_dir=0
class PyBuildInstallLib(install_lib):
# Do exactly what install_lib does but make sure correct access modes get
# set on installed directories and files. All installed files with get
# mode 644 unless they are a shared library in which case they will get
# mode 755. All installed directories will get mode 755.
# (NOTE: the 0644/0755 literals below are Python 2 octal syntax, and
# os.path.walk is Python-2-only.)
so_ext = sysconfig.get_config_var("SO")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0644, 0755)
self.set_dir_modes(self.install_dir, 0755)
return outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
# chmod each installed file; shared libraries (matching so_ext)
# get sharedLibMode, everything else defaultMode. Symlinks skipped.
if not self.is_chmod_supported(): return
if not files: return
for filename in files:
if os.path.islink(filename): continue
mode = defaultMode
if filename.endswith(self.so_ext): mode = sharedLibMode
log.info("changing mode of %s to %o", filename, mode)
if not self.dry_run: os.chmod(filename, mode)
def set_dir_modes(self, dirname, mode):
# Recursively chmod every directory under dirname to `mode`.
if not self.is_chmod_supported(): return
os.path.walk(dirname, self.set_dir_modes_visitor, mode)
def set_dir_modes_visitor(self, mode, dirname, names):
# os.path.walk callback: chmod one directory (symlinks skipped).
if os.path.islink(dirname): return
log.info("changing mode of %s to %o", dirname, mode)
if not self.dry_run: os.chmod(dirname, mode)
def is_chmod_supported(self):
# Windows builds of Python may lack os.chmod.
return hasattr(os, 'chmod')
# Long-description text used by setup() in main() below for PyPI metadata.
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
# Trove classifiers for the PyPI metadata, one per line.
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
# Entry point: run distutils setup() with the custom build/install
# command classes defined above.
# turn off warnings when deprecated modules are imported
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
setup(# PyPI Metadata (PEP 301)
name = "Python",
version = sys.version.split()[0],
url = "http://www.python.org/%s" % sys.version[:3],
maintainer = "Guido van Rossum and the Python community",
maintainer_email = "[email protected]",
description = "A high-level object-oriented programming language",
long_description = SUMMARY.strip(),
license = "PSF license",
# filter(None, ...) drops the empty lines produced by split("\n").
classifiers = filter(None, CLASSIFIERS.split("\n")),
platforms = ["Many"],
# Build info
cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
'install_lib':PyBuildInstallLib},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('_struct', ['_struct.c'])],
# Scripts to install
scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
'Tools/scripts/2to3',
'Lib/smtpd.py']
)
# --install-platlib
if __name__ == '__main__':
main()
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>recorder.js</title>
<style>
audio { display: block; margin-bottom: 10px; }
#audio-container { padding: 20px 0; }
.ui-btn { display: inline-block; padding: 5px 20px; font-size: 14px; line-height: 1.428571429; box-sizing:content-box; text-align: center; border: 1px solid #e8e8e8; border-radius: 3px; color: #555; background-color: #fff; border-color: #e8e8e8; white-space: nowrap; cursor: pointer; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; }
.ui-btn:hover, .ui-btn.hover { color: #333; text-decoration: none; background-color: #f8f8f8; border:1px solid #ddd; }
.ui-btn:focus, .ui-btn:active { color: #333; outline: 0; }
.ui-btn.disabled, .ui-btn.disabled:hover, .ui-btn.disabled:active, .ui-btn[disabled], .ui-btn[disabled]:hover, .ui-state-disabled .ui-btn { cursor: not-allowed; background-color: #eee; border-color: #eee; color: #aaa; }
.ui-btn-primary { color: #fff; background-color: #39b54a; border-color: #39b54a; }
.ui-btn-primary:hover, .ui-btn-primary.hover { color: #fff; background-color: #16a329; border-color: #16a329; }
.ui-btn-primary:focus, .ui-btn-primary:active { color: #fff; }
.ui-btn-primary.disabled:focus{ color: #aaa; }
</style>
</head>
<body>
<iframe frameborder="0" scrolling="0" width="91px" height="20px" src="https://ghbtns.com/github-btn.html?user=wangpengfei15975&repo=recorder.js&type=star&count=true"></iframe>
<h1>recorder.js</h1>
<h2>HTML5录音解决方案</h2>
<p>由于Chrome47以上以及QQ浏览器需要HTTPS的支持,所以烦请更换至360、FF、Edge进行体验,或下载项目至本地通过localhost访问。</p>
<p>另:IE和Safari全版本不支持录音功能</p>
<button id="start" class="ui-btn ui-btn-primary" disabled>录音</button>
<button id="stop" class="ui-btn ui-btn-primary" disabled>停止</button>
<div id="audio-container"></div>
<script src="js/recorder.js"></script>
<script>
window.onload = function(){
// Wire up the recorder demo: "start" begins capture, "stop" ends it and
// appends a playable <audio> element with the recorded MP3 blob.
var start = document.querySelector('#start');
var stop = document.querySelector('#stop');
var container = document.querySelector('#audio-container');
var recorder = new Recorder({
sampleRate: 44100, //sampling rate; defaults to 44100Hz (standard MP3 sample rate)
bitRate: 128, //bit rate; defaults to 128kbps (standard MP3 quality)
success: function(){ //success callback
start.disabled = false;
},
error: function(msg){ //error callback
alert(msg);
},
fix: function(msg){ //callback when HTML5 recording is not supported
alert(msg);
}
});
//start recording
//recorder.start();
//stop recording
//recorder.stop();
//get the recorded audio as an MP3-encoded Blob
//recorder.getBlob(function(blob){ success callback; blob is the audio file
// ...
//},function(msg){ failure callback; msg is the error message
// ...
//});
//getUserMedia() no longer works on insecure origins. To use this feature, you should consider switching your application to a secure origin, such as HTTPS.
start.addEventListener('click',function(){
this.disabled = true;
stop.disabled = false;
// Pause any audio elements from previous recordings before capturing.
var audio = document.querySelectorAll('audio');
for(var i = 0; i < audio.length; i++){
if(!audio[i].paused){
audio[i].pause();
}
}
recorder.start();
});
stop.addEventListener('click',function(){
this.disabled = true;
start.disabled = false;
recorder.stop();
// Fetch the MP3 blob and append a playable audio element for it.
recorder.getBlob(function(blob){
var audio = document.createElement('audio');
audio.src = URL.createObjectURL(blob);
audio.controls = true;
container.appendChild(audio);
});
});
};
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
# Clone deepstream.io and run its v3 end-to-end test suite.
git clone https://github.com/deepstreamIO/deepstream.io.git
cd deepstream.io
# Rewrite submodule URLs to HTTPS so no SSH key is needed.
# NOTE(review): the sed pattern looks email-redacted in this copy
# (expected something like 's/git@github\.com:/https:...'); verify.
sed -i 's/[email protected]:/https:\/\/github.com\//' .gitmodules
git submodule update --init --recursive
npm i
npm run e2e:v3
| {
"pile_set_name": "Github"
} |
// UIProgressView+AFNetworking.h
//
// Copyright (c) 2013-2015 AFNetworking (http://afnetworking.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#import <Foundation/Foundation.h>
#import <Availability.h>
#if defined(__IPHONE_OS_VERSION_MIN_REQUIRED)
#import <UIKit/UIKit.h>
@class AFURLConnectionOperation;
/**
 This category adds methods to the UIKit framework's `UIProgressView` class. The methods in this category provide support for binding the progress to the upload and download progress of a session task or request operation.
 */
@interface UIProgressView (AFNetworking)
///------------------------------------
/// @name Setting Session Task Progress
///------------------------------------
/**
 Binds the progress to the upload progress of the specified session task.
 @param task The session task.
 @param animated `YES` if the change should be animated, `NO` if the change should happen immediately.
 */
#if __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000
- (void)setProgressWithUploadProgressOfTask:(NSURLSessionUploadTask *)task
                                   animated:(BOOL)animated;
#endif
/**
 Binds the progress to the download progress of the specified session task.
 @param task The session task.
 @param animated `YES` if the change should be animated, `NO` if the change should happen immediately.
 */
#if __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000
- (void)setProgressWithDownloadProgressOfTask:(NSURLSessionDownloadTask *)task
                                     animated:(BOOL)animated;
#endif
///-----------------------------------------
/// @name Setting Request Operation Progress
///-----------------------------------------
/**
 Binds the progress to the upload progress of the specified request operation.
 @param operation The request operation.
 @param animated `YES` if the change should be animated, `NO` if the change should happen immediately.
 */
- (void)setProgressWithUploadProgressOfOperation:(AFURLConnectionOperation *)operation
                                        animated:(BOOL)animated;
/**
 Binds the progress to the download progress of the specified request operation.
 @param operation The request operation.
 @param animated `YES` if the change should be animated, `NO` if the change should happen immediately.
 */
- (void)setProgressWithDownloadProgressOfOperation:(AFURLConnectionOperation *)operation
                                          animated:(BOOL)animated;
@end
#endif
| {
"pile_set_name": "Github"
} |
from typing import Any
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from hamcrest.core.matcher import Matcher
from hamcrest.core.string_description import tostring
__author__ = "Chris Rose"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
__unittest = True
class EqualityWrapper(object):
    """Adapts a Hamcrest matcher to Python's ``==`` protocol.

    An instance compares equal to exactly those objects that satisfy the
    wrapped matcher, which lets a matcher be handed to code that tests
    with plain equality (e.g. mock's ``assert_called_with``).
    """

    def __init__(self, matcher: Matcher) -> None:
        self.matcher = matcher

    def __eq__(self, obj: Any) -> bool:
        # Equality is defined as "the wrapped matcher accepts the object".
        return self.matcher.matches(obj)

    def __repr__(self) -> str:
        return tostring(self.matcher)

    def __str__(self) -> str:
        # Delegate so str() and repr() render identically.
        return self.__repr__()
def match_equality(matcher: Matcher) -> EqualityWrapper:
"""Wraps a matcher to define equality in terms of satisfying the matcher.
``match_equality`` allows Hamcrest matchers to be used in libraries that
are not Hamcrest-aware. They might use the equality operator::
assert match_equality(matcher) == object
Or they might provide a method that uses equality for its test::
library.method_that_tests_eq(match_equality(matcher))
One concrete example is integrating with the ``assert_called_with`` methods
in Michael Foord's `mock <http://www.voidspace.org.uk/python/mock/>`_
library.
:param matcher: a Hamcrest matcher, or a plain value (wrap_matcher
promotes non-matchers to equal_to matchers).
:return: an EqualityWrapper whose ``==`` applies the matcher.
"""
return EqualityWrapper(wrap_matcher(matcher))
| {
"pile_set_name": "Github"
} |
<resources>
<string name="app_name">RN039</string>
<string name="fb_app_id">559717777526537</string>
<string name="ak_client_token">b895234af645976dee1e39a60fdd0592</string>
</resources>
| {
"pile_set_name": "Github"
} |
'use strict';
// do not edit .js files directly - edit src/index.jst
// NOTE(review): envHasBigInt64Array is unused in this visible chunk;
// presumably referenced by the generated typed-array branch elsewhere.
var envHasBigInt64Array = typeof BigInt64Array !== 'undefined';
// Deep structural equality check. Handles arrays, Map, Set, typed arrays
// (ArrayBuffer views), RegExp, objects with custom valueOf/toString, plain
// objects, and treats NaN as equal to NaN. Does NOT handle cyclic inputs.
module.exports = function equal(a, b) {
  // Identical references (or identical primitives) are trivially equal.
  if (a === b) return true;
  if (a && b && typeof a == 'object' && typeof b == 'object') {
    // Different prototypes (e.g. Array vs Object) can never be deep-equal.
    if (a.constructor !== b.constructor) return false;
    var length, i, keys;
    if (Array.isArray(a)) {
      length = a.length;
      if (length != b.length) return false;
      for (i = length; i-- !== 0;)
        if (!equal(a[i], b[i])) return false;
      return true;
    }
    if ((a instanceof Map) && (b instanceof Map)) {
      if (a.size !== b.size) return false;
      // Keys are compared by Map identity (has), values recursively.
      for (i of a.entries())
        if (!b.has(i[0])) return false;
      for (i of a.entries())
        if (!equal(i[1], b.get(i[0]))) return false;
      return true;
    }
    if ((a instanceof Set) && (b instanceof Set)) {
      if (a.size !== b.size) return false;
      for (i of a.entries())
        if (!b.has(i[0])) return false;
      return true;
    }
    if (ArrayBuffer.isView(a) && ArrayBuffer.isView(b)) {
      // Typed arrays: element-wise comparison.
      length = a.length;
      if (length != b.length) return false;
      for (i = length; i-- !== 0;)
        if (a[i] !== b[i]) return false;
      return true;
    }
    if (a.constructor === RegExp) return a.source === b.source && a.flags === b.flags;
    // Objects overriding valueOf/toString are compared via those results.
    if (a.valueOf !== Object.prototype.valueOf) return a.valueOf() === b.valueOf();
    if (a.toString !== Object.prototype.toString) return a.toString() === b.toString();
    keys = Object.keys(a);
    length = keys.length;
    if (length !== Object.keys(b).length) return false;
    for (i = length; i-- !== 0;)
      if (!Object.prototype.hasOwnProperty.call(b, keys[i])) return false;
    for (i = length; i-- !== 0;) {
      var key = keys[i];
      if (key === '_owner' && a.$$typeof) {
        // React-specific: avoid traversing React elements' _owner.
        // _owner contains circular references
        // and is not needed when comparing the actual elements (and not their owners)
        continue;
      }
      if (!equal(a[key], b[key])) return false;
    }
    return true;
  }
  // true if both NaN, false otherwise
  return a!==a && b!==b;
};
| {
"pile_set_name": "Github"
} |
// CML test helper: base class for polymorphic-collection tests. Holds a
// single string value and knows how to format itself to a text stream.
Class=[NonFinal]
ClassPath MEng.User.Tests.TestPolyColBase;
ParentClass MEng.Formattable;
EndClass;
Imports=
MEng.System.Runtime.TextOutStream;
EndImports;
Members=
// The value emitted by FormatTo; set via SetBaseValue().
String m_BaseValue;
EndMembers;
Methods=[Public,Const,Overrides]
// MEng.Formattable override: writes "BaseClass=<value>" to the stream.
Method FormatTo([InOut] TextOutStream TarStrm)
Begin
TarStrm.FmtStr("BaseClass=");
TarStrm.FmtStr(m_BaseValue);
EndMethod;
EndMethods;
Methods=[Public,Final]
Constructor()
Begin
EndConstructor;
// Store the value that FormatTo will emit.
Method SetBaseValue([In] String ToSet)
Begin
m_BaseValue := ToSet;
EndMethod;
EndMethods;
| {
"pile_set_name": "Github"
} |
#include <pbs_config.h>
#include <set>
#include <dirent.h>
#include <stdio.h>
#include <sys/types.h>
#include <ctype.h>
#include <signal.h>
#include <sys/param.h>
#include <fcntl.h>
#include <libgen.h>
#include <hwloc.h>
#include <errno.h>
#include <sys/stat.h>
#ifdef USELIBCPUSET
# include <bitmask.h>
# include <cpuset.h>
#endif
#define PBS_MOM 1
#include "libpbs.h"
#include "attribute.h"
#include "resource.h"
#include "server_limits.h"
#include "pbs_job.h"
#include "pbs_nodes.h"
#include "log.h"
#include "pbs_cpuset.h"
#include "mom_memory.h"
#include "mom_config.h"
#include "node_internals.hpp"
/* NOTE: move these three things to utils when lib is checked in */
#ifndef MAXPATHLEN
#define MAXPATHLEN 1024
#endif /* MAXPATHLEN */
#ifndef FAILURE
#define FAILURE 0
#endif /* FAILURE */
#ifndef SUCCESS
#define SUCCESS 1
#endif /* SUCCESS */
extern node_internals internal_layout;
extern hwloc_topology_t topology;
extern int MOMConfigUseSMT;
#ifdef NUMA_SUPPORT
extern nodeboard node_boards[];
extern int num_node_boards;
#endif /* NUMA_SUPPORT */
extern int LOGLEVEL;
extern long system_ncpus;
char cpuset_prefix[MAXPATHLEN];
/* FIXME: TODO: TTORQUECPUSET_PATH, enabling cpuset support, and correct error
* checking need a run-time config */
/*
 * set_cpuset_prefix - detect whether this kernel's cpuset filesystem
 * prefixes its control files with "cpuset." (a "cpuset.cpus" file exists
 * under the root cpuset mount) and cache the result in the global
 * cpuset_prefix string, which is prepended when building file names.
 */
void set_cpuset_prefix()

  {
  char        path[MAXPATHLEN];
  struct stat statbuf;

  cpuset_prefix[0] = '\0';

  /* snprintf (not sprintf) so a long mount path cannot overflow path[] */
  snprintf(path, sizeof(path), "%s/cpuset.cpus", TROOTCPUSET_PATH);

  if (lstat(path, &statbuf) != -1)
    snprintf(cpuset_prefix, sizeof(cpuset_prefix), "cpuset.");
  }
/*
 * manual_cpuset_init - ensure the root cpuset filesystem is mounted.
 *
 * If TROOTCPUSET_PATH does not exist, create it and mount the cpuset
 * filesystem there, then record the kernel's control-file prefix via
 * set_cpuset_prefix().
 *
 * Returns PBSE_NONE on success, -1 on failure.
 */
int manual_cpuset_init()

  {
  struct stat statbuf;
  int         rc = PBSE_NONE;
  char        cmd[MAXPATHLEN + 1];

  if (lstat(TROOTCPUSET_PATH, &statbuf) == -1)
    {
    /* create cpuset base directory (tolerate a racing creator) */
    if ((mkdir(TROOTCPUSET_PATH, 0755) == -1) && (errno != EEXIST))
      {
      fprintf(stderr, "Cannot create directory '%s'\n", TROOTCPUSET_PATH);
      return(-1);
      }

    /* now mount it - snprintf guards against a long path, and any
     * non-zero exit status from mount(8) is treated as failure (the
     * previous check for -1 only caught fork/exec errors, silently
     * ignoring a failed mount) */
    snprintf(cmd, sizeof(cmd), "mount -t cpuset none %s", TROOTCPUSET_PATH);

    if (system(cmd) != 0)
      {
      fprintf(stderr,"Cannot mount directory '%s'\n",TROOTCPUSET_PATH);
      rc = -1;
      }
    }

  set_cpuset_prefix();

  return(rc);
  }
/**
* Initializes cpuset usage.
*
* Returns 0 on success.
* On failure, -1 is returned, error is logged.
*
* NOTES:
* - The current code looks for the root cpuset and
* fails if it does not exist. When using libcpuset,
* descriptive error messages are constructed from
* errno.
* - Sophisticated implementations may figure
* out the mount point of the cpuset VFS, and may
* set base paths that are currently hardcoded in
* TROOTCPUSET_PATH and the like.
*/
int init_cpusets(void)
{
/* -1 until a probe succeeds; returned as-is on the libcpuset path */
int rc = -1;
#ifdef USELIBCPUSET
struct cpuset *cp = NULL;
#endif
#ifdef USELIBCPUSET
/* Allocate a cpuset */
if ((cp = cpuset_alloc()) == NULL)
{
log_err(errno, __func__, (char *)"failed to allocate cpuset");
return(-1);
}
/* Check if cpusets are available by querying the root cpuset.
* ENOSYS/ENODEV are distinguished only to log a clearer message. */
if ((rc = cpuset_query(cp, TROOTCPUSET_BASE)) == -1)
{
if (errno == ENOSYS)
log_err(errno, __func__, (char *)"failed to query root cpuset, cpusets not supported on this system");
else if (errno == ENODEV)
log_err(errno, __func__, (char *)"failed to query root cpuset, cpusets not enabled on this system");
else
log_err(errno, __func__, (char *)"failed to query root cpuset");
}
cpuset_free(cp);
return(rc);
#else /* !USELIBCPUSET */
/* Check if /dev/cpuset/cpus exists; mounts the cpuset fs if needed */
rc = manual_cpuset_init();
return(rc);
#endif /* USELIBCPUSET */
} /* END init_cpusets() */
/**
* Creates/modifies a cpuset.
*
* Returns 0 on success.
* On failure, -1 is returned, errno is set, and log_buffer is populated.
*
* @param name - (I) - string
* @param cpus - (I) - hwloc_bitmap_t
* @param mems - (I) - hwloc_bitmap_t
* @param flags - (I) - mode_t
*
* NOTES:
* - If name starts with /, it is used as is. If not, it is
* relative to the TORQUE cpuset.
* - If cpus and/or mems are NULL, the corresponding cpuset
* properties are left empty (create new cpuset),
* or untouched (modify existing cpuset).
* - flags may contain ORed O_CREAT, O_EXCL. The meaning is
* similar to open(2):
* 0: modify cpuset, fail if it does not exist.
* O_CREAT: create cpuset if it does not exist, otherwise modify.
* O_CREAT | O_EXCL: create cpuset, fail if it already exists.
* - The cpuset is created with mode 0755 and owned by ruid.
* - When using libcpuset, and create fails, the cpuset does not exist.
* - When using libcpuset, and modify fails, the cpuset remains unmodified.
* - When not using libcpuset, and writing cpus or mems fails, the
* cpuset path remains in an undefined state.
*/
int create_cpuset(

  const char           *name,  /* I */
  const hwloc_bitmap_t  cpus,  /* I */
  const hwloc_bitmap_t  mems,  /* I */
  mode_t                flags) /* I */

  {
  char            cpuset_path[MAXPATHLEN + 1];
#ifdef USELIBCPUSET
  struct cpuset  *cp = NULL;
  struct bitmask *mask = NULL;
  int             idx;
#else
  char            path[MAXPATHLEN + 1];
  char            cpuset_buf[MAXPATHLEN];
  struct stat     statbuf;
  FILE           *fd;
#endif

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_BASE, name);

  /* Allocate a cpuset */
  if ((cp = cpuset_alloc()) == NULL)
    {
    sprintf(log_buffer, "(%s) failed to allocate cpuset", __func__);
    return(-1);
    }

  /*
   * Query cpuset.
   * If it exists, fail if O_CREAT | O_EXCL.
   * If it does not exist, fail unless O_CREAT.
   * If query fails for other reasons, fail.
   */
  if (cpuset_query(cp, cpuset_path) == 0)
    {
    if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
      {
      sprintf(log_buffer, "(%s) failed to create cpuset %s", __func__, cpuset_path);
      errno = EEXIST;
      cpuset_free(cp);
      return(-1);
      }

    /* cpuset exists already; fall through to modify it */
    flags &= ~O_CREAT;
    }
  else if (errno == ENOENT)
    {
    if ((flags & O_CREAT) != O_CREAT)
      {
      sprintf(log_buffer, "(%s) failed to modify cpuset %s", __func__, cpuset_path);
      cpuset_free(cp);
      return(-1);
      }
    }
  else
    {
    sprintf(log_buffer, "(%s) failed to query cpuset %s", __func__, cpuset_path);
    cpuset_free(cp);
    return(-1);
    }

  /* Set cpus */
  if (cpus != NULL)
    {
    if ((mask = bitmask_alloc(cpuset_cpus_nbits())) == NULL)
      {
      sprintf(log_buffer, "(%s) failed to allocate bitmask", __func__);
      cpuset_free(cp);
      return(-1);
      }

    /* copy the hwloc bitmap into a libcpuset bitmask, bit by bit */
    hwloc_bitmap_foreach_begin(idx, cpus)
    bitmask_setbit(mask, idx);
    hwloc_bitmap_foreach_end();

    if (cpuset_setcpus(cp, mask) == -1)
      {
      sprintf(log_buffer, "(%s) failed to set cpus in cpuset %s", __func__, cpuset_path);
      bitmask_free(mask);
      cpuset_free(cp);
      return(-1);
      }

    bitmask_free(mask);
    } /* END cpus != NULL */

  /* Set mems */
  if (mems != NULL)
    {
    if ((mask = bitmask_alloc(cpuset_mems_nbits())) == NULL)
      {
      sprintf(log_buffer, "(%s) failed to allocate bitmask", __func__);
      cpuset_free(cp);
      return(-1);
      }

    hwloc_bitmap_foreach_begin(idx, mems)
    bitmask_setbit(mask, idx);
    hwloc_bitmap_foreach_end();

    if (cpuset_setmems(cp, mask) == -1)
      {
      sprintf(log_buffer, "(%s) failed to set mems in cpuset %s", __func__, cpuset_path);
      bitmask_free(mask);
      cpuset_free(cp);
      return(-1);
      }

    bitmask_free(mask);
    }

  /* Create/modify cpuset */
  if ((flags & O_CREAT) == O_CREAT)
    {
    if (cpuset_create(cpuset_path, cp) == -1)
      {
      sprintf(log_buffer, "(%s) failed to create cpuset %s", __func__, cpuset_path);
      cpuset_free(cp);
      return(-1);
      }

    sprintf(log_buffer, "(%s) successfully created cpuset %s", __func__, cpuset_path);
    }
  else
    {
    if (cpuset_modify(cpuset_path, cp) == -1)
      {
      sprintf(log_buffer, "(%s) failed to modify cpuset %s", __func__, cpuset_path);
      cpuset_free(cp);
      return(-1);
      }

    sprintf(log_buffer, "(%s) successfully modified cpuset %s", __func__, cpuset_path);
    }

  /* Done */
  cpuset_free(cp);
  return(PBSE_NONE);

#else
  /* Construct the name of the cpuset */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_PATH, name);

  /*
   * See if cpuset exists.
   * If it exists, fail if O_CREAT | O_EXCL.
   * If it does not exist, fail unless O_CREAT.
   * If query fails for other reasons, fail.
   */
  if (lstat(cpuset_path, &statbuf) == 0)
    {
    if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
      {
      sprintf(log_buffer, "(%s) failed to create cpuset %s", __func__, cpuset_path);
      errno = EEXIST;
      return(-1);
      }

    /* cpuset exists already; fall through to modify it */
    flags &= ~O_CREAT;
    }
  else if (errno == ENOENT)
    {
    if ((flags & O_CREAT) != O_CREAT)
      {
      sprintf(log_buffer, "(%s) failed to modify cpuset %s", __func__, cpuset_path);
      return(-1);
      }
    }
  else
    {
    sprintf(log_buffer, "(%s) failed to stat cpuset %s", __func__, cpuset_path);
    return(-1);
    }

  /* Create cpuset path, if needed */
  if ((flags & O_CREAT) == O_CREAT)
    {
    if (mkdir(cpuset_path, 0755) == -1)
      {
      sprintf(log_buffer, "(%s) failed to create cpuset %s", __func__, cpuset_path);
      return(-1);
      }
    }

  /* Set cpus */
  if (cpus != NULL)
    {
    /* Bug fix: use snprintf here — cpuset_path can already be MAXPATHLEN
     * characters long, so the former sprintf() could overflow path[]. */
    snprintf(path, sizeof(path), "%s/%scpus", cpuset_path, cpuset_prefix);

    if ((fd = fopen(path, "w")) == NULL)
      {
      sprintf(log_buffer, "(%s) failed to open %s", __func__, path);
      return(-1);
      }

    /* cpus are written in ASCII list format (e.g. "0-3,8") */
    hwloc_bitmap_list_snprintf(cpuset_buf, sizeof(cpuset_buf), cpus);

    if (fwrite(cpuset_buf, sizeof(char), strlen(cpuset_buf), fd) != strlen(cpuset_buf))
      {
      sprintf(log_buffer, "(%s) failed to write %s", __func__, path);
      fclose(fd);
      return(-1);
      }

    fclose(fd);
    }

  /* Set mems */
  if (mems != NULL)
    {
    /* Bug fix: snprintf for the same overflow reason as above. */
    snprintf(path, sizeof(path), "%s/%smems", cpuset_path, cpuset_prefix);

    if ((fd = fopen(path, "w")) == NULL)
      {
      sprintf(log_buffer, "(%s) failed to open %s", __func__, path);
      return(-1);
      }

    hwloc_bitmap_list_snprintf(cpuset_buf, sizeof(cpuset_buf), mems);

    if (fwrite(cpuset_buf, sizeof(char), strlen(cpuset_buf), fd) != strlen(cpuset_buf))
      {
      sprintf(log_buffer, "(%s) failed to write %s", __func__, path);
      fclose(fd);
      return(-1);
      }

    fclose(fd);
    }

  /* Success */
  if ((flags & O_CREAT) == O_CREAT)
    sprintf(log_buffer, "(%s) successfully created cpuset %s", __func__, cpuset_path);
  else
    sprintf(log_buffer, "(%s) successfully modified cpuset %s", __func__, cpuset_path);

  return(PBSE_NONE);
#endif
  } /* END create_cpuset() */
/**
* Read cpus and mems of a cpuset into hwloc_bitmap structs.
*
* Returns 0 on success.
* On failure, -1 is returned, errno is set, and log_buffer is populated.
*
* @param name - (I) - string
* @param cpus - (O) - hwloc_bitmap_t
* @param mems - (O) - hwloc_bitmap_t
*
* NOTES:
* - If name starts with /, it is used as is. If not, it is
* relative to the TORQUE cpuset.
* - If cpus and/or mems are NULL, the corresponding cpuset
* properties are not read. This can be used as quick check if a
* cpuset exists (rc is -1, errno is ENOENT in this case).
* - When using libcpuset, and reading of the cpuset fails,
* cpus and mems have zero content.
* - When not using libcpuset, and the cpuset does not exist,
* cpus and mems have zero content.
* - When not using libcpuset, and reading mems fails,
* cpus is populated but mems has zero content.
* - When not using libcpuset, cpus and mems of the cpuset
* are assumed to be in ASCII list format.
*/
int read_cpuset(

  const char    *name, /* I */
  hwloc_bitmap_t cpus, /* O */
  hwloc_bitmap_t mems) /* O */

  {
  char cpuset_path[MAXPATHLEN + 1];
  char cpuset_buf[MAXPATHLEN + 1];
  int  rc = -1;
#ifdef USELIBCPUSET
  struct cpuset  *cp = NULL;
  struct bitmask *mask = NULL;
#else
  char        path[MAXPATHLEN + 1];
  struct stat statbuf;
  FILE       *fd;
#endif

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_BASE, name);

  /* Zero bitmaps so callers always get defined content on failure */
  if (cpus != NULL)
    hwloc_bitmap_zero(cpus);

  if (mems != NULL)
    hwloc_bitmap_zero(mems);

  /* Allocate a cpuset */
  if ((cp = cpuset_alloc()) == NULL)
    {
    sprintf(log_buffer, "(%s) failed to allocate cpuset", __func__);
    return(-1);
    }

  /* Query cpuset */
  if ((rc = cpuset_query(cp, cpuset_path)) == 0)
    {
    /* Read cpus */
    if (cpus != NULL)
      {
      if ((mask = bitmask_alloc(cpuset_cpus_nbits())) == NULL)
        {
        sprintf(log_buffer, "(%s) failed to allocate bitmask", __func__);
        cpuset_free(cp);
        return(-1);
        }

      if (cpuset_getcpus(cp, mask) == -1)
        {
        sprintf(log_buffer, "(%s) failed to read cpus in cpuset %s", __func__, cpuset_path);
        bitmask_free(mask);
        cpuset_free(cp);
        return(-1);
        }

      /* convert via the ASCII list format shared by both libraries */
      if (bitmask_weight(mask) > 0)
        {
        bitmask_displaylist(cpuset_buf, sizeof(cpuset_buf), mask);
        hwloc_bitmap_list_sscanf(cpus, cpuset_buf);
        }

      bitmask_free(mask);
      }

    /* Read mems */
    if (mems != NULL)
      {
      if ((mask = bitmask_alloc(cpuset_mems_nbits())) == NULL)
        {
        sprintf(log_buffer, "(%s) failed to allocate bitmask", __func__);
        cpuset_free(cp);
        return(-1);
        }

      if (cpuset_getmems(cp, mask) == -1)
        {
        sprintf(log_buffer, "(%s) failed to read mems in cpuset %s", __func__, cpuset_path);
        bitmask_free(mask);
        cpuset_free(cp);
        return(-1);
        }

      if (bitmask_weight(mask) > 0)
        {
        bitmask_displaylist(cpuset_buf, sizeof(cpuset_buf), mask);
        hwloc_bitmap_list_sscanf(mems, cpuset_buf);
        }

      bitmask_free(mask);
      }
    }

  /* Done */
  cpuset_free(cp);
  return(rc);

#else
  /* Construct the name of the cpuset */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_PATH, name);

  /* Zero bitmaps so callers always get defined content on failure */
  if (cpus != NULL)
    hwloc_bitmap_zero(cpus);

  if (mems != NULL)
    hwloc_bitmap_zero(mems);

  /* Stat cpuset */
  if ((rc = lstat(cpuset_path, &statbuf)) == 0)
    {
    /* Read cpus */
    if (cpus != NULL)
      {
      /* snprintf: cpuset_path can already be MAXPATHLEN chars, sprintf could overflow */
      snprintf(path, sizeof(path), "%s/%scpus", cpuset_path, cpuset_prefix);

      if ((fd = fopen(path, "r")) == NULL)
        {
        sprintf(log_buffer, "(%s) failed to open %s", __func__, path);
        return(-1);
        }

      if (fscanf(fd, "%s", cpuset_buf) == 1)
        {
        if (hwloc_bitmap_list_sscanf(cpus, cpuset_buf) < 0)
          {
          sprintf(log_buffer, "(%s) failed to parse %s: %s", __func__, path, cpuset_buf);
          fclose(fd);
          errno = EINVAL;
          return(-1);
          }
        }
      else
        {
        /* Bug fix: close the stream before returning — it was leaked here. */
        fclose(fd);
        errno = ENOENT;
        return(-1);
        }

      fclose(fd);
      }

    /* Read mems */
    if (mems != NULL)
      {
      snprintf(path, sizeof(path), "%s/%smems", cpuset_path, cpuset_prefix);

      if ((fd = fopen(path, "r")) == NULL)
        {
        sprintf(log_buffer, "(%s) failed to open %s", __func__, path);
        return(-1);
        }

      if (fscanf(fd, "%s", cpuset_buf) == 1)
        {
        if (hwloc_bitmap_list_sscanf(mems, cpuset_buf) < 0)
          {
          sprintf(log_buffer, "(%s) failed to parse %s: %s", __func__, path, cpuset_buf);
          fclose(fd);
          errno = EINVAL;
          return(-1);
          }
        }
      else
        {
        /* Bug fix: close the stream before returning — it was leaked here. */
        fclose(fd);
        errno = ENOENT;
        return(-1);
        }

      fclose(fd);
      }
    }
  else
    {
    sprintf(log_buffer, "(%s) failed to stat %s", __func__, cpuset_path);
    }

  /* Done */
  return(rc);
#endif
  } /* END read_cpuset() */
/**
* Deletes a cpuset.
*
* Returns 0 on success.
* On failure, -1 is returned.
*
* @param name - (I) - string
*
* NOTES:
* - If name starts with /, it is used as is. If not, it is
* relative to the TORQUE cpuset.
* - Attempts to delete a non-existing cpuset return -1.
* - Deleting a cpuset, includes killing tasks in it,
* and deleting any descendent cpusets and killing their tasks.
* - If there are no tasks to kill, the function returns quickly.
* - If there are tasks to kill, SIGKILL is sent to each of them,
* followed by a short sleep. If there still remain tasks,
* the procedure is repeated.
* - When using libcpuset, this is tried max. 5 seconds for
* the cpuset including its children.
* - When not using libcpuset, this is tried max. 5 seconds
* for each individual cpuset to delete.
* - When there still remain tasks, deleting fails with
* some errno.
*/
int delete_cpuset(

  const char *name,                      /* I */
  bool        remove_layout_reservation) /* I */

  {
  char cpuset_path[MAXPATHLEN + 1];
#ifndef USELIBCPUSET
  char           path[MAXPATHLEN + 1];
  char           tid[1024];
  struct dirent *pdirent;
  struct stat    statbuf;
  int            npids;
  int            slept;
  FILE          *fd;
  DIR           *dir;
#endif

  /* Release the job's internal cpu/memory reservation first; recursive
   * self-calls below pass false so the layout is only touched once. */
  if (remove_layout_reservation == true)
    internal_layout.remove_job(name);

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_BASE, name);

  /* Nuke the cpuset and all its child cpusets (kills tasks, 5s budget) */
  if (cpuset_nuke(cpuset_path, 5) == 0)
    {
    /* Success */
    if (LOGLEVEL >= 6)
      {
      sprintf(log_buffer, "successfully nuked cpuset %s", cpuset_path);
      log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
      }

    return(PBSE_NONE);
    }

  /* Failure — a missing cpuset (ENOENT) is not worth logging */
  if (errno != ENOENT)
    {
    sprintf(log_buffer, "failed to nuke cpuset %s", cpuset_path);
    log_err(errno, __func__, log_buffer);
    }

  return(-1);

#else
  /* Construct the name of the cpuset */
  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_PATH, name);

  if ((dir = opendir(cpuset_path)) != NULL)
    {
    while ((pdirent = readdir(dir)) != NULL)
      {
      /* Skip parent and current directory. */
      if ((!strcmp(pdirent->d_name, ".")) ||
          (!strcmp(pdirent->d_name, "..")))
        continue;

      /* Prepend directory name to entry name for lstat. */
      snprintf(path, sizeof(path), "%s/%s", cpuset_path, pdirent->d_name);

      /* Skip entry, if lstat fails. */
      if (lstat(path, &statbuf) == -1)
        continue;

      /* If a directory is found, it is a child cpuset. Try to delete it.
       * (Recursive call; layout reservation already handled above.) */
      if ((statbuf.st_mode & S_IFDIR) == S_IFDIR)
        {
        delete_cpuset(path, false);
        }

      /*
       * If there are running processes, try to kill them.
       * If this takes more than 5 seconds, give up.
       */
      else if (!strcmp(pdirent->d_name, "tasks"))
        {
        slept = 0;

        do
          {
          npids = 0;

          /* Re-read "tasks" each pass — killed pids disappear from the file */
          if ((fd = fopen(path, "r")) != NULL)
            {
            while ((fgets(tid, sizeof(tid), fd)) != NULL)
              {
              kill(atoi(tid), SIGKILL);
              npids++;
              }

            fclose(fd);
            }

          if (npids)
            {
            sleep(1);
            slept++;
            }
          } while ((npids > 0) && (slept <= 5));
        }
      } /* END while((pdirent = readdir(dir)) != NULL) */

    closedir(dir);
    } /* END if (opendir) */

  /* The cpuset directory must be empty of tasks/children for rmdir to work */
  if (rmdir(cpuset_path) == 0)
    {
    /* Success */
    if (LOGLEVEL >= 6)
      {
      sprintf(log_buffer, "successfully deleted cpuset %s", cpuset_path);
      log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
      }

    return(PBSE_NONE);
    }

  /* Failure — a missing cpuset (ENOENT) is not worth logging */
  if (errno != ENOENT)
    {
    sprintf(log_buffer, "failed to delete cpuset %s", cpuset_path);
    log_err(errno, __func__, log_buffer);
    }

  return(-1);
#endif
  } /* END delete_cpuset() */
/**
* Cleanup TORQUE cpuset from cpusets of jobs that are gone.
*
* Called after init_abort_jobs.
*/
/**
 * Sweep the TORQUE cpuset hierarchy and delete per-job cpusets whose
 * job is no longer known to this MOM.
 *
 * Called after init_abort_jobs.
 */
void cleanup_torque_cpuset(void)

  {
  DIR           *dirp;
  struct dirent *ent;
  struct stat    sb;
  char           child[MAXPATHLEN + 1];

  if ((dirp = opendir(TTORQUECPUSET_PATH)) == NULL)
    {
    log_err(errno, __func__, (char *)"failed to open TORQUE cpuset hierarchy");
    return;
    }

  for (ent = readdir(dirp); ent != NULL; ent = readdir(dirp))
    {
    /* ignore "." and ".." */
    if ((strcmp(ent->d_name, ".") == 0) ||
        (strcmp(ent->d_name, "..") == 0))
      continue;

    /* build the full path so we can lstat the entry */
    snprintf(child, sizeof(child), "%s/%s", TTORQUECPUSET_PATH, ent->d_name);

    if (lstat(child, &sb) == -1)
      continue;

    /* only subdirectories are cpusets */
    if ((sb.st_mode & S_IFDIR) != S_IFDIR)
      continue;

    /* a cpuset whose name matches a known job is still in use */
    if (mom_find_job(ent->d_name) != NULL)
      {
      sprintf(log_buffer, "found active cpuset %s", child);
      log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
      continue;
      }

    /* orphaned — no matching job, so remove it */
    if (LOGLEVEL >= 6)
      {
      sprintf(log_buffer, "about to delete orphaned cpuset %s", child);
      log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
      }

    if (delete_cpuset(ent->d_name, true) == 0)
      {
      sprintf(log_buffer, "deleted orphaned cpuset %s", child);
      log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
      }
    }

  closedir(dirp);
  } /* END cleanup_torque_cpuset() */
/*
* is_physical_core()
*
* @pre-cond: topology must be initialized
*
* @return true if the object at os_index is a physical core, false otherwise
*/
bool is_physical_core(
unsigned int os_index)
{
return(hwloc_get_obj_by_type(topology, HWLOC_OBJ_CORE, os_index) != NULL);
}
/*
 * When $use_smt is off, strip all but the first logical CPU (PU) of each
 * physical core from *cpus, in place.
 */
void remove_logical_processor_if_requested(

  hwloc_bitmap_t *cpus)

  {
  hwloc_obj_t obj;
  hwloc_obj_t pu;
  int         i;

  /*
   * Handle SMT CPUs.
   * If a system has SMT enabled, there are more than one logical CPU per physical core.
   * If MOMConfigUseSMT is off, we only want the first logical CPU of a core in the cpuset.
   * Thus we map the additional logical CPUs out of the cpuset.
   * To be portable among architectures as much as possible, the only assumption that
   * is made here is that the CPUs to become mapped out are HWLOC_OBJ_PU objects that
   * are children of a HWLOC_OBJ_CORE object.
   * If there are no HWLOC_OBJ_CORE objects in the cpuset, we cannot detect if cpuset members
   * are physical or logical. Then the cpuset is left as-is.
   */
  if (!MOMConfigUseSMT && *cpus)
    {
    /* iterate over every core whose cpuset intersects *cpus */
    for (obj = hwloc_get_next_obj_inside_cpuset_by_type(topology, *cpus, HWLOC_OBJ_CORE, NULL);
         obj;
         obj = hwloc_get_next_obj_inside_cpuset_by_type(topology, *cpus, HWLOC_OBJ_CORE, obj))
      {
      /* start at i = 1: PU index 0 (the core's first logical CPU) is kept,
       * every further sibling PU is masked out of *cpus */
      i = 1;

      while ((pu = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, HWLOC_OBJ_PU, i++)) != NULL)
        hwloc_bitmap_andnot(*cpus, *cpus, pu->cpuset);
      }
    }
  }
/*
* get_cpu_list()
*
* reads the cpu list for the job matching jobid and stores it in buf
*
* @param jobid - the id of the job we're getting a cpu list for
* @param buf - the buffer where we'll store the list
* @param bufsize - the size of the buffer where we can store the list
*
*/
void get_cpu_list(

  const char *jobid,
  char       *buf,
  int         bufsize)

  {
  char  cpuset_path[MAXPATHLEN + 1];
  FILE *cpu_file;
  int   fd;

  if (buf == NULL)
    return;

  /* buf is pre-zeroed, so a failed open/read yields an empty string */
  memset(buf, 0, bufsize);

  snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s/%scpus",
    TTORQUECPUSET_PATH, jobid, cpuset_prefix);

  if ((cpu_file = fopen(cpuset_path, "r")) != NULL)
    {
    fd = fileno(cpu_file);

    /* treat a failed read as "no cpu list" — buf is already zeroed */
    if (read(fd, buf, bufsize - 1) < 0)
      buf[0] = '\0';

    /* Bug fix: the stream was never closed, leaking a FILE* per call. */
    fclose(cpu_file);
    }
  } /* END get_cpu_list() */
/**
* Initializes the TORQUE cpuset.
*
* Returns 0 on success.
* Returns -1 on failure.
*
* NOTES:
* - This function checks if cpusets are supported and available.
* So call it before any other cpuset-related things, and abort
* pbs_mom if it fails.
* - This function sets up cpus and mems, only. Other cpuset properties
* are not initialized/touched.
* - With NUMA_SUPPORT, the TORQUE cpuset will contain the ORed
* cpusets and nodesets of all nodeboards, as defined in mom.layout.
* Initialization will fail, if mom.layout has wrong content
* (e.g. defines nodeboards that overlap with a boot cpuset).
 * - With NUMA_SUPPORT, the TORQUE cpuset cpus and mems are sync'ed
* with mom.layout at every call of this function, even when the
* TORQUE cpuset already exists.
* - Without NUMA_SUPPORT, when the TORQUE cpuset does not exist,
* it becomes initialized with all cpus and mems of the system,
* excluding a possibly existing boot cpuset, and excluding
* logical CPUs if $use_smt is off.
* - Without NUMA_SUPPORT, when the TORQUE cpuset already exists,
* it is left untouched. It is not checked if it is in sync
* with the current setting of $use_smt (if it contains unwanted
* cpus when $use_smt is off, or if one may add additional
* cpus when $use_smt is on).
*/
int init_torque_cpuset(void)

  {
  hwloc_bitmap_t cpus = NULL;
  hwloc_bitmap_t mems = NULL;
  int            rc = -1;
#ifndef NUMA_SUPPORT
  hwloc_bitmap_t bootcpus = NULL;
  hwloc_bitmap_t bootmems = NULL;
#else
  int            i;
#endif

#ifdef USELIBCPUSET
  sprintf(log_buffer, "Init cpuset %s", TTORQUECPUSET_BASE);
#else
  sprintf(log_buffer, "Init cpuset %s", TTORQUECPUSET_PATH);
#endif
  log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

  /* Check if cpusets are supported, return error if not */
  if ((init_cpusets()) == -1)
    return(-1);

  /* Allocate bitmaps */
  if (((cpus = hwloc_bitmap_alloc()) == NULL) ||
      ((mems = hwloc_bitmap_alloc()) == NULL))
    {
    log_err(ENOMEM, __func__, (char *)"failed to allocate bitmap");
    return(-1);
    }

#ifdef NUMA_SUPPORT
  /* Add cpus and mems of all nodeboards (mom.layout defines them) */
  for (i = 0; i < num_node_boards; i++)
    {
    hwloc_bitmap_or(cpus, cpus, node_boards[i].cpuset);
    hwloc_bitmap_or(mems, mems, node_boards[i].nodeset);
    }
#else
  /*
   * See if cpuset exists.
   * If it's already there and has non-empty cpus and mems, leave as is, set up otherwise.
   */
#ifdef USELIBCPUSET
  if (read_cpuset(TTORQUECPUSET_BASE, cpus, mems) == -1)
#else
  if (read_cpuset(TTORQUECPUSET_PATH, cpus, mems) == -1)
#endif
    {
    /* ENOENT simply means "not there yet" — anything else is fatal */
    if (errno != ENOENT)
      {
      /* Error */
      log_err(errno, __func__, log_buffer);
      goto finish;
      }
    }
  else if (! (hwloc_bitmap_iszero(cpus) || hwloc_bitmap_iszero(mems)))
    {
    /* Exists with non-empty cpus and mems, adjust and tell what we have and return */
    remove_logical_processor_if_requested(&cpus);

    sprintf(log_buffer, "cpus = ");
    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), cpus);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

    sprintf(log_buffer, "mems = ");
    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), mems);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

    rc = 0;
    goto finish;
    }

  /* Add all resources of the root cpuset */
#ifdef USELIBCPUSET
  if (read_cpuset(TROOTCPUSET_BASE, cpus, mems) == -1)
#else
  if (read_cpuset(TROOTCPUSET_PATH, cpus, mems) == -1)
#endif
    {
    log_err(errno, __func__, log_buffer);
    goto finish;
    }

  /* strip extra logical CPUs per core when $use_smt is off */
  remove_logical_processor_if_requested(&cpus);

  /* Allocate bitmaps before querying boot cpuset */
  if ((bootcpus = hwloc_bitmap_alloc()) == NULL)
    {
    log_err(errno, __func__, (char *)"failed to allocate bitmap");
    goto finish;
    }

  if ((bootmems = hwloc_bitmap_alloc()) == NULL)
    {
    log_err(errno, __func__, (char *)"failed to allocate bitmap");
    goto finish;
    }

  /*
   * Query boot cpuset.
   * If it is there, subtract its cpus and mems.
   */
#ifdef USELIBCPUSET
  if (read_cpuset(TBOOTCPUSET_BASE, bootcpus, bootmems) == -1)
#else
  if (read_cpuset(TBOOTCPUSET_PATH, bootcpus, bootmems) == -1)
#endif
    {
    /* a missing boot cpuset is normal; other errors are fatal */
    if (errno != ENOENT)
      {
      /* Error */
      log_err(errno, __func__, log_buffer);
      goto finish;
      }
    }
  else
    {
    sprintf(log_buffer, "subtracting cpus of boot cpuset: ");
    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), bootcpus);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

    sprintf(log_buffer, "subtracting mems of boot cpuset: ");
    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), bootmems);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

    hwloc_bitmap_andnot(cpus, cpus, bootcpus);
    hwloc_bitmap_andnot(mems, mems, bootmems);
    }
#endif

  sprintf(log_buffer, "setting cpus = ");
  hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), cpus);
  log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

  sprintf(log_buffer, "setting mems = ");
  hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), mems);
  log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);

#ifdef USELIBCPUSET
  /* remove existing Torque cpuset if it exists (has empty/invalid cpus and/or mems) */
  /* before creating new one */
  if (rmdir(TTORQUECPUSET_BASE) == -1 && errno != ENOENT)
    {
    sprintf(log_buffer, "%s: rmdir failed with errno=%d", TTORQUECPUSET_BASE, errno);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
    }

  if ((rc = create_cpuset(TTORQUECPUSET_BASE, cpus, mems, O_CREAT)) == -1)
#else
  /* remove existing Torque cpuset if it exists (has empty/invalid cpus and/or mems */
  /* before creating new one */
  if (rmdir(TTORQUECPUSET_PATH) == -1 && errno != ENOENT)
    {
    sprintf(log_buffer, "%s: rmdir failed with errno=%d", TTORQUECPUSET_PATH, errno);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
    }

  if ((rc = create_cpuset(TTORQUECPUSET_PATH, cpus, mems, O_CREAT)) == -1)
#endif
    log_err(errno, __func__, log_buffer);

#ifndef NUMA_SUPPORT
finish:
#endif

  /* common cleanup — all bitmaps are freed regardless of which path ran */
  if (cpus != NULL)
    hwloc_bitmap_free(cpus);

  if (mems != NULL)
    hwloc_bitmap_free(mems);

#ifndef NUMA_SUPPORT
  if (bootcpus != NULL)
    hwloc_bitmap_free(bootcpus);

  if (bootmems != NULL)
    hwloc_bitmap_free(bootmems);
#endif

  return(rc);
  } /* END init_torque_cpuset */
/**
* Add cpuset of object at idx in a cpuset to another cpuset.
*
* Returns 0 (success) or -1 (failure)
*
* @param cpuset - (I) - cpuset to search for object
* @param cpus - (I/O) - cpuset to add found object
* @param idx - (I) - position in cpuset
*
* NOTES:
* - idx specifies the position of a bit in cpuset (1st bit set is idx = 0).
* - cpuset is searched for HWLOC_OBJ_CORE and HWLOC_OBJ_PU objects.
* - The target cpuset cpus is ORed with the cpuset of a found object.
*/
int add_obj_from_cpuset(

  hwloc_bitmap_t cpuset, /* I */
  hwloc_bitmap_t cpus,   /* I/O */
  unsigned       idx)    /* I */

  {
  /* Locate the object at position idx inside cpuset: prefer a PU,
   * fall back to a core at the same position. */
  hwloc_obj_t found = hwloc_get_obj_inside_cpuset_by_type(topology, cpuset, HWLOC_OBJ_PU, idx);

  if (found == NULL)
    found = hwloc_get_obj_inside_cpuset_by_type(topology, cpuset, HWLOC_OBJ_CORE, idx);

  if (found == NULL)
    return(-1);

  /* Merge the found object's cpuset into the target */
  hwloc_bitmap_or(cpus, cpus, found->cpuset);

  return(PBSE_NONE);
  } /* END add_obj_from_cpuset() */
/*
 * Return the job's requested "mem" resource converted to kilobytes,
 * or 0 if no memory was requested.
 */
long long get_memory_requested_in_kb(

  job &pjob)

  {
  long long mem_requested = 0;

  /* look up the "mem" entry in the job's resource list */
  resource *mem = find_resc_entry(&pjob.ji_wattr[JOB_ATR_resource],
                    find_resc_def(svr_resc_def, "mem", svr_resc_size));

  if ((mem != NULL) &&
      (mem->rs_value.at_val.at_size.atsv_num != 0))
    {
    /* atsv_shift encodes the unit as a power of two: 10 = kb, 20 = mb, 30 = gb */
    int shift = mem->rs_value.at_val.at_size.atsv_shift;
    mem_requested = mem->rs_value.at_val.at_size.atsv_num;

    /* make sure that the requested memory is in kb */
    while (shift > 10)
      {
      mem_requested *= 1024;
      shift -= 10;
      }
    /* NOTE(review): a shift below 10 (plain bytes) would be returned
     * unscaled — presumably mem sizes always carry shift >= 10; confirm. */
    }

  return(mem_requested);
  } /* END get_memory_requested_in_kb() */
/**
* Creates cpuset for a job.
*
* Returns SUCCESS/FAILURE.
*
* @param pjob - (I) - job
*
* NOTES:
* - The cpuset is named like the job ID.
* - The cpuset is created below the TORQUE cpuset.
* - If a cpuset with the same name already exists, it is
* tried to become deleted. If delete fails, create fails.
* - With NUMA support, the cpuset is constructed by ORing
* subsets of the cpus and mems of the nodeboards that are
* allocated for the job.
* - Without NUMA support, the cpuset will contain
* a subset of the cpus of the TORQUE cpuset, and all
* mems of the TORQUE cpuset.
*/
int create_job_cpuset(

  job *pjob) /* I */

  {
  hwloc_bitmap_t cpus = NULL;
  hwloc_bitmap_t mems = NULL;
  int            rc = FAILURE;
#ifdef NUMA_SUPPORT
  vnodent       *np = pjob->ji_vnods;
  int            j;
  int            numa_idx;
#else
  hwloc_bitmap_t tmems = NULL;
  hwloc_bitmap_t tcpus = NULL;
# ifdef GEOMETRY_REQUESTS
  vnodent       *np = pjob->ji_vnods;
  int            j;
  resource      *presc = NULL;
  resource_def  *prd = NULL;
  hwloc_obj_t    obj = NULL;
  hwloc_obj_t    core = NULL;
# endif
#endif

  /* Delete cpuset, if it exists (stale cpuset from a previous run) */
  delete_cpuset(pjob->ji_qs.ji_jobid, false);

  /* Allocate bitmaps for cpus and mems */
  if (((cpus = hwloc_bitmap_alloc()) == NULL) ||
      ((mems = hwloc_bitmap_alloc()) == NULL))
    {
    log_err(errno, __func__, (char *)"failed to allocate bitmap");
    goto finish;
    }

#ifdef NUMA_SUPPORT
  /* Walk through job's vnodes, add corresponding cpus */
  for (j = 0; j < pjob->ji_numvnod; ++j, np++)
    {
    /* Figure out numa_node for this vnode: the nodeboard index is the
     * numeric suffix after the last '-' in the host name */
    char *dash = strchr(np->vn_host->hn_host, '-');

    if (dash)
      {
      while (strchr(dash + 1, '-'))
        dash = strchr(dash + 1, '-');

      numa_idx = atoi(dash + 1);
      }
    else
      {
      sprintf(log_buffer, "failed to parse node number from nodeboard name %s", np->vn_host->hn_host);
      log_err(-1, __func__, log_buffer);
      continue;
      }

    if ((pjob->ji_wattr[JOB_ATR_node_exclusive].at_flags & ATR_VFLAG_SET) &&
        (pjob->ji_wattr[JOB_ATR_node_exclusive].at_val.at_long != 0))
      {
      /* If job's node_usage is singlejob, simply add all cpus/mems of this vnode */
      hwloc_bitmap_or(cpus, cpus, node_boards[numa_idx].cpuset);
      hwloc_bitmap_or(mems, mems, node_boards[numa_idx].nodeset);
      }
    else
      {
      /* Add core at position vn_index in nodeboard cpuset */
      if (add_obj_from_cpuset(node_boards[numa_idx].cpuset, cpus, np->vn_index) == -1)
        {
        sprintf(log_buffer, "nodeboard %s cpuset contains no CPU at index %d", np->vn_host->hn_host, np->vn_index);
        log_err(-1, __func__, log_buffer);
        }

      /* Set mems to all memory nodes covered by cpus */
      hwloc_cpuset_to_nodeset_strict(topology, cpus, mems);
      }
    } /* END for(j) */

#else /* ndef NUMA_SUPPORT follows */
  /* Allocate bitmap for cpus of TORQUE cpuset */
  if (((tcpus = hwloc_bitmap_alloc()) == NULL) ||
      ((tmems = hwloc_bitmap_alloc()) == NULL))
    {
    log_err(errno, __func__, (char *)"failed to allocate bitmap");
    goto finish;
    }

  /* Read TORQUE cpuset — the job cpuset must be a subset of it */
#ifdef USELIBCPUSET
  if (read_cpuset(TTORQUECPUSET_BASE, tcpus, tmems) == -1)
#else
  if (read_cpuset(TTORQUECPUSET_PATH, tcpus, tmems) == -1)
#endif
    {
    /* Error */
    log_err(errno, __func__, log_buffer);
    goto finish;
    }

#ifdef GEOMETRY_REQUESTS
  /* geometry jobs always get all mems of the TORQUE cpuset */
  hwloc_bitmap_or(mems, mems, tmems);

  /* Check if job requested procs_bitmap */
  prd = find_resc_def(svr_resc_def,"procs_bitmap",svr_resc_size);
  presc = find_resc_entry(&pjob->ji_wattr[JOB_ATR_resource],prd);

  /* If so, walk through job's vnodes, add corresponding cpus */
  if ((presc != NULL) && (presc->rs_value.at_flags & ATR_VFLAG_SET) == TRUE)
    {
    for (j = 0; j < pjob->ji_numvnod; ++j, np++)
      {
      /* Figure out cpu with os_index vn_index */
      if ((obj = hwloc_get_pu_obj_by_os_index(topology, np->vn_index)) == NULL)
        {
        sprintf(log_buffer, "topology contains no CPU at os-index %d", np->vn_index);
        log_err(-1, __func__, log_buffer);
        continue;
        }

      /* Check if this cpu is part of the TORQUE cpuset */
      if (!hwloc_bitmap_isincluded(obj->cpuset, tcpus))
        {
        sprintf(log_buffer, "TORQUE cpuset contains no CPU at os-index %d", np->vn_index);
        log_err(-1, __func__, log_buffer);
        continue;
        }

      /* If $use_smt is on, look for parent core so the job gets the
       * whole core rather than a single logical CPU */
      if (MOMConfigUseSMT)
        if ((core = hwloc_get_ancestor_obj_by_type(topology, HWLOC_OBJ_CORE, obj)) != NULL)
          if (hwloc_bitmap_isincluded(core->cpuset, tcpus))
            obj = core;

      /* Add cpuset of found object */
      hwloc_bitmap_or(cpus, cpus, obj->cpuset);
      } /* END for(j) */
    }
  else
#endif /* GEOMETRY REQUESTS */
    {
    remove_logical_processor_if_requested(&tcpus);

    // If job's node_usage is singlejob, simply add all cpus. Also, for logins, add all cpus
    if (((pjob->ji_wattr[JOB_ATR_node_exclusive].at_flags & ATR_VFLAG_SET) &&
         (pjob->ji_wattr[JOB_ATR_node_exclusive].at_val.at_long != 0)) ||
        (is_login_node == TRUE))
      {
      hwloc_bitmap_or(cpus, cpus, tcpus);
      hwloc_bitmap_or(mems, mems, tmems);
      }
    else
      {
      /* shared node: take the indices reserved for this job in the
       * internal layout.
       * NOTE(review): assumes internal_layout holds a reservation for this
       * job so get_cpu_indices()/get_memory_indices() return non-NULL —
       * confirm against the callers that create the reservation. */
      std::vector<int> *cpu_indices = internal_layout.get_cpu_indices(pjob->ji_qs.ji_jobid);
      std::vector<int> *mem_indices = internal_layout.get_memory_indices(pjob->ji_qs.ji_jobid);

      for (unsigned int i = 0; i < cpu_indices->size(); i++)
        hwloc_bitmap_set(cpus, cpu_indices->at(i));

      for (unsigned int i = 0; i < mem_indices->size(); i++)
        hwloc_bitmap_set(mems, mem_indices->at(i));

      delete mem_indices;
      delete cpu_indices;
      }
    }
#endif /* NUMA_SUPPORT (first section def, second section ndef */

  /* Now create cpuset for job */
  if (LOGLEVEL >= 6)
    {
    snprintf(log_buffer, sizeof(log_buffer),
      "creating cpuset for job %s: %d cpus (",
      pjob->ji_qs.ji_jobid,
      hwloc_bitmap_weight(cpus));

    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer),
      sizeof(log_buffer) - strlen(log_buffer),
      cpus);

    snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer),
      "), %d mems (",
      hwloc_bitmap_weight(mems));

    hwloc_bitmap_list_snprintf(log_buffer + strlen(log_buffer),
      sizeof(log_buffer) - strlen(log_buffer),
      mems);

    snprintf(log_buffer + strlen(log_buffer), sizeof(log_buffer) - strlen(log_buffer), ")");

    log_ext(-1, __func__, log_buffer, LOG_INFO);
    }

  if (create_cpuset(pjob->ji_qs.ji_jobid, cpus, mems, O_CREAT) == 0)
    {
    /* Success */
    if (LOGLEVEL >= 6)
      log_ext(-1, __func__, log_buffer, LOG_DEBUG);

    rc = SUCCESS;
    }
  else
    {
    /* Failure */
    log_err(errno, __func__, log_buffer);
    rc = FAILURE;
    }

finish:

  /* free all bitmaps on every exit path */
  if (cpus != NULL)
    hwloc_bitmap_free(cpus);

  if (mems != NULL)
    hwloc_bitmap_free(mems);

#ifndef NUMA_SUPPORT
  if (tcpus != NULL)
    hwloc_bitmap_free(tcpus);

  if (tmems != NULL)
    hwloc_bitmap_free(tmems);
#endif

  return(rc);
  } /* END create_job_cpuset() */
/**
* Bind a process id to the cpuset of a job.
*
* Returns SUCCESS on success.
* Returns FAILURE on failure.
*
* @param pid - (I) - pid
* @param pjob - (I) - job
*
* NOTES:
* - If pid is zero, the current process is bound to the cpuset.
* - If binding fails, the process stays in the cpuset where
* it was created (where pbs_mom runs). In usual cases,
* this is the root cpuset of the system.
*/
int move_to_job_cpuset(

  pid_t  pid,  /* I */
  job   *pjob) /* I */

  {
  char cpuset_path[MAXPATHLEN + 1];
#ifndef USELIBCPUSET
  char  path[MAXPATHLEN + 1];
  char  cpuset_buf[MAXPATHLEN];
  FILE *fd;
#endif

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it.
   * Bug fix: snprintf instead of sprintf so a long job id cannot
   * overflow cpuset_path. */
  snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_BASE, pjob->ji_qs.ji_jobid);

  if (cpuset_migrate(pid, cpuset_path) == 0)
    {
    /* Success */
    if (LOGLEVEL >= 4)
      {
      sprintf(log_buffer, "successfully moved pid %d to cpuset %s", pid, cpuset_path);
      log_ext(-1, __func__, log_buffer, LOG_DEBUG);
      }

    return(SUCCESS);
    }

  /* Failure */
  sprintf(log_buffer, "failed to move pid %d to cpuset %s", pid, cpuset_path);
  log_err(errno, __func__, log_buffer);

  return(FAILURE);

#else
  /* Construct the name of the cpuset.
   * Bug fix: snprintf instead of sprintf (see above). */
  snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_PATH, pjob->ji_qs.ji_jobid);

  /* If pid is 0, set it to current pid */
  if (pid == 0)
    pid = getpid();

  /* Write pid to tasks file — the kernel moves the task on write */
  snprintf(path, sizeof(path), "%s/tasks", cpuset_path);

  if ((fd = fopen(path, "w")) == NULL)
    {
    /* Failure */
    sprintf(log_buffer, "failed to move pid %d to cpuset %s", pid, cpuset_path);
    log_err(errno, __func__, log_buffer);
    return(FAILURE);
    }

  sprintf(cpuset_buf, "%d", pid);

  if (fwrite(cpuset_buf, sizeof(char), strlen(cpuset_buf), fd) != strlen(cpuset_buf))
    {
    sprintf(log_buffer, "failed to move pid %d to cpuset %s", pid, cpuset_path);
    log_err(errno, __func__, log_buffer);
    fclose(fd);
    return(FAILURE);
    }

  /* fclose flushes — the write may only fail at this point, so check it */
  if (fclose(fd))
    {
    sprintf(log_buffer, "failed to move pid %d to cpuset %s", pid, cpuset_path);
    log_err(errno, __func__, log_buffer);
    return(FAILURE);
    }

  if (LOGLEVEL >= 4)
    {
    sprintf(log_buffer, "successfully moved pid %d to cpuset %s", pid, cpuset_path);
    log_ext(-1, __func__, log_buffer, LOG_DEBUG);
    }

  return(SUCCESS);
#endif
  } /* END move_to_job_cpuset() */
/*
 * Return the number of ids covered by a cpuset string.
 *
 * For a range string "a-b" this returns b + 1 (one past the highest id of
 * the FIRST range found).  For a plain single value "a" it returns a.
 *
 * Fix: val2 was initialized to -2 but tested against -1 below, which made
 * the single-value branch unreachable and returned -1 for inputs like "5".
 *
 * NOTE(review): for multi-segment strings such as "0-3,8-11" only the first
 * range is considered, so the result can undercount the highest id —
 * confirm callers only pass single-segment strings or tolerate this.
 *
 * @param cpusetStr - (I) the cpuset string
 */

int get_cpuset_size(

  char *cpusetStr)  /* I */

  {
  int   val1 = -1;
  int   val2 = -1;  /* was -2: made the else-branch below unreachable */
  int   j;
  int   len = 0;
  char *ptr;

  val1 = atoi(cpusetStr);
  len = strlen(cpusetStr);

  for (j = 0; j < len; j++)
    {
    if (cpusetStr[j] == '-')
      {
      /* parse the range end that follows the dash */

      ptr = cpusetStr;
      ptr += j + 1;
      val2 = atoi(ptr);

      break;
      }
    }

  if (val2 != -1)
    return(val2 + 1);

  return(val1);
  }  /* END get_cpuset_size() */
/**
 * adjust_root_map
 * @see remove_boot_set() - parent
 *
 * Walks a cpuset list string (comma-separated single ids and "a-b" ranges)
 * and sets (add == TRUE) or clears (add == FALSE) the corresponding slots
 * of cpusetMap.  The map must be large enough for the highest id present.
 *
 * @param cpusetStr - (I) the cpuset string
 * @param cpusetMap - (I/O) the cpuset map
 * @param add - (I) True to add cpuset to map else we remove cpuset from map
 */

void adjust_root_map(

  char *cpusetStr,   /* I */
  int   cpusetMap[], /* I/O */
  int   add)         /* I */

  {
  int   mark = add ? 1 : 0;           /* value stamped into the map */
  int   rangeStart = atoi(cpusetStr); /* first id of the current segment */
  int   rangeEnd = -1;                /* -1 while the segment has no '-' */
  int   idx;
  char *cp;

  for (cp = cpusetStr; *cp != '\0'; cp++)
    {
    if (*cp == '-')
      {
      /* a range end follows the dash */

      rangeEnd = atoi(cp + 1);
      }
    else if (*cp == ',')
      {
      /* segment finished -- flush it into the map */

      if (rangeEnd > -1)
        {
        for (idx = rangeStart; idx <= rangeEnd; idx++)
          cpusetMap[idx] = mark;
        }
      else
        {
        cpusetMap[rangeStart] = mark;
        }

      /* start the next segment right after the comma */

      rangeStart = atoi(cp + 1);
      rangeEnd = -1;
      }
    }

  /* flush the trailing segment */

  if (rangeEnd > -1)
    {
    for (idx = rangeStart; idx <= rangeEnd; idx++)
      cpusetMap[idx] = mark;
    }
  else
    {
    cpusetMap[rangeStart] = mark;
    }

  return;
  }  /* END adjust_root_map() */
/**
 * remove_boot_set
 * @see initialize_root_cpuset() - parent
 *
 * Removes the ids of the boot cpuset from the root cpuset string,
 * rewriting rootStr in place as a comma-separated id list.
 *
 * @param rootStr - (I/O) the root cpuset string
 * @param bootStr - (I) the boot cpuset string
 *
 * NOTE(review): assumes rootStr is large enough to hold the rewritten
 * list -- confirm with the caller's buffer size.
 */

void remove_boot_set(

  char *rootStr, /* I/O */
  char *bootStr) /* I */

  {
  int   j;
  int   first;
  int  *cpusetMap;
  int   cpuset_size;
  char  tmpBuf[MAXPATHLEN];

  if ((rootStr == NULL) ||
      (bootStr == NULL))
    return;

  cpuset_size = get_cpuset_size(rootStr);

  if (cpuset_size <= 0)
    {
    return;
    }

  /* Allocate one int per id.  The previous calloc(1, cpuset_size + 1)
   * allocated cpuset_size + 1 BYTES, so adjust_root_map() overflowed the
   * buffer as soon as it stored int entries. */

  cpusetMap = (int *)calloc(cpuset_size + 1, sizeof(int));

  if (cpusetMap == NULL)
    return;

  if (LOGLEVEL >= 7)
    {
    sprintf(log_buffer,
      "removing boot cpuset (%s) from root cpuset (%s)",
      bootStr, rootStr);

    log_ext(-1, __func__, log_buffer, LOG_DEBUG);
    }

  /* add the root cpuset to the map */

  adjust_root_map(rootStr, cpusetMap, TRUE);

  /* now remove the boot cpuset from the map */

  adjust_root_map(bootStr, cpusetMap, FALSE);

  /* convert the cpuset map back into the root cpuset string */

  rootStr[0] = '\0';
  first = TRUE;

  for (j = 0; j < cpuset_size; j++)
    {
    if (cpusetMap[j] > 0)
      {
      if (first)
        {
        sprintf(rootStr, "%d", j);
        first = FALSE;
        }
      else
        {
        sprintf(tmpBuf, ",%d", j);
        strcat(rootStr, tmpBuf);
        }
      }
    }

  if (LOGLEVEL >= 7)
    {
    sprintf(log_buffer,
      "resulting root cpuset (%s)",
      rootStr);

    log_ext(-1, __func__, log_buffer, LOG_DEBUG);
    }

  free(cpusetMap);

  return;
  }  /* END remove_boot_set() */
/**
 * Check if a pid_t number is a process ID or thread ID.
 *
 * Return 1 if it is a pid (master thread of its thread group).
 * Return 0 if it is a tid (child thread in the group).
 * Return -1 if error (e.g. /proc entry missing or unparsable).
 *
 * The check compares the thread group ID from /proc/<id>/status with pid:
 * equal means the id names the group leader, i.e. a "process ID".
 */

static int PidIsPid(

  pid_t pid)

  {
  char  statusPath[1024];
  char  line[1024];
  FILE *statusFile;
  pid_t threadGroup;
  int   result = -1;

  sprintf(statusPath, "/proc/%d/status", pid);

  if ((statusFile = fopen(statusPath, "r")) == NULL)
    return(result);

  while (fgets(line, sizeof(line), statusFile) != NULL)
    {
    /* only the "Tgid:" line is of interest */

    if (strncmp(line, "Tgid:", 5) != 0)
      continue;

    if (sscanf(line + 5, " %d", &threadGroup) == 1)
      result = (threadGroup == pid);

    break;
    }

  fclose(statusFile);

  return(result);
  }  /* END PidIsPid() */
/**
 * Lists tasks currently attached to a cpuset incl. its child cpusets.
 *
 * Returns a new allocated list of pids.
 * Returns NULL on error, or if no pids are found.
 *
 * @param name - (I) - string
 * @param pids - (I) - pid list
 *
 * NOTES:
 *  - If name starts with /, it is used as is. If not, it is
 *    relative to the TORQUE cpuset.
 *  - The returned pid list must be freed with free_pidlist().
 *  - The parameter pids is used for recursive calls. Found
 *    pids are appended to this list. The top-level call
 *    should call with pids = NULL.
 *  - pids will contain process IDs, only. Thread IDs are
 *    not stored.
 */

struct pidl *get_cpuset_pidlist(

  const char  *name, /* I */
  struct pidl *pids) /* I */

  {
  char cpuset_path[MAXPATHLEN + 1];
  char path[MAXPATHLEN + 1];
  struct pidl *pl, *pp;
  int npids = 0;
  pid_t pid;
#ifdef USELIBCPUSET
  struct cpuset_pidlist *plist;
  int i;
#else
  char tid[1024];
  struct stat statbuf;
  struct dirent *pdirent;
  DIR *dir;
  FILE *fd;
#endif

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it */

  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_BASE, name);

  /* Get the list of PIDs attached to the cpuset,
   * do not care if the cpuset does not exist */

  if ((plist = cpuset_init_pidlist(cpuset_path, 1)) == NULL)
    {
    if (errno != ENOENT)
      {
      /* log the cpuset we actually queried (previously formatted the
       * uninitialized 'path' buffer) */

      sprintf(log_buffer, "%s: cpuset_init_pidlist", cpuset_path);
      log_err(errno, __func__, log_buffer);
      }

    return(NULL);
    }

  /* Transform the PID list into what we return.
   * NOTE(review): pl starts at NULL here, so a non-NULL 'pids' argument is
   * overwritten (and leaked) by the first entry -- callers of this branch
   * appear to always pass NULL; confirm. */

  pl = NULL;

  for (i = 0; i < cpuset_pidlist_length(plist); i++)
    {
    pid = cpuset_get_pidlist(plist, i);

    /* Do not store IDs of individual threads
     * (also skips ids whose /proc entry could not be read) */

    if ((PidIsPid(pid)) != 1)
      continue;

    if ((pp = (struct pidl *)calloc(1, sizeof(struct pidl))) == NULL)
      {
      log_err(errno, __func__, (char *)"calloc");
      break;
      }

    pp->pid = pid;
    pp->next = NULL;
    npids++;

    if (pl)
      pl->next = pp;
    else
      pids = pp;

    pl = pp;
    } /* END for(i) */

  /* Free the initial PID list */

  cpuset_freepidlist(plist);

#else
  /* Construct the name of the cpuset */

  if (name[0] == '/')
    snprintf(cpuset_path, sizeof(cpuset_path), "%s", name);
  else
    snprintf(cpuset_path, sizeof(cpuset_path), "%s/%s", TTORQUECPUSET_PATH, name);

  /* Try to open cpuset directory, don't care if it does not exist */

  if ((dir = opendir(cpuset_path)) == NULL)
    {
    if (errno != ENOENT)
      {
      /* log the directory we tried to open (previously formatted the
       * uninitialized 'path' buffer) */

      sprintf(log_buffer, "%s: opendir", cpuset_path);
      log_err(errno, __func__, log_buffer);
      }
    }
  else
    {
    /* Dive into child cpusets, if they exist */

    while ((pdirent = readdir(dir)) != NULL)
      {
      /* Skip parent and current directory. */

      if (! strcmp(pdirent->d_name, "."))
        continue;
      if (! strcmp(pdirent->d_name, ".."))
        continue;

      /* Prepend directory name to entry name for lstat. */

      snprintf(path, sizeof(path), "%s/%s", cpuset_path, pdirent->d_name);

      /* Skip entry, if lstat fails. */

      if (lstat(path, &statbuf) == -1)
        continue;

      /* If a directory is found, it is a child cpuset. Parse its content. */

      if ((statbuf.st_mode & S_IFDIR) == S_IFDIR)
        {
        pids = get_cpuset_pidlist(path, pids);
        }

      /* Read tasks list of this cpuset */

      else if (!strcmp(pdirent->d_name, "tasks"))
        {
        /* Find last pidl entry in pids so new entries append */

        if (pids != NULL)
          {
          pl = pids;
          while (pl->next != NULL)
            pl = pl->next;
          }
        else
          {
          pl = NULL;
          }

        if ((fd = fopen(path, "r")) != NULL)
          {
          /* Read tasks list line by line, store */

          while ((fgets(tid, sizeof(tid), fd)) != NULL)
            {
            pid = atoi(tid);

            /* Do not store IDs of individual threads */

            if ((PidIsPid(pid)) != 1)
              continue;

            if ((pp = (struct pidl *)calloc(1, sizeof(struct pidl))) == NULL)
              {
              log_err(errno, __func__, (char *)"calloc");
              break;
              }
            else
              {
              pp->pid = pid;
              pp->next = NULL;
              npids++;

              if (pl)
                pl->next = pp;
              else
                pids = pp;

              pl = pp;
              }
            } /* END while(fgets) */

          fclose(fd);
          }
        }
      } /* END while(readdir) */

    closedir(dir);
    } /* END if (opendir) */
#endif

  if (LOGLEVEL >= 6)
    {
    sprintf(log_buffer, "%s contains %d PIDs", cpuset_path, npids);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
    }

  return(pids);
  }  /* END get_cpuset_pidlist() */
/**
 * Returns memory pressure of a cpuset.
 *
 * Returns a non-negative number on success.
 * Returns -1 on failure.
 * @param name - (I) - string
 *
 * NOTES:
 *  - If name starts with /, it is used as is. If not, it is
 *    relative to the TORQUE cpuset.
 *  - Child cpusets are not checked.
 */

int get_cpuset_mempressure(

  const char *name) /* I */

  {
  char path[MAXPATHLEN + 1];
  int rc;
#ifdef USELIBCPUSET
  int fd;
#else
  FILE *fd;
#endif

#ifdef USELIBCPUSET
  /* Construct the name of the cpuset.
   * libcpuset does not want the root-cpuset path in it */

  if (name[0] == '/')
    snprintf(path, sizeof(path), "%s", name);
  else
    snprintf(path, sizeof(path), "%s/%s", TTORQUECPUSET_BASE, name);

  /* Open, read, close */

  if ((fd = cpuset_open_memory_pressure(path)) == -1)
    {
    if (errno != ENOENT)
      {
      sprintf(log_buffer, "%s: cpuset_open_memory_pressure", path);
      log_err(errno, __func__, log_buffer);
      }

    return(-1);
    }

  if ((rc = cpuset_read_memory_pressure(fd)) == -1)
    {
    sprintf(log_buffer, "%s: cpuset_read_memory_pressure", path);
    log_err(errno, __func__, log_buffer);
    }

  cpuset_close_memory_pressure(fd);

  if (LOGLEVEL >= 6)
    {
    sprintf(log_buffer, "%s/memory_pressure=%d", path, rc);
    log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
    }

#else
  /* Construct the name of the cpuset's memory_pressure file */

  if (name[0] == '/')
    snprintf(path, sizeof(path), "%s/%smemory_pressure", name, cpuset_prefix);
  else
    snprintf(path, sizeof(path), "%s/%s/%smemory_pressure", TTORQUECPUSET_PATH, name, cpuset_prefix);

  /* Open, read, close */

  if ((fd = fopen(path, "r")) == NULL)
    {
    if (errno != ENOENT)
      {
      sprintf(log_buffer, "%s: fopen", path);
      log_err(errno, __func__, log_buffer);
      }

    return(-1);
    }

  if ((fscanf(fd, "%d", &rc)) != 1)
    {
    /* previously the message was composed but never logged, unlike every
     * other error path in this function */

    sprintf(log_buffer, "%s: fscanf", path);
    log_err(errno, __func__, log_buffer);

    rc = -1;
    }

  fclose(fd);

  if (LOGLEVEL >= 6)
    {
    sprintf(log_buffer, "%s=%d", path, rc);
    log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, __func__, log_buffer);
    }

#endif

  return(rc);
  }  /* END get_cpuset_mempressure() */
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: c22ccff6a39660d4b84961d4367fbba7
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
//
// AudioAnimatedSticker.swift
// Telegram
//
// Created by Mikhail Filimonov on 06.12.2019.
// Copyright © 2019 Telegram. All rights reserved.
//
import Cocoa
import TGUIKit
import SwiftSignalKit
/// Immutable view state for the frame-selection sheet: just the chosen
/// start frame of the sound effect.
private struct SelectFrameState : Equatable {
    let frame: Int32

    init(frame: Int32) {
        self.frame = frame
    }

    /// Returns a copy of the state carrying the new frame value.
    func withUpdatedFrame(_ frame: Int32) -> SelectFrameState {
        return .init(frame: frame)
    }
}
// Identifier of the single text-input row of the frame-selection form.
private let _id_input = InputDataIdentifier("frame")

/// Builds the entry list for the frame-selection form: one numeric text
/// input (pre-filled with the current frame, max. 3 characters) framed by
/// two section separators.
private func selectFrameEntries(_ state: SelectFrameState) -> [InputDataEntry] {
    var entries:[InputDataEntry] = []

    var sectionId:Int32 = 0
    var index: Int32 = 0

    entries.append(.sectionId(sectionId, type: .normal))
    sectionId += 1

    entries.append(.input(sectionId: sectionId, index: index, value: .string(String(state.frame)), error: nil, identifier: _id_input, mode: .plain, data: InputDataRowData(viewType: .singleItem), placeholder: nil, inputPlaceholder: "Start frame", filter: { $0 }, limit: 3))
    index += 1

    entries.append(.sectionId(sectionId, type: .normal))
    sectionId += 1

    return entries
}
/// Builds the modal that lets the user pick the animation frame on which
/// the embedded sound effect starts.
///
/// - Parameters:
///   - context: account context the modal is presented for.
///   - select: called with the chosen frame number when the user taps "Save".
/// - Returns: a configured `InputDataModalController` ready for `showModal`.
private func selectFrameController(context: AccountContext, select:@escaping(Int32)->Void) -> InputDataModalController {
    // Single source of truth for the UI state; the promise pushes changes
    // into the signal below, the Atomic holds the latest value for reads.
    let initialState = SelectFrameState(frame: 1)
    let statePromise = ValuePromise(initialState, ignoreRepeated: true)
    let stateValue = Atomic(value: initialState)
    let updateState: ((SelectFrameState) -> SelectFrameState) -> Void = { f in
        statePromise.set(stateValue.modify (f))
    }

    // Re-render the entry list whenever the state changes.
    let signal = statePromise.get() |> map { state in
        return InputDataSignalValue(entries: selectFrameEntries(state))
    }

    let controller = InputDataController(dataSignal: signal, title: "Sound Effect Frame")

    // Assigned after the modal exists so both buttons can dismiss it.
    var close: (()->Void)? = nil

    let modalInteractions = ModalInteractions(acceptTitle: "Save", accept: {
        select(stateValue.with { $0.frame })
        close?()
    }, height: 50, singleButton: true)

    controller.leftModalHeader = ModalHeaderData(image: theme.icons.modalClose, handler: {
        close?()
    })

    // Mirror text-field edits back into the state; non-numeric input is ignored.
    controller.updateDatas = { data in
        updateState { state in
            if let rawFrame = data[_id_input]?.stringValue, let frame = Int32(rawFrame) {
                return state.withUpdatedFrame(frame)
            }
            return state
        }
        return .none
    }

    let modalController = InputDataModalController(controller, modalInteractions: modalInteractions, closeHandler: { f in f() }, size: NSMakeSize(300, 300))

    close = { [weak modalController] in
        modalController?.close()
    }

    return modalController
}
/// Developer helper: picks a `.tgs` animated sticker and an `.mp3`, embeds the
/// audio as a `soundEffect` entry into the sticker JSON, and opens the result
/// in the preview sender of the current chat.
///
/// The previous implementation used `try!` and force-unwraps throughout, so a
/// malformed or unreadable file crashed the app; every step now bails out
/// silently instead.
func addAudioToSticker(context: AccountContext) {
    filePanel(with: ["tgs", "mp3"], allowMultiple: true, canChooseDirectories: false, for: mainWindow, completion: { files in
        guard let files = files else {
            return
        }
        let stickerPath = files.first(where: { $0.nsstring.pathExtension == "tgs" })
        let audioPath = files.first(where: { $0.nsstring.pathExtension == "mp3" })
        guard let stickerPath = stickerPath, let audioPath = audioPath else {
            return
        }
        // Load and unpack both inputs; .tgs is gzip-compressed JSON.
        guard let data = try? Data(contentsOf: URL(fileURLWithPath: stickerPath)),
              let uncompressed = TGGUnzipData(data, 8 * 1024 * 1024),
              let string = NSMutableString(data: uncompressed, encoding: String.Encoding.utf8.rawValue),
              let mp3Data = try? Data(contentsOf: URL(fileURLWithPath: audioPath)) else {
            return
        }
        showModal(with: selectFrameController(context: context, select: { frame in
            // Splice the sound effect right after the "tgs":1 marker.
            let effectString = "\"soundEffect\":{\"triggerOn\":\(frame),\"data\":\"\(mp3Data.base64EncodedString())\"}"
            let range = string.range(of: "\"tgs\":1,")
            if range.location != NSNotFound {
                string.insert(effectString + ",", at: range.max)
            }
            guard let updatedData = string.data(using: String.Encoding.utf8.rawValue),
                  let zipData = TGGZipData(updatedData, -1) else {
                return
            }
            let output = NSTemporaryDirectory() + "\(arc4random()).tgs"
            do {
                try zipData.write(to: URL(fileURLWithPath: output))
            } catch {
                return
            }
            if let controller = context.sharedContext.bindings.rootNavigation().controller as? ChatController {
                showModal(with: PreviewSenderController(urls: [URL(fileURLWithPath: output)], chatInteraction: controller.chatInteraction), for: context.window)
            }
        }), for: context.window)
    })
}
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin
from distutils.version import LooseVersion
from .pool import Pooler
class Instance(Plugin):
    """Instance-wide PostgreSQL statistics plugin.

    Aggregates counters across all databases from
    ``pg_catalog.pg_stat_database`` and emits them as Zabbix items,
    graphs, and agent key/query pairs.

    Cleanups vs. the previous revision: removed the redundant inner
    ``format()`` call in ``keys_and_queries``, the dead ``all_items = []``
    initializer, unused ``enumerate`` counters, and the pointless ``del``.
    """

    # Template for the zabbix-agent query; {0} is the column name.
    query_agent = "select sum({0}) as {0} from pg_catalog.pg_stat_database;"
    key = 'pgsql.'
    AgentPluginType = 'pg'

    # Each entry: (column, zbx_key, description,
    #              (graph name, color, side), units, delta)
    Items = [
        ('xact_commit', 'transactions[total]', 'transactions: total',
            ('PostgreSQL instance: rate', '0000CC', 1),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('blks_hit', 'blocks[hit]', 'blocks: hit',
            ('PostgreSQL instance: rate', '00CC00', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('blks_read', 'blocks[read]', 'blocks: read',
            ('PostgreSQL instance: rate', 'CC0000', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('conflicts', 'events[conflicts]', 'event: conflicts',
            ('PostgreSQL instance: events', '0000CC', 0),
            Plugin.UNITS.none, Plugin.DELTA.simple_change),
        ('deadlocks', 'events[deadlocks]', 'event: deadlocks',
            ('PostgreSQL instance: events', '000000', 0),
            Plugin.UNITS.none, Plugin.DELTA.simple_change),
        ('xact_rollback', 'events[xact_rollback]', 'event: rollbacks',
            ('PostgreSQL instance: events', 'CC0000', 0),
            Plugin.UNITS.none, Plugin.DELTA.simple_change),
        ('temp_bytes', 'temp[bytes]', 'temp: bytes written',
            ('PostgreSQL instance: temp files', 'CC0000', 0),
            Plugin.UNITS.bytes, Plugin.DELTA.simple_change),
        ('temp_files', 'temp[files]', 'temp: files created',
            ('PostgreSQL instance: temp files', '0000CC', 1),
            Plugin.UNITS.none, Plugin.DELTA.simple_change),
        # stacked
        ('tup_deleted', 'tuples[deleted]', 'tuples: deleted',
            ('PostgreSQL instance: tuples', '000000', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('tup_fetched', 'tuples[fetched]', 'tuples: fetched',
            ('PostgreSQL instance: tuples', '0000CC', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('tup_inserted', 'tuples[inserted]', 'tuples: inserted',
            ('PostgreSQL instance: tuples', '00CC00', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('tup_returned', 'tuples[returned]', 'tuples: returned',
            ('PostgreSQL instance: tuples', 'CC00CC', 1),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
        ('tup_updated', 'tuples[updated]', 'tuples: updated',
            ('PostgreSQL instance: tuples', 'CC0000', 0),
            Plugin.UNITS.none, Plugin.DELTA.speed_per_second),
    ]

    # Columns that only exist on PostgreSQL >= 12.
    Items_pg_12 = [
        ('checksum_failures', 'events[checksum_failures]', 'event: checksum_failures',
            ('PostgreSQL instance: events', '00FF00', 0),
            Plugin.UNITS.none, Plugin.DELTA.simple_change)
    ]

    @staticmethod
    def _split_zbx_key(zbx_key):
        """Split 'base[arg]' into ('base', 'arg')."""
        base, arg = zbx_key.split('[')
        return base, arg[:-1]

    def run(self, zbx):
        """Query aggregated pg_stat_database counters and send them to zabbix."""
        all_items = self.Items
        if Pooler.server_version_greater('12.0'):
            # extra columns are only present on PostgreSQL >= 12
            all_items = self.Items + self.Items_pg_12
        params = ['sum(COALESCE({0}, 0)) as {0}'.format(x[0]) for x in all_items]
        result = Pooler.query(
            'select {0} from pg_catalog.pg_stat_database'.format(', '.join(params)))
        for item, val in zip(all_items, result[0]):
            zbx.send('pgsql.{0}'.format(item[1]), int(val), item[5],
                     only_positive_speed=True)

    def items(self, template):
        """Render zabbix item definitions for every known counter."""
        result = ''
        for item in self.Items + self.Items_pg_12:
            if self.Type == "mamonsu":
                delta = Plugin.DELTA.as_is
            else:
                delta = item[5]
            base, arg = self._split_zbx_key(item[1])
            result += template.item({
                'key': self.right_type(self.key + base + '{0}', arg),
                'name': 'PostgreSQL {0}'.format(item[2]),
                'value_type': self.VALUE_TYPE.numeric_float,
                'units': item[4],
                'delay': self.plugin_config('interval'),
                'delta': delta
            })
        return result

    def graphs(self, template):
        """Render one stacked zabbix graph per known graph name."""
        graphs_name = [
            'PostgreSQL instance: rate',
            'PostgreSQL instance: events',
            'PostgreSQL instance: temp files',
            'PostgreSQL instance: tuples']
        result = ''
        for name in graphs_name:
            items = []
            for item in self.Items + self.Items_pg_12:
                if item[3][0] == name:
                    base, arg = self._split_zbx_key(item[1])
                    items.append({
                        'key': self.right_type(self.key + base + '{0}', arg),
                        'color': item[3][1],
                        'yaxisside': item[3][2]
                    })
            result += template.graph({'name': name, 'items': items})
        return result

    def keys_and_queries(self, template_zabbix):
        """Render agent key/query pairs, version-gated for PostgreSQL >= 12."""
        result = []
        if LooseVersion(self.VersionPG) > LooseVersion('11'):
            all_items = self.Items + self.Items_pg_12
        else:
            all_items = self.Items
        for item in all_items:
            base, arg = self._split_zbx_key(item[1])
            result.append('{0}[*],$2 $1 -c "{1}"'.format(
                '{0}{1}.{2}'.format(self.key, base, arg),
                self.query_agent.format(item[0])))
        return template_zabbix.key_and_query(result)
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!29 &1
OcclusionCullingSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
m_OcclusionBakeSettings:
smallestOccluder: 5
smallestHole: 0.25
backfaceThreshold: 100
m_SceneGUID: 00000000000000000000000000000000
m_OcclusionCullingData: {fileID: 0}
--- !u!104 &2
RenderSettings:
m_ObjectHideFlags: 0
serializedVersion: 9
m_Fog: 0
m_FogColor: {r: 0.5, g: 0.5, b: 0.5, a: 1}
m_FogMode: 3
m_FogDensity: 0.01
m_LinearFogStart: 0
m_LinearFogEnd: 300
m_AmbientSkyColor: {r: 0.212, g: 0.227, b: 0.259, a: 1}
m_AmbientEquatorColor: {r: 0.114, g: 0.125, b: 0.133, a: 1}
m_AmbientGroundColor: {r: 0.047, g: 0.043, b: 0.035, a: 1}
m_AmbientIntensity: 1
m_AmbientMode: 0
m_SubtractiveShadowColor: {r: 0.42, g: 0.478, b: 0.627, a: 1}
m_SkyboxMaterial: {fileID: 10304, guid: 0000000000000000f000000000000000, type: 0}
m_HaloStrength: 0.5
m_FlareStrength: 1
m_FlareFadeSpeed: 3
m_HaloTexture: {fileID: 0}
m_SpotCookie: {fileID: 10001, guid: 0000000000000000e000000000000000, type: 0}
m_DefaultReflectionMode: 0
m_DefaultReflectionResolution: 128
m_ReflectionBounces: 1
m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0.44657898, g: 0.4964133, b: 0.5748178, a: 1}
m_UseRadianceAmbientProbe: 0
--- !u!157 &3
LightmapSettings:
m_ObjectHideFlags: 0
serializedVersion: 11
m_GIWorkflowMode: 0
m_GISettings:
serializedVersion: 2
m_BounceScale: 1
m_IndirectOutputScale: 1
m_AlbedoBoost: 1
m_TemporalCoherenceThreshold: 1
m_EnvironmentLightingMode: 0
m_EnableBakedLightmaps: 1
m_EnableRealtimeLightmaps: 1
m_LightmapEditorSettings:
serializedVersion: 10
m_Resolution: 2
m_BakeResolution: 40
m_AtlasSize: 1024
m_AO: 0
m_AOMaxDistance: 1
m_CompAOExponent: 1
m_CompAOExponentDirect: 0
m_Padding: 2
m_LightmapParameters: {fileID: 0}
m_LightmapsBakeMode: 1
m_TextureCompression: 1
m_FinalGather: 0
m_FinalGatherFiltering: 1
m_FinalGatherRayCount: 256
m_ReflectionCompression: 2
m_MixedBakeMode: 2
m_BakeBackend: 1
m_PVRSampling: 1
m_PVRDirectSampleCount: 32
m_PVRSampleCount: 500
m_PVRBounces: 2
m_PVRFilterTypeDirect: 0
m_PVRFilterTypeIndirect: 0
m_PVRFilterTypeAO: 0
m_PVRFilteringMode: 1
m_PVRCulling: 1
m_PVRFilteringGaussRadiusDirect: 1
m_PVRFilteringGaussRadiusIndirect: 5
m_PVRFilteringGaussRadiusAO: 2
m_PVRFilteringAtrousPositionSigmaDirect: 0.5
m_PVRFilteringAtrousPositionSigmaIndirect: 2
m_PVRFilteringAtrousPositionSigmaAO: 1
m_ShowResolutionOverlay: 1
m_LightingDataAsset: {fileID: 0}
m_UseShadowmask: 1
--- !u!196 &4
NavMeshSettings:
serializedVersion: 2
m_ObjectHideFlags: 0
m_BuildSettings:
serializedVersion: 2
agentTypeID: 0
agentRadius: 0.5
agentHeight: 2
agentSlope: 45
agentClimb: 0.4
ledgeDropHeight: 0
maxJumpAcrossDistance: 0
minRegionArea: 2
manualCellSize: 0
cellSize: 0.16666667
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
debug:
m_Flags: 0
m_NavMeshData: {fileID: 0}
--- !u!1 &491810794
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
serializedVersion: 5
m_Component:
- component: {fileID: 491810798}
- component: {fileID: 491810797}
- component: {fileID: 491810796}
- component: {fileID: 491810795}
- component: {fileID: 491810799}
m_Layer: 0
m_Name: Quad (1)
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!64 &491810795
MeshCollider:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 491810794}
m_Material: {fileID: 0}
m_IsTrigger: 0
m_Enabled: 1
serializedVersion: 3
m_Convex: 0
m_CookingOptions: 14
m_SkinWidth: 0.01
m_Mesh: {fileID: 10210, guid: 0000000000000000e000000000000000, type: 0}
--- !u!23 &491810796
MeshRenderer:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 491810794}
m_Enabled: 1
m_CastShadows: 1
m_ReceiveShadows: 1
m_DynamicOccludee: 1
m_MotionVectors: 1
m_LightProbeUsage: 1
m_ReflectionProbeUsage: 1
m_RenderingLayerMask: 4294967295
m_Materials:
- {fileID: 2100000, guid: b2fc3eb22d4e0114dadd26220e6c126c, type: 2}
m_StaticBatchInfo:
firstSubMesh: 0
subMeshCount: 0
m_StaticBatchRoot: {fileID: 0}
m_ProbeAnchor: {fileID: 0}
m_LightProbeVolumeOverride: {fileID: 0}
m_ScaleInLightmap: 1
m_PreserveUVs: 0
m_IgnoreNormalsForChartDetection: 0
m_ImportantGI: 0
m_StitchLightmapSeams: 0
m_SelectedEditorRenderState: 3
m_MinimumChartSize: 4
m_AutoUVMaxDistance: 0.5
m_AutoUVMaxAngle: 89
m_LightmapParameters: {fileID: 0}
m_SortingLayerID: 0
m_SortingLayer: 0
m_SortingOrder: 0
--- !u!33 &491810797
MeshFilter:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 491810794}
m_Mesh: {fileID: 10210, guid: 0000000000000000e000000000000000, type: 0}
--- !u!4 &491810798
Transform:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 491810794}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: -0.1}
m_LocalScale: {x: 1.9, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 3
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!114 &491810799
MonoBehaviour:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 491810794}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 4c80583529e67064197f6ec75e7da443, type: 3}
m_Name:
m_EditorClassIdentifier:
positions:
- {x: 0.2, y: 0, z: 0, w: 0}
- {x: -0.3, y: -0.2, z: 0, w: 0}
- {x: 0.5, y: 0.3, z: 0, w: 0}
radiuses:
- 0.2
- 0.3
- 0.4
intensities:
- 2
- 1
- 1
material: {fileID: 2100000, guid: b2fc3eb22d4e0114dadd26220e6c126c, type: 2}
--- !u!1 &679314577
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
serializedVersion: 5
m_Component:
- component: {fileID: 679314581}
- component: {fileID: 679314580}
- component: {fileID: 679314579}
- component: {fileID: 679314578}
m_Layer: 0
m_Name: Quad
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!64 &679314578
MeshCollider:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 679314577}
m_Material: {fileID: 0}
m_IsTrigger: 0
m_Enabled: 1
serializedVersion: 3
m_Convex: 0
m_CookingOptions: 14
m_SkinWidth: 0.01
m_Mesh: {fileID: 10210, guid: 0000000000000000e000000000000000, type: 0}
--- !u!23 &679314579
MeshRenderer:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 679314577}
m_Enabled: 1
m_CastShadows: 1
m_ReceiveShadows: 1
m_DynamicOccludee: 1
m_MotionVectors: 1
m_LightProbeUsage: 1
m_ReflectionProbeUsage: 1
m_RenderingLayerMask: 4294967295
m_Materials:
- {fileID: 2100000, guid: 7c758ec9d6314cd4dbac0a82a9d587d2, type: 2}
m_StaticBatchInfo:
firstSubMesh: 0
subMeshCount: 0
m_StaticBatchRoot: {fileID: 0}
m_ProbeAnchor: {fileID: 0}
m_LightProbeVolumeOverride: {fileID: 0}
m_ScaleInLightmap: 1
m_PreserveUVs: 0
m_IgnoreNormalsForChartDetection: 0
m_ImportantGI: 0
m_StitchLightmapSeams: 0
m_SelectedEditorRenderState: 3
m_MinimumChartSize: 4
m_AutoUVMaxDistance: 0.5
m_AutoUVMaxAngle: 89
m_LightmapParameters: {fileID: 0}
m_SortingLayerID: 0
m_SortingLayer: 0
m_SortingOrder: 0
--- !u!33 &679314580
MeshFilter:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 679314577}
m_Mesh: {fileID: 10210, guid: 0000000000000000e000000000000000, type: 0}
--- !u!4 &679314581
Transform:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 679314577}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1.9, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 2
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1 &803844346
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
serializedVersion: 5
m_Component:
- component: {fileID: 803844350}
- component: {fileID: 803844349}
- component: {fileID: 803844348}
- component: {fileID: 803844347}
m_Layer: 0
m_Name: Main Camera
m_TagString: MainCamera
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!81 &803844347
AudioListener:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 803844346}
m_Enabled: 1
--- !u!124 &803844348
Behaviour:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 803844346}
m_Enabled: 1
--- !u!20 &803844349
Camera:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 803844346}
m_Enabled: 1
serializedVersion: 2
m_ClearFlags: 1
m_BackGroundColor: {r: 0.19215687, g: 0.3019608, b: 0.4745098, a: 0}
m_NormalizedViewPortRect:
serializedVersion: 2
x: 0
y: 0
width: 1
height: 1
near clip plane: 0.3
far clip plane: 1000
field of view: 60
orthographic: 1
orthographic size: 0.5
m_Depth: -1
m_CullingMask:
serializedVersion: 2
m_Bits: 4294967295
m_RenderingPath: -1
m_TargetTexture: {fileID: 0}
m_TargetDisplay: 0
m_TargetEye: 3
m_HDR: 1
m_AllowMSAA: 1
m_AllowDynamicResolution: 0
m_ForceIntoRT: 0
m_OcclusionCulling: 1
m_StereoConvergence: 10
m_StereoSeparation: 0.022
--- !u!4 &803844350
Transform:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 803844346}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: -10}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 0
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1 &1842677472
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
serializedVersion: 5
m_Component:
- component: {fileID: 1842677474}
- component: {fileID: 1842677473}
m_Layer: 0
m_Name: Directional Light
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!108 &1842677473
Light:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 1842677472}
m_Enabled: 1
serializedVersion: 8
m_Type: 1
m_Color: {r: 1, g: 0.95686275, b: 0.8392157, a: 1}
m_Intensity: 1
m_Range: 10
m_SpotAngle: 30
m_CookieSize: 10
m_Shadows:
m_Type: 2
m_Resolution: -1
m_CustomResolution: -1
m_Strength: 1
m_Bias: 0.05
m_NormalBias: 0.4
m_NearPlane: 0.2
m_Cookie: {fileID: 0}
m_DrawHalo: 0
m_Flare: {fileID: 0}
m_RenderMode: 0
m_CullingMask:
serializedVersion: 2
m_Bits: 4294967295
m_Lightmapping: 4
m_AreaSize: {x: 1, y: 1}
m_BounceIntensity: 1
m_ColorTemperature: 6570
m_UseColorTemperature: 0
m_ShadowRadius: 0
m_ShadowAngle: 0
--- !u!4 &1842677474
Transform:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_GameObject: {fileID: 1842677472}
m_LocalRotation: {x: 0.40821788, y: -0.23456968, z: 0.10938163, w: 0.8754261}
m_LocalPosition: {x: 0, y: 3, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 1
m_LocalEulerAnglesHint: {x: 50, y: -30, z: 0}
| {
"pile_set_name": "Github"
} |
/* Copyright 2010-present MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System.Linq;
using MongoDB.Bson;
using MongoDB.Bson.Serialization;
using MongoDB.Bson.Serialization.Attributes;
using Xunit;
namespace MongoDB.Bson.Tests.Jira.CSharp239
{
public class CSharp239Tests
{
    // Recursive document: [BsonIgnoreIfNull] keeps absent children out of
    // the serialized form entirely (the point of the CSharp-239 regression).
    public class Tree
    {
        public string Node;
        [BsonIgnoreIfNull]
        public Tree Left;
        [BsonIgnoreIfNull]
        public Tree Right;
    }

    [Fact]
    public void TestSerialization()
    {
        // A three-node tree: both children present, no grandchildren.
        var tree = new Tree
        {
            Node = "top",
            Left = new Tree { Node = "left" },
            Right = new Tree { Node = "right" }
        };

        // JSON form: leaf nodes serialize without Left/Right members.
        var expectedJson = "{ 'Node' : 'top', 'Left' : { 'Node' : 'left' }, 'Right' : { 'Node' : 'right' } }".Replace("'", "\"");
        Assert.Equal(expectedJson, tree.ToJson());

        // BSON round-trip must be byte-for-byte stable.
        var bson = tree.ToBson();
        var roundTripped = BsonSerializer.Deserialize<Tree>(bson);
        Assert.True(bson.SequenceEqual(roundTripped.ToBson()));
    }
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2001-2019 Crytek GmbH / Crytek Group. All rights reserved.
#ifndef __SOCKETIOMANAGERLOBBYIDADDR_H__
#define __SOCKETIOMANAGERLOBBYIDADDR_H__
#pragma once
#if USE_LOBBYIDADDR
#define HAS_SOCKETIOMANAGER_LOBBYIDADDR
#endif // USE_LOBBYIDADDR
#if defined(HAS_SOCKETIOMANAGER_LOBBYIDADDR)
#include "SocketError.h"
#include <CryMemory/PoolAllocator.h>
#include "ISocketIOManager.h"
#include "Network.h"
// Socket I/O manager used in USE_LOBBYIDADDR builds, where peers are
// addressed via lobby IDs (TNetAddress) rather than plain socket addresses.
// Implements the polling-based CSocketIOManager interface.
class CSocketIOManagerLobbyIDAddr : public CSocketIOManager
{
public:
	CSocketIOManagerLobbyIDAddr();
	~CSocketIOManagerLobbyIDAddr();

	// One-time setup; returns false on failure.
	// NOTE(review): presumably must succeed before any Register*/Request*
	// call — confirm against the call sites.
	bool Init();
	const char* GetName() override { return "LobbyIDAddr"; }

	// Polling interface: PollWait waits up to waitTime for I/O readiness,
	// PollWork then processes it and reports whether anything was done.
	bool PollWait(uint32 waitTime) override;
	int PollWork(bool& performedWork) override;

	// Static callback invoked with a packet received on recvSocket;
	// privateRef carries the manager instance.
	static void RecvPacket(void* privateRef, uint8* recvBuffer, uint32 recvSize, CRYSOCKET recvSocket, TNetAddress& recvAddr);

	// Registers sock and returns the handle used by all subsequent calls.
	SSocketID RegisterSocket(CRYSOCKET sock, int protocol) override;

	// Target setters: install the object notified when the corresponding
	// operation (recv-from, send-to, connect, accept, recv, send) completes.
	void SetRecvFromTarget(SSocketID sockid, IRecvFromTarget* pTarget) override;
	void SetSendToTarget(SSocketID sockid, ISendToTarget* pTarget) override;
	void SetConnectTarget(SSocketID sockid, IConnectTarget* pTarget) override;
	void SetAcceptTarget(SSocketID sockid, IAcceptTarget* pTarget) override;
	void SetRecvTarget(SSocketID sockid, IRecvTarget* pTarget) override;
	void SetSendTarget(SSocketID sockid, ISendTarget* pTarget) override;
	void RegisterBackoffAddressForSocket(TNetAddress addr, SSocketID sockid) override;
	void UnregisterBackoffAddressForSocket(TNetAddress addr, SSocketID sockid) override;
	void UnregisterSocket(SSocketID sockid) override;

	// Asynchronous operation requests; each returns false if the request
	// could not be queued.
	bool RequestRecvFrom(SSocketID sockid) override;
	bool RequestSendTo(SSocketID sockid, const TNetAddress& addr, const uint8* pData, size_t len) override;
	bool RequestSendVoiceTo(SSocketID sockid, const TNetAddress& addr, const uint8* pData, size_t len) override;
	bool RequestConnect(SSocketID sockid, const TNetAddress& addr) override;
	bool RequestAccept(SSocketID sockid) override;
	bool RequestSend(SSocketID sockid, const uint8* pData, size_t len) override;
	bool RequestRecv(SSocketID sockid) override;

	// User messages and buffered data are not supported by this backend.
	void PushUserMessage(int msg) override {}
	bool HasPendingData() override { return false; }

#if LOCK_NETWORK_FREQUENCY
	// Frequency locking is a no-op for this backend.
	virtual void ForceNetworkStart() override {}
	virtual bool NetworkSleep() override { return true; }
#endif

private:
	// Book-keeping for one registered socket slot. `salt` changes when a
	// slot is reused so that stale SSocketID handles can be rejected
	// (see GetRegisteredSocket).
	struct SRegisteredSocket
	{
		SRegisteredSocket(uint16 saltValue) :
			sock(CRY_INVALID_SOCKET),
			pRecvFromTarget(0),
			pSendToTarget(0),
			pConnectTarget(0),
			pAcceptTarget(0),
			pRecvTarget(0),
			pSendTarget(0),
			salt(saltValue),
			inUse(false)
		{
		}
		bool inUse;
		CRYSOCKET sock;
		uint16 salt;
		IRecvFromTarget* pRecvFromTarget;
		ISendToTarget* pSendToTarget;
		IConnectTarget* pConnectTarget;
		IAcceptTarget* pAcceptTarget;
		IRecvTarget* pRecvTarget;
		ISendTarget* pSendTarget;
	};
	typedef std::vector<SRegisteredSocket> TRegisteredSockets;
	TRegisteredSockets m_registeredSockets;

	// Resolves a handle to its slot, or NULL if the id/salt is invalid.
	SRegisteredSocket* GetRegisteredSocket(SSocketID sockid);

	// Scratch state describing the most recently received packet.
	uint8      m_recvBuffer[MAX_UDP_PACKET_SIZE];
	TNetAddress m_recvAddr;
	uint32     m_recvSize;
	CRYSOCKET  m_recvSocket;
};
#endif // defined(HAS_SOCKETIOMANAGER_LOBBYIDADDR)
#endif // __SOCKETIOMANAGERLOBBYIDADDR_H__
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>Script_magic</string> </key>
<value> <int>3</int> </value>
</item>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
</klass>
<tuple/>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>mapping_dict</string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>NotificationMessage_getSubstitutionMappingDictFromArgument</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
| {
"pile_set_name": "Github"
} |
<configuration>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<root level="error">
<appender-ref ref="STDOUT" />
</root>
</configuration> | {
"pile_set_name": "Github"
} |
#!/bin/bash
#
# This configuration file describes the guest instances used for this
# strongSwan test scenario.

# All guest instances that are required for this test.
#
VIRTHOSTS="alice moon carol winnetou dave"

# Corresponding block diagram of the network topology.
#
DIAGRAM="a-m-c-w-d.png"

# Guest instances on which tcpdump is to be started (traffic capture).
#
TCPDUMPHOSTS="moon alice"

# Guest instances on which IPsec is started.
# Used for IPsec logging purposes.
#
IPSECHOSTS="moon carol dave"
| {
"pile_set_name": "Github"
} |
module github.com/spf13/pflag
go 1.12
| {
"pile_set_name": "Github"
} |
/** Error message constants. */
var FUNC_ERROR_TEXT = 'Expected a function';

/**
 * The base implementation of `_.delay` and `_.defer` which accepts `args`
 * to provide to `func`.
 *
 * @private
 * @param {Function} func The function to delay.
 * @param {number} wait The number of milliseconds to delay invocation.
 * @param {Array} args The arguments to provide to `func`.
 * @returns {number|Object} Returns the timer id or timeout object.
 * @throws {TypeError} If `func` is not a function.
 */
function baseDelay(func, wait, args) {
  if (typeof func !== 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  // Bind the arguments now; invocation happens after `wait` ms.
  var invoke = function() {
    func.apply(undefined, args);
  };
  return setTimeout(invoke, wait);
}

module.exports = baseDelay;
| {
"pile_set_name": "Github"
} |
#import "AppDelegate.h"
#import "ViewController.h"
@implementation AppDelegate

// App launch hook: builds the UI programmatically (no storyboard) — a
// full-screen window whose root is a freshly allocated ViewController —
// and makes it key and visible.
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
    self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
    self.window.rootViewController = [ViewController new];
    [self.window makeKeyAndVisible];
    return YES; // launch handled normally
}

@end
"pile_set_name": "Github"
} |
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LookupTable.c"
#else
/* Recompute occurrence counts for the indices present in `input`:
 * first zero the count slot of every referenced row, then tally how many
 * times each row index appears. Slots for rows not referenced by `input`
 * are left untouched. */
static void THNN_(LookupTable_resetCount)(
          THInteger_t *count_data,
          THIndexTensor *input)
{
  THIndex_t *indices = THIndexTensor_(data)(input);
  ptrdiff_t n = THIndexTensor_(nElement)(input);
  ptrdiff_t pos;

  /* Pass 1: clear the slots we are about to count into. */
  for (pos = 0; pos < n; pos++)
    count_data[indices[pos] - TH_INDEX_BASE] = 0;

  /* Pass 2: count occurrences. */
  for (pos = 0; pos < n; pos++)
    count_data[indices[pos] - TH_INDEX_BASE] += 1;
}
/*
 * Accumulate scale * gradOutput rows into the rows of gradWeight selected
 * by `input` (a 1-D or 2-D tensor of row indices). Indices equal to
 * `paddingValue` contribute nothing. When scaleGradByFreq is set, each
 * contribution is additionally divided by how many times its index occurs
 * in `input` (counted via `count`). `sorted` and `indices` are unused in
 * this CPU implementation. Raises THError on non-contiguous inputs,
 * wrong dimensionality, or out-of-range indices.
 */
void THNN_(LookupTable_accGradParameters)(
          THNNState *state,
          THIndexTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THIntegerTensor *count,
          THTensor *sorted,
          THIndexTensor *indices,
          bool scaleGradByFreq,
          int paddingValue,
          accreal ascale)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
  ptrdiff_t i;
  THInteger_t *count_data = NULL;

  /* One count slot per vocabulary row (gradWeight row). */
  if (scaleGradByFreq)
  {
    THIntegerTensor_(resize1d)(count, gradWeight->size[0]);
    count_data = THIntegerTensor_(data)(count);
  }

  if (!THTensor_(isContiguous)(gradWeight))
    THError("gradWeight must be contiguous");
  if (!THIndexTensor_(isContiguous)(input))
    THError("input must be contiguous");
  if (THIndexTensor_(nDimension)(input) != 1 && THIndexTensor_(nDimension)(input) != 2) {
    THDescBuff s1 = THIndexTensor_(sizeDesc)(input);
    THError("input must be a vector or matrix, but is of shape: %s", s1.str);
  }

  THIndex_t *input_data = THIndexTensor_(data)(input);
  ptrdiff_t numel = THIndexTensor_(nElement)(input);
  long numw = THTensor_(size)(gradWeight, 0);

  // check that inputs are all within range
  for (i=0; i<numel; i++)
    if (input_data[i] < TH_INDEX_BASE || input_data[i] >= numw + TH_INDEX_BASE) {
      THError("inputs need to be in the range %ld <= input < %ld, "
              "but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE),
              input_data[i]);
    }

  /* Work on a contiguous view of gradOutput; freed before every return. */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  real *gw = THTensor_(data)(gradWeight);
  real *go = THTensor_(data)(gradOutput);
  long stride = THTensor_(stride)(gradWeight, 0);

  if (count_data)
    THNN_(LookupTable_resetCount)(count_data, input);

#ifdef _OPENMP
  if (numel > 1000)
  {
    // The strategy is to parallelize over sections of the vocabulary, so that
    // thread 1 handles updates to gradWeight[0..nVocab/nThreads]. Every thread
    // has to traverse the entire input, but the dominating factor is the axpy
    // BLAS call.
    #pragma omp parallel private(i)
    {
      int tid = omp_get_thread_num();
      int nthreads = omp_get_num_threads();
      /* Each thread owns a disjoint [start, end) slice of the rows, so no
       * two threads ever write the same gradWeight row. */
      long start = tid * (numw/nthreads + 1);
      long end = start + (numw/nthreads + 1);
      for (i=0; i<numel; i++)
      {
        if (input_data[i] != paddingValue)
        {
          long k = input_data[i] - TH_INDEX_BASE;
          if (k >= start && k < end)
          {
            real scale_ = scale;
            if (count_data) scale_ /= count_data[k];
            /* gw[k] += scale_ * go[i] over one row of width `stride`. */
            THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
          }
        }
      }
    }
    THTensor_(free)(gradOutput);
    return;
  }
#endif

  /* Serial fallback (also used when OpenMP is unavailable or numel is small). */
  for (i=0; i<numel; i++)
  {
    if (input_data[i] != paddingValue)
    {
      long k = input_data[i] - TH_INDEX_BASE;
      real scale_ = scale;
      if (count_data) scale_ /= count_data[k];
      THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
    }
  }

  THTensor_(free)(gradOutput);
}
/*
* Keep the norm of weight smaller than maxNorm
*/
/* Rescale one embedding row of width `stride` in place so that its
 * L-`normType` norm does not exceed `maxNorm`. Rows already within the
 * limit are left untouched. The 1e-7 in the denominator keeps the scaled
 * norm strictly below maxNorm. */
static void THNN_(LookupTable_renormRow)(
          real *row_data,
          long stride,
          real maxNorm,
          real normType)
{
  long idx;
  real accum = 0;

  /* Accumulate |x|^p, with fast paths for p == 1 and p == 2. */
  for (idx = 0; idx < stride; idx++)
  {
    real v = row_data[idx];
    if (normType == 1)
      accum += fabs(v);
    else if (normType == 2)
      accum += v * v;
    else
      accum += pow(fabs(v), normType);
  }
  accum = pow(accum, 1.0 / normType);

  /* Negated comparison so a NaN norm skips rescaling, as the original did. */
  if (!(accum > maxNorm))
    return;

  {
    real shrink = maxNorm / (accum + 1e-7);
    for (idx = 0; idx < stride; idx++)
      row_data[idx] *= shrink;
  }
}
/* qsort comparator for THIndex_t values, ascending. Never returns 0, which
 * is fine for sorting (equal elements compare arbitrarily) though it makes
 * the sort unstable for duplicates. */
static int THNN_(compare_THIndex)(const void* a, const void* b)
{
  const THIndex_t lhs = *(const THIndex_t*)a;
  const THIndex_t rhs = *(const THIndex_t*)b;
  return (lhs < rhs) ? -1 : 1;
}
/*
 * Clamp the L-`normType` norm of each row of `weight` referenced by `idx`
 * to at most `maxNorm`. NOTE: `idx` is sorted and deduplicated IN PLACE, so
 * the caller's index order (and element count semantics) are not preserved.
 * Raises THError on non-contiguous tensors, non-vector idx, non-positive
 * norm, or out-of-range indices.
 */
void THNN_(LookupTable_renorm)(
          THNNState *state,
          THIndexTensor *idx,
          THTensor *weight,
          accreal maxNorm_,
          accreal normType_)
{
  real maxNorm = TH_CONVERT_ACCREAL_TO_REAL(maxNorm_);
  real normType = TH_CONVERT_ACCREAL_TO_REAL(normType_);

  /* Validate layout and arguments before touching any data. */
  if (!THTensor_(isContiguous)(weight))
    THError("weight must be contiguous");
  if (!THIndexTensor_(isContiguous)(idx))
    THError("input must be contiguous");
  if (THIndexTensor_(nDimension)(idx) != 1)
    THError("idx must be a vector");
  if (normType <= 0)
    THError("non-positive-norm not supported");

  ptrdiff_t i;
  THIndex_t *row_idx = THIndexTensor_(data)(idx);
  ptrdiff_t numel = THIndexTensor_(nElement)(idx);

  long numw = THTensor_(size)(weight, 0);
  long stride = THTensor_(stride)(weight, 0);
  real *gw = THTensor_(data)(weight);

  /* Range-check every index before renormalizing anything. */
  for (i=0; i<numel; i++) {
    if (row_idx[i] < TH_INDEX_BASE || row_idx[i] >= numw + TH_INDEX_BASE) {
      THError("input need to be in the range %ld <= input < %ld, "
              "but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE),
              row_idx[i]);
    }
  }
  // get unique indices: sort, then compact adjacent duplicates in place
  qsort(row_idx, numel, sizeof(THIndex_t), THNN_(compare_THIndex));
  ptrdiff_t ptr = 0;
  for (i=0; i<numel; i++)
    if (i == 0 || row_idx[i] != row_idx[i-1])
      row_idx[ptr++] = row_idx[i];
  numel = ptr;  /* numel is now the count of unique indices */
#ifdef _OPENMP
  if (numel > 1000)
  {
    // The strategy is to parallelize over the rows that appear in
    // row_idx, so that thread 1 handles the rows in row_idx[0..numel/nThreads].
    // This distributes the work evenly to each thread.
    #pragma omp parallel for private(i)
    for (i=0; i<numel; i++)
    {
      long k = row_idx[i] - TH_INDEX_BASE;
      THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
    }
    return;
  }
#endif
  /* Serial fallback (also used for small inputs). */
  for (i=0; i<numel; i++)
  {
    long k = row_idx[i] - TH_INDEX_BASE;
    THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
  }
}
#endif
| {
"pile_set_name": "Github"
} |
# Node/Express
This example shows how to embed Kepler.gl in a node/express/webpack application.
#### 1. Install
```sh
npm install
```
or
```sh
yarn
```
#### 2. Mapbox Token
Add your Mapbox access token to the Node environment:
```sh
export MapboxAccessToken=<your_mapbox_token>
```
#### 3. Start the app
```sh
npm start
```
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="refresh" content="0;URL=../../../../libc/fn.aio_return.html">
</head>
<body>
<p>Redirecting to <a href="../../../../libc/fn.aio_return.html">../../../../libc/fn.aio_return.html</a>...</p>
<script>location.replace("../../../../libc/fn.aio_return.html" + location.search + location.hash);</script>
</body>
</html> | {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qihoo.qsql.org.apache.calcite.adapter.enumerable;
import com.qihoo.qsql.org.apache.calcite.plan.Convention;
import com.qihoo.qsql.org.apache.calcite.plan.RelTraitSet;
import com.qihoo.qsql.org.apache.calcite.rel.RelNode;
import com.qihoo.qsql.org.apache.calcite.rel.convert.ConverterRule;
import com.qihoo.qsql.org.apache.calcite.rel.logical.LogicalMinus;
/**
* Rule to convert an {@link com.qihoo.qsql.org.apache.calcite.rel.logical.LogicalMinus} to an
* {@link EnumerableMinus}.
*/
class EnumerableMinusRule extends ConverterRule {
EnumerableMinusRule() {
super(LogicalMinus.class, Convention.NONE, EnumerableConvention.INSTANCE,
"EnumerableMinusRule");
}
public RelNode convert(RelNode rel) {
final LogicalMinus minus = (LogicalMinus) rel;
if (minus.all) {
return null; // EXCEPT ALL not implemented
}
final EnumerableConvention out = EnumerableConvention.INSTANCE;
final RelTraitSet traitSet =
rel.getTraitSet().replace(
EnumerableConvention.INSTANCE);
return new EnumerableMinus(rel.getCluster(), traitSet,
convertList(minus.getInputs(), out), false);
}
}
// End EnumerableMinusRule.java
| {
"pile_set_name": "Github"
} |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
#define TENSORFLOW_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
namespace grappler {
// A TensorFlow model to optimize.
// Models are represented by the combination of a graph, one of more fetch
// nodes, and potentially a set of nodes to feed.
class GraphProperties {
 public:
  // Constructs properties for the given item; `item` is copied into `item_`.
  // (The previous comment described a factory returning nullptr — this is a
  // plain constructor and cannot fail that way.)
  explicit GraphProperties(const GrapplerItem& item) : item_(item) {}

  // Infers tensor properties without executing the graph (shape inference).
  Status InferStatically();
  // Infers tensor properties by running the model on `cluster` and
  // observing the results.
  Status InferDynamically(Cluster* cluster);
  // Infers tensor properties from a previously computed cost graph.
  Status InferFromCostGraph(const CostGraphDef& cost_graph);

  // Stores `item_.graph` with the inferred output shapes to `output_graph_def`.
  Status AnnotateOutputShapes(GraphDef* output_graph_def);

  // Lookup by node name. The Get* accessors return a reference into the
  // corresponding map; for unknown nodes they presumably fall back to
  // `missing_properties_` — confirm in the implementation.
  bool HasInputProperties(const string& name) const;
  bool HasOutputProperties(const string& name) const;
  const std::vector<OpInfo::TensorProperties>& GetInputProperties(
      const string& node_name) const;
  const std::vector<OpInfo::TensorProperties>& GetOutputProperties(
      const string& node_name) const;

  // Fills an OpInfo::TensorProperties from a shape handle + dtype resolved
  // against the given inference context.
  static void FillTensorPropertiesFromContext(
      const shape_inference::ShapeHandle&, const DataType&,
      shape_inference::InferenceContext*, OpInfo::TensorProperties*);

 private:
  // Inputs
  GrapplerItem item_;

  // Per-node inferred properties, keyed by node name.
  std::map<string, std::vector<OpInfo::TensorProperties>> input_properties_;
  std::map<string, std::vector<OpInfo::TensorProperties>> output_properties_;
  // Empty sentinel returned when a node has no recorded properties.
  const std::vector<OpInfo::TensorProperties> missing_properties_;

  // Merges shapes <shapes_and_types>, determined from an EnqueueV2 node, into
  // <*queue_shapes_and_types>.
  Status MergeEnqueueShapesAndTypes(
      const std::vector<shape_inference::ShapeAndType>& shapes_and_types,
      shape_inference::InferenceContext* qctx,
      std::vector<shape_inference::ShapeAndType>* queue_shapes_and_types);
  // Relaxes shapes <shapes_and_types>, determined from an EnqueueV2 node, into
  // <*queue_shapes_and_types>.
  Status RelaxEnqueueShapesAndMergeTypes(
      const std::vector<shape_inference::ShapeAndType>& shapes_and_types,
      shape_inference::InferenceContext* qctx,
      std::vector<shape_inference::ShapeAndType>* queue_shapes_and_types);

  // This gives access to private function of InferenceContext.
  static void Relax(shape_inference::InferenceContext* c,
                    shape_inference::ShapeHandle s0,
                    shape_inference::ShapeHandle s1,
                    shape_inference::ShapeHandle* out);
  // These give access to private functions of ShapeRefiner.
  static bool SameDefinedShape(shape_inference::InferenceContext* c,
                               shape_inference::ShapeHandle s0,
                               shape_inference::ShapeHandle s1);
  static bool IsUpdatedShapesOrTypes(
      shape_inference::InferenceContext* c,
      const std::vector<shape_inference::ShapeAndType>& existing,
      const std::vector<shape_inference::ShapeAndType>& updated);
};
} // end namespace grappler
} // end namespace tensorflow
#endif // TENSORFLOW_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
| {
"pile_set_name": "Github"
} |
// This file is part of Silk.NET.
//
// You may modify and distribute Silk.NET under the terms
// of the MIT license. See the LICENSE file for details.
namespace Silk.NET.OpenAL
{
/// <summary>
/// Defines available parameters for <see cref="IContextState.GetContextProperty(Device*,GetContextString)" />.
/// </summary>
    public enum GetContextString
    {
        /// <summary>
        /// A list of available context extensions separated by spaces.
        /// </summary>
        /// <remarks>Value mirrors the corresponding native ALC_* constant.</remarks>
        Extensions = 0x1006,

        /// <summary>
        /// Gets the name of the provided device.
        /// </summary>
        /// <remarks>Value mirrors the corresponding native ALC_* constant.</remarks>
        DeviceSpecifier = 0x1005
    }
} | {
"pile_set_name": "Github"
} |
About Klipper
-------------
Klipper is a 3D printer firmware system which utilizes an external processor such as a Raspberry Pi or other computer to compute kinematic equations and send the processed information to the MCU on the printer, usually an ATmega 2560. Klipper's main fork is available at <https://github.com/KevinOConnor/klipper>, however, if you want the experimental features outlined in this wiki it is recommended to clone our community repository at <https://github.com/PrusaOwners/klipper>.
Installing and Configuring Klipper on the MK3
---------------------------------------------
If you are installing Klipper on a Raspberry Pi, follow the instructions [Adafruit's put together here](https://learn.adafruit.com/octoprint-on-m3d/octoprint-set-up) to install Octoprint, or use a preconfigured octopi image.
First, SSH into the Pi (Windows: use PuTTY; Linux/Mac, use the terminal). If you don’t know what it is, try octopi.local or install something like Advanced IP Scanner to find it out. Username is pi, password raspberry
Run the following commands:
git clone https://github.com/PrusaOwners/klipper
./klipper/scripts/install-octopi.sh
cd ~/klipper/
make menuconfig
Leave everything as stock, and click save, then exit.
Run the following command:
make
Ensure that the Raspberry Pi is physically connected to the printer, and that Octoprint is not connected to the printer by navigating to <http://octopi.local> or your pi’s IP address on a web browser (the "Connect" button should be displayed).
Next is finding the USB serial port. The general way to find a USB serial port is to run ls -l /dev/serial/by-id/ from an ssh terminal on the host machine. It will likely produce output similar to the following:
lrwxrwxrwx 1 root root 13 Jun 1 21:12 usb-1a86_USB2.0-Serial-if00-port0 -> ../../ttyUSB0
The name found in the above command (minus the -> ../../ttyUSB0 portion) is stable and it should be used in the config file and while flashing the micro-controller code. For example, a flash command might look similar to:
sudo service klipper stop
make flash FLASH_DEVICE=/dev/serial/by-id/usb-1a86_USB2.0-Serial-if00-port0
sudo service klipper start
Next, copy the file ~/klipper/config/printer-prusa-i3-mk3-2017.cfg to your home directory and rename it printer.cfg:
cd ~
cp ~/klipper/config/printer-prusa-i3-mk3-2017.cfg printer.cfg
This file contains a default configuration for the Prusa i3 MK3 and enables all of the experimental features outlined in this wiki. The config must be updated with the updated USB information. Open up printer.cfg in a text editor and change the line listed below:
[mcu]
serial: /dev/ttyACM0
to match the USB device from the ls -l command. Be sure to copy-and-paste the name from the "ls" command (minus the -> ../../ttyUSB0 portion) that you ran above as the name will be different for each printer. The line in the updated config might look like:
[mcu]
serial: /dev/serial/by-id/usb-1a86_USB2.0-Serial-if00-port0
Next, restart Klipper with the following command:
sudo service klipper restart
Finally, connect Klipper to Octoprint:
Go to the setting wrench at the top.
Under **“Serial Connection”** in **“Additional serial ports”** add **“/tmp/printer”**. Then click **“Save”**.
Enter the Settings tab again and under **“Serial Connection”** change the **“Serial Port”** setting to **“/tmp/printer”**. Navigate to the **“Behavior”** sub-tab and select the **“Cancel any ongoing prints but stay connected to the printer”** option. Click **“Save”**.
From the main page, under the **“Connection”** section (at the top left of the page) make sure the **“Serial Port”** is set to **“/tmp/printer”** and baudrate is **250000** and click **“Connect”**. (If **“/tmp/printer”** is not an available selection then try reloading the page.)
Once connected, navigate to the **“Terminal”** tab and type **“status”** (without the quotes) into the command entry box and click **“Send”**. If you see this, you’re good: **“Recv: // Printer is ready”**
Special considerations for installing on a non-raspberry-pi
-----------------------------------------------------------
If you follow the above instructions and it doesn't work, try running this:
cat /etc/default/klipper
You should get an output like this:
# Configuration for /etc/init.d/klipper
KLIPPY_USER=server
KLIPPY_EXEC=/home/server/klippy-env/bin/python
KLIPPY_ARGS="/home/server/OctoPrint/klipper/klippy/klippy.py /home/server/printer.cfg -l /tmp/klippy.log"
If KLIPPY_ARGS's path is different than your klipper path (e.g. this example shows Klipper under the Octoprint folder, and I had it in /home/server), correct with a mv command like so: mv /home/YourUserName/klipper /home/YourUserName/OctoPrint/klipper
Calibrating your Z-Endstop
--------------------------
The Prusa i3 MK3 uses the Pinda probe as a Z-Endstop, and the endstop height needs to be calibrated prior to printing. Proceed with the following steps for initial calibration:
1. Set the following properties under [stepper_z] in your printer.cfg:
position_min: -2
and under [probe]:
z_offset: 0
2. Restart klipper from the shell to load the new config:
sudo service klipper restart
Alternatively, you can issue a restart command from Octoprint's terminal if you are connected.
3. Home the Z-axis using Octoprint.
4. Use Octoprint or the Printer Menu to move the Z-axis down in .1mm to .01mm increments until the nozzle is just above the bed
5. The current Z position will likely be somewhere between -0.4 and -1.0. It will be displayed on the printer; you can also retrieve the current position by issuing the GET_POSITION command in Octoprint's terminal.
6. The absolute value of the current Z position will be your endstop position and probe offset. For example, if your Z position is -.6, enter the following under [probe]:
z_offset: .6
7. Restart Klipper again and you are ready to print. You may have to make slight changes to get a good first layer. Increasing the endstop position will bring the nozzle closer to the bed when printing, decreasing the endstop will raise it. Make sure you update both the endstop position and probe offset when making changes.
Prusa GCode Support
--------------------
Currently the following prusa specific gcodes are supported:
- TIMED_GCODE
- TRAM_Z
To enable these gcodes, add the following to your printer.cfg:
[prusa_gcodes]
TIMED_GCODE allows the user to execute a gcode with a specific delay. This is, for example, useful for resetting the display after a M117 message. It is used in the following manner:
TIMED_GCODE GCODE=M117 DELAY=5
The example above will send an empty M117 with a delay of 5 seconds. Note that if you want to execute a more complex gcode, spaces should be replaced with underscores. For example, if you want to send M117 Hello with a delay of 10 seconds, you could enter the following:
TIMED_GCODE GCODE=M117_Hello DELAY=10
TRAM_Z functions in a manner similar to Prusa's Calibrate-Z functionality. It will use the TMC2130 driver to home to the top of the printer, then move an extra 10mm to “tram” the Z-Axis.
Note for previous users:
LOAD_FILAMENT, UNLOAD_FILAMENT, SET_BEEPER, and M900 have been deprecated. It is recommend to use gcode macros to replace the functionality of these gcodes.
Configuring Mesh Bed Leveling
-----------------------------
### About Klipper Mesh Bed Leveling
Mesh Bed Leveling is now available in upstream Klipper: <https://github.com/KevinOConnor/klipper>
You may also pull it from the PrusaOwners repo: <https://github.com/PrusaOwners/klipper/>
### Things to Note Before Implementing
There are a few things to keep in mind with this implementation that may be subject to change:
1. By default each probe point is only sampled once, unlike Prusa Firmware which samples twice. After testing this doesn't seem to create an issue, as the Pinda produces fairly repeatable results. Functionality has been added to multi-sample points, with the tradeoff being that there is a small pause between samples unlike stock firmware.
2. <b>Those transitioning from ALPHA versions of bed_mesh should be aware of the following:</b>
1. The probe_offset option of [bed_mesh] has now moved to the [probe] section as x_offset and y_offset.
2. In the [bed_mesh] section probe_min and probe_max have been renamed to min_point and max_point.
3. G80 and G81 aliases are no longer hardcoded. Instructions for how to add aliases via gcode_macros are provided later in this document.
4. After probing the tool no longer returns to (0,0), as this is not suitable for every printer. This can be added in a gcode_macro or in your slicer's start gcode.
### Pre-requisites
Before proceeding, make sure [bed_tilt] is not in your printer.cfg. It will present a conflict with Mesh Leveling. You also need [probe] in your printer.cfg:
[probe]
pin: PB4
x_offset: 24.
y_offset: 5.
z_offset: .0
speed: 10.0
The z_offset is the distance between the nozzle and the print surface when the probe triggers. When a probe is used as an endstop for the z axis, the value for z_offset will be used as the endstop position. Thus, if you have position_endstop in [stepper_z] defined, remove it. The x_offset and y_offset options refer to the distance between the nozzle and the probe on their respective axis.
It is also a good idea to make sure position_min under [stepper_z] is a negative number. It's likely that you will need to move below Z0 if your bed is significantly warped.
Finally, I recommend adding a homing override to home to the center of the bed:
[homing_override]
gcode:
G1 Z3
G28 X0 Y0
G1 X101 Y100 F5000
G28 Z0
set_position_z: 0
This will lift Z 3mm prior to homing, making sure that it doesn't crash into the bed if its location is close to the build plate. It will then home X and Y, move X and Y so that the probe is at the center of the build plate, then home Z.
### Configuration
Mesh Leveling is fully parametric. As a result it is possible to configure it for any printer, and a varying number of probe points. Probe points are generated dynamically. To get started with Mesh Leveling, the bare minimum is required in printer.cfg:
[bed_mesh]
speed: 100
min_point: 11,1
max_point: 215,193
Below is what a Mesh Bed Leveling configuration would look like with all options set:
[bed_mesh]
speed: 100
horizontal_move_z: 5
samples: 1
sample_retract_dist: 2.0
min_point: 11,1
max_point: 215,193
probe_count: 3,3
fade_start: 1.0
fade_end: 10.0
move_check_distance: 5.0
split_delta_z: .025
mesh_pps: 2,2
algorithm: lagrange
bicubic_tension: .2
Be aware that when dynamically generating points the distance between each point is calculated. If the distance is not a whole number, the value will be floored to the last hundredth. This will result in your maximum points being adjusted inward slightly. Also keep in mind that the min and max points refer to the position of the nozzle, not the probe. Make sure you do not choose points that will move the probe off of the bed.
Below is a detailed explanation of each option:
- speed:
The speed at which the bed is probed.
- horizontal_move_z:
The distance to raise the toolhead between probes. Default is 5mm.
- samples:
The number of samples to take for each probe point.
- sample_retract_dist:
The distance to retract the tool between samples. Only applies when samples > 1.
- min_point:
The minimum start point (x,y) on the grid to generate. This parameter must be provided.
- max_point:
The maximum start point (x,y) on the grid to generate. Note that this may not be the 'last' point if probing an even number of Y values on the grid. This parameter must be provided.
- probe_count:
A comma separated pair of integers (x,y) indicating the number of points to probe on each axis. It is possible to enter a single integer value for this parameter, in which case it will be applied to both axes. Default is 3,3.
- fade_start:
The z position in which to start fading out z_adjustment. Default is 1.0
- fade_end:
The z position in which fading will complete. If fade_end is less than or equal to fade_start then fading will be disabled. Be careful with this setting, fading too quickly will likely result in a failed print, or could potentially crash the nozzle into the bed if done near Z0. Default is 10.0
- move_check_distance:
The minimum distance a move can be split. Default is 5.0
- split_delta_z:
The minimum amount of z-difference that will trigger a split move. The check algorithm will traverse a move by the amount specified in move_check_distance until it finds a z-delta greater than or equal to split_delta_z. Default is 0.025
- mesh_pps:
A comma separated pair of integers (x,y) indicating the number of points to interpolate for each segment in the axis. Consider a 3x3 probed grid. There are two “segments” between the X points, and two between the Y points. If you specify mesh_pps: 3,3 then there will be 6 interpolated points and 3 probe points in the mesh along each axis, for a total of 81 points in the mesh. If this is set to zero, then no interpolation will be done on the axis. Note that this value can also be a single integer, in which case the number is applied to both axes. Default is 2,2.
- algorithm:
The interpolation algorithm to use. May be either “lagrange” or “bicubic”. 3x3, 4x3, and 3x4 meshes must use lagrange, 4x4 or above may choose either. Default is lagrange.
- bicubic_tension:
The bicubic interpolation algorithm uses cardinal hermite spline interpolation. A “tension” parameter may be entered to influence the amount of slope interpolated in the curve. That is, a higher tension will make the mesh more curvy, and zero tension will be closer to bilinear interpolation. Be careful with this setting as generating steep hills and valleys in the mesh may cause the nozzle to crash. This setting will be ignored if not using bicubic interpolation. Default is 0.2.
### Bed Mesh Gcodes
BED_MESH_CALIBRATE
Initiates the calibration procedure (ie probes the bed and builds the mesh).
BED_MESH_OUTPUT
Prints the results of the Mesh to the terminal.
BED_MESH_MAP
This will probe the bed, serialize the points, and send them to the terminal. No mesh will be generated during this procedure, so no correction will be applied. It will however clear any prior mesh data. The purpose of this gcode is to allow for Octoprint plugins such as PrusaMeshMap to easily fetch probe data from a large number of sampled points. This has not yet been implemented in any plugins, but the functionality is there for those who wish to take advantage of it.
BED_MESH_CLEAR
This will clear the mesh from memory. No further Z adjustment will be performed after a clear.
BED_MESH_PROFILE LOAD=<name> SAVE=<name> REMOVE=<name>
BED_MESH_PROFILE can be used to save mesh state to persistent storage. This isn't advised for MK3 users as bed geometry may not be consistent between prints, However users with rigid beds and glass surfaces may find this functionality useful.
### Generating Aliases
To generate gcode aliases, the following can be added to your printer.cfg:
[gcode_macro G80]
gcode:
G28
BED_MESH_CALIBRATE
G1 X0 Y0 Z0.4 F4000
[gcode_macro G81]
gcode:
BED_MESH_OUTPUT
This will create G80 and G81 aliases respectively. Also, as you can see, the G80 Macro moves the tool back to the origin after calibration. You can alter this GCode line to move it where you please at a speed that best suits your printer.
### Final Notes
- Generally probing a 3x3 grid with default mesh values will produce the desired result. However, one may wish to experiment with more probe points and different interpolation algorithms to eliminate deadspots. Lagrange interpolation tends to oscillate as the number of samples increase, so it is recommended to use bicubic interpolation for larger probe grids.
Bed Skew Correction:
--------------------
TODO (COMING SOON)
Pressure Advance
----------------
The next thing you'll have to do is to configure _Pressure Advance_ (from this point downwards _PA_). Without _PA_ you will be getting ugly prints with blobs and curling at corners and lots of oozing. There is a [guide](https://github.com/KevinOConnor/klipper/blob/master/docs/Pressure_Advance.md) on how to configure _PA_ in the main Klipper repository, but it is complex and requires lots of manual steps. Read it through to understand what it is about, but don't actually follow it. We provide a setup process which is more advanced and simpler at the same time, where you only have to print one test object from which you can derive the _PA_ value. The guide is [here](Pressure_Advance.md).
[Alpha] Probe Temperature Compensation
----------------------------------------
Initial support has been added for probe temperature drift compensation. In its current form temperature compensation is configured manually, in a similar fashion to the method used in stock Prusa firmware.
To enable probe temperature support, add the following to your printer.cfg:
[probe_temp]
sensor_type: EPCOS 100K B57560G104F
sensor_pin: PF3
t_offsets:
35.0, 0.0
40.0, 0.02
45.0, 0.06
50.0, 0.120
55.0, 0.2
60.0, 0.3
Note that the offsets above are Prusa Firmware's defaults. Also note the space padding the front of each offset. This is <b>required</b>, otherwise Klipper's parser will get confused.
The following gcodes are added with this module:
GET_PROBE_TEMP
This simply returns the probe's current temperature to octoprints terminal
PROBE_WAIT TEMP=<Target Temperature> TIMEOUT=<SECONDS>
Use the gcode above to wait for the probe to reach a certain temperature. The minimum target is 20, maximum is 70. The direction is automatically determined by the heater state. If both heaters are off then the gcode will wait for the probe to cool to the supplied temperature, otherwise it will wait for it to heat. Timeout sets a timeout in minutes. If the timeout is reached the printer will stop waiting and no changes will be made to the offset. A timeout value of 0 will wait indefinitely, which is the default value.
APPLY_TEMP_OFFSET
The gcode above will look up the probe's current temperature, calculate the offset to apply based on the supplied parameters, then adjust the gcode offset by that amount.
### Manual Calibration Procedure
1. Before Calibrating, make sure to remove t_offsets and all of its associated values from printer.cfg, then restart Klipper. This will make sure that no additional offset is applied during calibration.
2. Use a gcode similar to what is used for manual probe calibration on Prusa Firmware. A single layered object with a large surface area works well.
3. Your start gcode should look something like the following:
M83 ; extruder relative mode
M104 S210 ; Set nozzle temp
M140 S60 ; set bed temp
M190 S60 ; wait for bed temp
M109 S210 ; wait for extruder temp
G28 ; home axes
G1 X50 Y50 Z.4 ; move printhead to a good warming position
PROBE_WAIT TEMP=40 DIRECTION=heat ; wait for the probe to reach 40C
G80 ; mesh leveling
G1 X0 Y-3.0 F3000.0 ; go outside print area
G92 E0.0
G1 X60.0 E9.0 F1000.0 ; intro line
G1 X100.0 E12.5 F1000.0 ; intro line
G92 E0.0
Adjust the above to your filament requirements. You may be able to move your nozzle closer to the bed when warming, but be careful as mesh leveling has not yet been applied to compensate for warped bed geometry. Also note that bed geometry changes as heat is applied.
4. Use Klipper's SET_GCODE_OFFSET Z_ADJUST=[value] to act in a similar manner to live-z. Note that positive and negative offsets can be mapped to buttons (see Octoprint's Custom Control Editor plugin). A z_adjust value of +/-.02 is a good place to start, continue adjusting until you have your desired first layer
5. Use GET_POSITION to get your total z_offset. Your output will look something like this:
Send: GET_POSITION
Recv: // mcu: x:-4778 y:-5497 z:-25
Recv: // stepper: x:50.000000 y:50.000000 z:50.000000
Recv: // kinematic: X:50.000000 Y:50.000000 Z:50.000000
Recv: // toolhead: X:50.000000 Y:50.000000 Z:50.000000 E:0.000000
Recv: // gcode: X:50.000000 Y:50.000000 Z:49.980000 E:0.000000
Recv: // gcode base: X:0.000000 Y:0.000000 Z:-0.020000 E:0.000000
Recv: // gcode homing: X:0.000000 Y:0.000000 Z:-0.020000
The Z value next to gcode base is your total z adjustment. In this example, drift at 40C increased by .02, as an offset of -.02 was added to compensate. The value 40.0, 0.02 should be added to t_offsets in printer.cfg. NOTE: If you are already using SET_GCODE_OFFSET to adjust your nozzle height, don't forget to account for it.
### Auto Calibration Testing
Currently there is no automatic calibration because we simply do not have enough data, and the data we have is wildly inconsistent. For example, in our experiments, drift at X50 Y50 with only the bed on results in insignificant drift. With the extruder on, at the same position, drift increases significantly. The amount of drift increases with higher extruder and bed temperatures.
However, if the probe is repositioned directly over the center standoff on the bed, roughly X97 Y103, the probe actually triggers LOWER. This holds true at high extruder and bed temperatures. This phenomenon suggests that “drift” may not be related to the Probe, but rather to a change in bed geometry. If that is the case then drift can simply be overcome with a more accurate mesh when applying leveling.
That being said, more data will lead to a more solid conclusion, and if the probe itself is indeed experiencing drift, then perhaps we can come up with a way to automatically compensate for it. If you are interested in collecting this data, read on.
The current probe_temp implementation includes the following gcode:
CALIBRATE_PROBE_TEMP X=97 Y=103 TARGET=45 B_TMP=70 E_TMP=200 TIMEOUT=180
Currently the gcode doesn't actually calibrate the sensor. It gathers temperature and drift data, serializes it, and dumps it to a file. As you can see there are several options, which are explained below. The values for each option above are defaults, any option may be left out if you desire a default value.
- X - X position from which to gather probe data
- Y - Y position from which to gather probe data
- TARGET - The target temperature for the Pinda to reach. Once it has been obtained collection will end.
- B_TMP - The bed temperature to set
- E_TMP - The extruder temperature to set
- TIMEOUT - The maximum time, in seconds, between each sample. If timeout is reached collection will end.
When calibration is complete (or times out) data will be dumped to ~/PindaTemps.json. It is useful to calibrate at the default position (center standoff) and one other position such as X50 Y50, up to a target temp of 55 to 60C. To reach these probe temperatures it is recommended to heat the extruder to a minimum 235C and the Bed to 90C.
If you are interested in collecting data, please visit the \#mk3-klipper channel on the Prusa3d-Users discord server.
Filament Sensors
----------------
The MK3 and MK3S both have a filament sensor, but they each use a different type of sensor. In order to use them with Klipper you'll need to update your printer.cfg with the appropriate section below, then add/update the common sections to your printer.cfg. Look in the example-extra.cfg for more details. You must use Klipper's RESUME command and not OctoPrint's once a runout has been detected and filament has loaded.
### MK3 Filament Sensor (pat9125)
[pat9125 fsensor]
pause_on_runout: True
runout_gcode:
M118 Filament Runout Detected
M600 X250 Y-3 Z10
insert_gcode:
M118 Filament Load Detected
LOAD_FILAMENT
invert_axis: True
oq_enable: True
### MK3S Filament Sensor
[filament_switch_sensor fsensor]
pause_on_runout: True
runout_gcode:
M118 Filament Runout Detected
M600 X250 Y-3 Z10
insert_gcode:
M118 Filament Load Detected
LOAD_FILAMENT
event_delay: 3.0
switch_pin: !PK0
### Common configuration for both
You will need to add the following sections to your printer.cfg so that the M600 and LOAD_FILAMENT commands in the above work as intended. You can adjust the gcodes to suit your needs, as long as the M600 starts with Klipper's PAUSE command.
[respond]
default_type: command
[pause_resume]
# Filament change gcode, parameters are a default park position if no XYZ is specified - Z is relative.
[gcode_macro M600]
default_parameter_X: 100
default_parameter_Y: 0
default_parameter_Z: 10
gcode:
PAUSE
G91
G1 E-.8 F2700
G1 Z{Z}
G90
G1 X{X} Y{Y} F3000
M117 Ready for unload
[gcode_macro LOAD_FILAMENT]
gcode:
M117 Loading Filament...
G1 E70 F400
G1 E40 F100
G92 E0.0
M400
M117 Load Complete
[gcode_macro UNLOAD_FILAMENT]
gcode:
M117 Unloading Filament...
G1 E0.5 F1000
G1 E-0.5 F1000
G1 E1.0 F1000
G1 E-1.0 F1000
G1 E1.5 F1000
G1 E-1.5 F1000
G1 E2.0 F1000
G1 E-100 F3000
M400
M117 Remove Filament Now!
M300 S300 P1000
These two are not strictly needed, but they are useful to add - They add a __Change Filament__ and __Resume__ to the Filament menu on the display.
````
[menu __filament __change]
type: command
name: Change Filament
gcode:
M600
[menu __filament __resume]
type: command
name: Resume
gcode:
RESUME
````
### Notes
You can check the status of the filament sensor with the following command
QUERY_FILAMENT_SENSOR SENSOR=fsensor
If you are using the 'filament_switch_sensor' and the status is inverted, then you just remove the ! on the 'switch_pin' so that it reads
switch_pin: PK0
If you are using an older version of OctoPrint (<0.16) then you will also need to edit OctoPrint's config.yaml and add `unknownCommandsNeedAck: true` to the serial section. It should look something like this.
````
serial:
additionalPorts:
- /tmp/printer
baudrate: 250000
unknownCommandsNeedAck: true
disconnectOnErrors: false
logPositionOnCancel: true
port: /tmp/printer
````
You need to SSH into your Pi or use SFTP. The file should be in ~/.octoprint directory.
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.examples.golfing;
import org.kie.api.KieServices;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;
public class GolfingExample {

    /**
     * Entry point: verifies the classpath KIE container, prints any build
     * messages, and runs the example.
     *
     * @param args command line arguments (unused)
     */
    public static void main(final String[] args) {
        KieContainer kc = KieServices.Factory.get().getKieClasspathContainer();
        System.out.println(kc.verify().getMessages().toString());
        execute( kc );
    }

    /**
     * Inserts every (name, color, position) combination as a {@link Golfer}
     * fact and fires all rules so the rule base can work out the solution.
     *
     * @param kc container providing the "GolfingKS" session definition
     */
    public static void execute( KieContainer kc ) {
        KieSession ksession = kc.newKieSession("GolfingKS");
        try {
            String[] names = new String[]{"Fred", "Joe", "Bob", "Tom"};
            String[] colors = new String[]{"red", "blue", "plaid", "orange"};
            int[] positions = new int[]{1, 2, 3, 4};

            // Insert the full cross product; the rules prune invalid combinations.
            for ( int n = 0; n < names.length; n++ ) {
                for ( int c = 0; c < colors.length; c++ ) {
                    for ( int p = 0; p < positions.length; p++ ) {
                        ksession.insert( new Golfer( names[n],
                                                     colors[c],
                                                     positions[p] ) );
                    }
                }
            }
            ksession.fireAllRules();
        } finally {
            // Always release the session's resources, even if rule
            // evaluation throws; otherwise the session leaks.
            ksession.dispose();
        }
    }

    /**
     * Simple fact type: a golfer identified by name, sweater color and
     * position in the line-up.
     */
    public static class Golfer {
        private String name;
        private String color;
        private int position;

        public Golfer() {
        }

        public Golfer(String name,
                      String color,
                      int position) {
            super();
            this.name = name;
            this.color = color;
            this.position = position;
        }

        /**
         * @return the sweater color
         */
        public String getColor() {
            return this.color;
        }

        /**
         * @return the golfer's name
         */
        public String getName() {
            return this.name;
        }

        /**
         * @return the position in the line-up (1-4)
         */
        public int getPosition() {
            return this.position;
        }
    }
}
| {
"pile_set_name": "Github"
} |
platform_is :windows do
  require_relative '../../fixtures/classes'

  # Shared examples for WIN32OLE property-setter methods; the concrete
  # method under test is supplied via @method by the including spec.
  describe :win32ole_setproperty, shared: true do
    before :each do
      @dict = WIN32OLESpecs.new_ole('Scripting.Dictionary')
    end

    it "raises ArgumentError if no argument is given" do
      -> { @dict.send(@method) }.should raise_error ArgumentError
    end

    it "sets key to newkey and returns nil" do
      previous_key = 'oldkey'
      replacement_key = 'newkey'
      @dict.add(previous_key, 'value')

      # Renaming the key moves the stored value from the old key to the new one.
      result = @dict.send(@method, 'Key', previous_key, replacement_key)
      result.should == nil
      @dict[previous_key].should == nil
      @dict[replacement_key].should == 'value'
    end
  end
end
| {
"pile_set_name": "Github"
} |
(ns stedi.basics.cdk
(:require [clojure.edn :as edn]
[clojure.java.io :as io]
[stedi.cdk.alpha :as cdk]
[uberdeps.api :as uberdeps]))
;; CDK is a framework released by Amazon that allows developers to
;; deploy CloudFormation-based infrastructure using their prefered
;; language.
;; It is built from TypeScript and made available to other languages
;; through the JSii protocol by Amazon. JSii allows other languages to
;; interact with JavaScript classes through an RPC protocol.
;; cdk-clj wraps the JSii protocol for CDK classes in Clojure.
;; The best way of getting information about what is availble via CDK
;; is to call `(cdk/browse)` in the REPL. This will take you to the
;; AWS CDK API reference documentation.
(comment
(cdk/browse)
)
;; CDK applications consist of Constructs arranged into a tree
;; structure. Constructs represent one or more AWS resources. All CDK
;; applications have an App construct at the root of the
;; tree. `cdk-clj` exposes access to these constructs through the
;; `cdk/import` macro.
(cdk/import [[App] :from "@aws-cdk/core"])
;; Import does two things:
;; 1. makes App resolvable to a jsii-class in the local namespace
;; 2. aliases the ns for "@aws-cdk/core.App" to App
;; App will now resolve to a jsii-class
App
;; Invoking the class calls its constructor and returns a
;; jsii-instance:
(def app (App))
;; These constructor vars also have docstrings
App
;; Import also makes an alias to the ns that contains all the static
;; and instance methods for App
App/isApp
App/synth
;; You can also browse to a constructs documentation via browse on
;; the constructor or an instance:
(comment
(cdk/browse app)
(cdk/browse App)
)
;; Applications are composed of one or more Stacks, each representing
;; a CloudFormation Stack. A Stack is a Construct as well.
(cdk/import [[Stack] :from "@aws-cdk/core"])
;; Child constructs are connected to their parent by passing in the
;; parent as the scope of the child's constructor function.
(def stack (Stack app "cdk-clj-basics"))
;; Class instances implement the ILookup interface so they work with
;; keyword lookups
(:stackName stack)
;; A stack needs at least one resource Construct in order to be
;; deployable so lets add a bucket.
(cdk/import [[Bucket] :from "@aws-cdk/aws-s3"])
;; cdk-clj generates specs for and instruments all jsii constructors
;; and functions:
(comment
(Bucket stack nil) ; Fails with spec error
;; Worth noting that the CDK specs are closed
(Bucket stack "id" {:does-not-exist :foo}) ; Fails due to specs being closed
)
(def bucket (Bucket stack "bucket"))
;; Buckets aren't particularly interesting, and lambdas + serverless
;; are all the rage so lets add a lambda function as well.
(cdk/import [[Code Function Runtime] :from "@aws-cdk/aws-lambda"])
(defn- clean
  "Deletes the compiled `classes` directory.  `file-seq` walks parents
  first, so the sequence is reversed to remove children before the
  directories that contain them."
  []
  (doseq [entry (reverse (file-seq (io/file "classes")))]
    (io/delete-file entry)))
(def jarpath "target/app.jar")
;; Build an uberjar with the compiled source + dependency classes
(let [deps (edn/read-string (slurp "deps.edn"))]
  ;; Start from a clean slate so stale .class files are not packaged.
  (when (.exists (io/file "classes")) (clean))
  ;; uberdeps prints progress to *out*; with-out-str keeps the REPL quiet.
  (with-out-str
    (io/make-parents "classes/.")
    (io/make-parents jarpath)
    ;; AOT-compile the handler namespace into classes/ so the jar ships
    ;; loadable class files (the :classes alias presumably adds that
    ;; directory to the packaged paths — confirm in deps.edn).
    (compile 'stedi.lambada)
    (uberdeps/package deps jarpath {:aliases [:classes]})))
(comment
(cdk/browse Function)
)
;; AWS Lambda function construct, scoped to `stack` with construct id "my-fn".
(def my-fn
  (Function stack
            "my-fn"
            {:code (Code/fromAsset jarpath) ;; Calling a static method
             :handler "stedi.cdk.basics.Hello"
             :runtime (:JAVA_8 Runtime) ;; Getting a static property
             :environment {"BUCKET" (:bucketName bucket)} ;; Getting an instance property
             }))
(comment
;; See it bound to the construct tree
(map (comp :path :node)
(get-in stack [:node :children]))
)
;; We can grant the function write access to the bucket using an
;; instance method
(Bucket/grantWrite bucket my-fn)
;; CDK constructs often have functions for granting permissions,
;; adding metrics and triggering events.
;; This app can now be deployed using `cdk-cli` in a shell with AWS
;; credentials configured.
;; Synth:
;; cdk synth
;; Deploy:
;; cdk deploy
;; Destroy:
;; cdk destroy
| {
"pile_set_name": "Github"
} |
<?php
/**
 * @package Joomla.Site
 * @subpackage mod_tags_popular
 *
 * Default layout: renders the module's popular-tags list ($list) as linked
 * list items, optionally appending each tag's item count as a badge when
 * $display_count is set.
 *
 * @copyright Copyright (C) 2005 - 2017 Open Source Matters, Inc. All rights reserved.
 * @license GNU General Public License version 2 or later; see LICENSE.txt
 */
defined('_JEXEC') or die;
?>
<?php JLoader::register('TagsHelperRoute', JPATH_BASE . '/components/com_tags/helpers/route.php'); ?>
<div class="tagspopular<?php echo $moduleclass_sfx; ?>">
<?php if (!count($list)) : ?>
<div class="alert alert-no-items"><?php echo JText::_('MOD_TAGS_POPULAR_NO_ITEMS_FOUND'); ?></div>
<?php else : ?>
<ul>
<?php foreach ($list as $item) : ?>
<li>
<a href="<?php echo JRoute::_(TagsHelperRoute::getTagRoute($item->tag_id . ':' . $item->alias)); ?>">
<?php echo htmlspecialchars($item->title, ENT_COMPAT, 'UTF-8'); ?></a>
<?php if ($display_count) : ?>
<span class="tag-count badge badge-info"><?php echo $item->count; ?></span>
<?php endif; ?>
</li>
<?php endforeach; ?>
</ul>
<?php endif; ?>
</div>
"pile_set_name": "Github"
} |
{
'prototype:src/prototype/ajax/request.js':[
179,
230,
289,
],
'prototype:src/prototype/dom/form.js':[
139,
],
'prototype:src/prototype/dom/layout.js':[
52,
298,
1055,
1407,
],
'prototype:src/prototype/lang/object.js':[
143,
],
'prototype:test/unit/static/js/assertions.js':[
26,
79,
],
'prototype:test/unit/static/js/mocha.js':[
104,
236,
424,
1642,
2416,
],
'prototype:test/unit/static/js/proclaim.js':[
397,
450,
],
}
| {
"pile_set_name": "Github"
} |
/*
* linux/fs/nls/nls_koi8-ru.c
*
* Charset koi8-ru translation based on charset koi8-u.
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static struct nls_table *p_nls;
/*
 * uni2char - translate one Unicode code point to a single koi8-ru byte.
 *
 * Returns 1 and stores the byte in out[0] on success, 0 when the code
 * point has no koi8-ru representation, -ENAMETOOLONG when the output
 * buffer has no room, or whatever the underlying koi8-u table returns.
 *
 * koi8-ru is defined here as a small delta on koi8-u: only U+040E/U+045E
 * map differently, and the box-drawing pair U+255D/U+256C is dropped.
 * Everything else is delegated to the loaded koi8-u table (p_nls).
 */
static int uni2char(const wchar_t uni,
		unsigned char *out, int boundlen)
{
	if (boundlen <= 0)
		return -ENAMETOOLONG;

	switch (uni) {
	case 0x040e:			/* CYRILLIC CAPITAL SHORT U */
		out[0] = 0xbe;
		return 1;
	case 0x045e:			/* CYRILLIC SMALL SHORT U */
		out[0] = 0xae;
		return 1;
	case 0x255d:
	case 0x256c:
		return 0;		/* not representable in koi8-ru */
	default:
		/* fast path: identical to koi8-u */
		return p_nls->uni2char(uni, out, boundlen);
	}
}
/*
 * char2uni - translate one koi8-ru byte to a Unicode code point.
 *
 * Stores the code point in *uni and returns the number of input bytes
 * consumed (always 1 here), or the underlying koi8-u table's result for
 * bytes the two charsets share.
 *
 * koi8-ru and koi8-u differ only in bytes 0xae/0xbe (U+045E/U+040E).
 * Masking with 0xef folds both onto 0xae, so one compare detects the
 * pair; bit 0x10 then distinguishes 0xbe from 0xae.  This mirrors the
 * forward mapping in uni2char() (0x040e -> 0xbe, 0x045e -> 0xae).
 *
 * BUGFIX: the branches were inverted - the special-case translation ran
 * for every byte EXCEPT 0xae/0xbe, and the delegate ran for the two
 * bytes this table exists to remap.
 */
static int char2uni(const unsigned char *rawstring, int boundlen,
		wchar_t *uni)
{
	if ((*rawstring & 0xef) == 0xae) {
		/* 0xbe has bit 0x10 set -> U+040E; 0xae -> U+045E */
		*uni = (*rawstring & 0x10) ? 0x040e : 0x045e;
		return 1;
	}

	/* fast path: identical to koi8-u */
	return p_nls->char2uni(rawstring, boundlen, uni);
}
/*
 * NLS operations table registered with the kernel.  Only the two
 * conversion hooks are set statically; the case-conversion tables
 * (charset2upper/charset2lower) are copied from the loaded koi8-u
 * table at module init.
 */
static struct nls_table table = {
	.charset = "koi8-ru",
	.uni2char = uni2char,
	.char2uni = char2uni,
};
/*
 * Module init: koi8-ru is implemented as a delta on koi8-u, so the
 * parent table must be loadable; fail with -EINVAL otherwise.
 */
static int __init init_nls_koi8_ru(void)
{
	p_nls = load_nls("koi8-u");
	if (!p_nls)
		return -EINVAL;

	/* Borrow the koi8-u case-conversion tables unchanged. */
	table.charset2upper = p_nls->charset2upper;
	table.charset2lower = p_nls->charset2lower;

	return register_nls(&table);
}
/* Module teardown: deregister our table, then drop the koi8-u reference
 * taken by load_nls() during init. */
static void __exit exit_nls_koi8_ru(void)
{
	unregister_nls(&table);
	unload_nls(p_nls);
}
module_init(init_nls_koi8_ru)
module_exit(exit_nls_koi8_ru)
MODULE_LICENSE("Dual BSD/GPL");
| {
"pile_set_name": "Github"
} |
// Distributes layers (control option cmd ,)
@import '../inventory.js'
// Sketch plugin entry point; `context` is supplied by Sketch and carries
// the current document and selection.
var onRun = function (context) {
// NOTE(review): doc/selection are assigned without var, i.e. left global -
// presumably so the inventory.js helpers can reach them; confirm before
// scoping them locally.
doc = context.document;
selection = context.selection;
// Initialize the shared com.getflourish plugin state for this run.
com.getflourish.common.init(context);
// Distribute the selected layers horizontally.
com.getflourish.utils.sendDistributeHorizontally();
}
"pile_set_name": "Github"
} |
package hcsshim
import "github.com/Sirupsen/logrus"
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.  It converts the caller-facing DriverInfo into the native
// calling convention, invokes the platform call, and logs both outcomes.
func UnprepareLayer(info DriverInfo, layerId string) error {
	const title = "hcsshim::UnprepareLayer "
	logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)

	// Convert info to API calling convention
	nativeInfo, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	if err := unprepareLayer(&nativeInfo, layerId); err != nil {
		// Wrap with call-site context before surfacing to the caller.
		wrapped := makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
		logrus.Error(wrapped)
		return wrapped
	}

	logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
	return nil
}
| {
"pile_set_name": "Github"
} |
// @flow
// This file is generated automatically by `scripts/build/typings.js`. Please, don't change it.

// A span of time; each bound may be a Date object or a millisecond timestamp.
export type Interval = {
start: Date | number,
end: Date | number,
}

// Locale object shape; every field is optional, and the function-valued
// members are typed loosely as (...args) => any by the generator.
export type Locale = {
code?: string,
formatDistance?: (...args: Array<any>) => any,
formatRelative?: (...args: Array<any>) => any,
localize?: {
ordinalNumber: (...args: Array<any>) => any,
era: (...args: Array<any>) => any,
quarter: (...args: Array<any>) => any,
month: (...args: Array<any>) => any,
day: (...args: Array<any>) => any,
dayPeriod: (...args: Array<any>) => any,
},
formatLong?: {
date: (...args: Array<any>) => any,
time: (...args: Array<any>) => any,
dateTime: (...args: Array<any>) => any,
},
match?: {
ordinalNumber: (...args: Array<any>) => any,
era: (...args: Array<any>) => any,
quarter: (...args: Array<any>) => any,
month: (...args: Array<any>) => any,
day: (...args: Array<any>) => any,
dayPeriod: (...args: Array<any>) => any,
},
options?: {
weekStartsOn?: 0 | 1 | 2 | 3 | 4 | 5 | 6,
firstWeekContainsDate?: 1 | 2 | 3 | 4 | 5 | 6 | 7,
},
}

// Calendar/clock quantities; all components optional.
export type Duration = {
years?: number,
months?: number,
weeks?: number,
days?: number,
hours?: number,
minutes?: number,
seconds?: number,
}

// Default export: takes a date (Date or timestamp) plus optional locale /
// week-start options, and returns a Date.
declare module.exports: (
date: Date | number,
options?: {
locale?: Locale,
weekStartsOn?: 0 | 1 | 2 | 3 | 4 | 5 | 6,
}
) => Date
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.