text
stringlengths 2
99.9k
| meta
dict |
---|---|
package cloudcallcenter
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// Appraise invokes the cloudcallcenter.Appraise API synchronously.
// api document: https://help.aliyun.com/api/cloudcallcenter/appraise.html
func (client *Client) Appraise(request *AppraiseRequest) (response *AppraiseResponse, err error) {
	resp := CreateAppraiseResponse()
	doErr := client.DoAction(request, resp)
	return resp, doErr
}
// AppraiseWithChan invokes the cloudcallcenter.Appraise API asynchronously,
// delivering the result (or error) on one of the two returned channels.
// api document: https://help.aliyun.com/api/cloudcallcenter/appraise.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) AppraiseWithChan(request *AppraiseRequest) (<-chan *AppraiseResponse, <-chan error) {
	respChan := make(chan *AppraiseResponse, 1)
	errChan := make(chan error, 1)
	task := func() {
		// Both channels are closed once the call completes, regardless of outcome.
		defer close(respChan)
		defer close(errChan)
		if resp, callErr := client.Appraise(request); callErr != nil {
			errChan <- callErr
		} else {
			respChan <- resp
		}
	}
	if submitErr := client.AddAsyncTask(task); submitErr != nil {
		// The task never ran, so report the scheduling error and close both
		// channels here instead.
		errChan <- submitErr
		close(respChan)
		close(errChan)
	}
	return respChan, errChan
}
// AppraiseWithCallback invokes the cloudcallcenter.Appraise API asynchronously
// and hands the result to the supplied callback. The returned channel receives
// 1 on a completed invocation or 0 when the task could not be scheduled, then
// is closed.
// api document: https://help.aliyun.com/api/cloudcallcenter/appraise.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) AppraiseWithCallback(request *AppraiseRequest, callback func(response *AppraiseResponse, err error)) <-chan int {
	result := make(chan int, 1)
	submitErr := client.AddAsyncTask(func() {
		defer close(result)
		resp, callErr := client.Appraise(request)
		callback(resp, callErr)
		result <- 1
	})
	if submitErr != nil {
		// Scheduling failed: the task closure never runs, so invoke the
		// callback with the error and signal completion ourselves.
		defer close(result)
		callback(nil, submitErr)
		result <- 0
	}
	return result
}
// AppraiseRequest is the request struct for api Appraise
type AppraiseRequest struct {
*requests.RpcRequest
// Each field below is serialized into the request query string under the
// name given in its `name` tag (see the `position:"Query"` tags).
Source string `position:"Query" name:"Source"`
Type string `position:"Query" name:"Type"`
RamId string `position:"Query" name:"RamId"`
Acid string `position:"Query" name:"Acid"`
PressKey string `position:"Query" name:"PressKey"`
InstanceId string `position:"Query" name:"InstanceId"`
SkillGroupId string `position:"Query" name:"SkillGroupId"`
PressKeyMapping string `position:"Query" name:"PressKeyMapping"`
}
// AppraiseResponse is the response struct for api Appraise
type AppraiseResponse struct {
*responses.BaseResponse
// Fields are decoded from the JSON or XML response body per their tags.
// RequestId identifies this API call (useful when contacting support).
RequestId string `json:"RequestId" xml:"RequestId"`
// Success reports whether the service handled the request successfully.
Success bool `json:"Success" xml:"Success"`
// Code and Message carry the service-reported result code and text.
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
HttpStatusCode int `json:"HttpStatusCode" xml:"HttpStatusCode"`
}
// CreateAppraiseRequest creates a request to invoke Appraise API.
// The request is pre-configured with the product, version, and action name
// and uses HTTP POST.
func CreateAppraiseRequest() (request *AppraiseRequest) {
	req := &AppraiseRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	req.InitWithApiInfo("CloudCallCenter", "2017-07-05", "Appraise", "", "")
	req.Method = requests.POST
	return req
}
// CreateAppraiseResponse creates a response to parse from Appraise response.
func CreateAppraiseResponse() (response *AppraiseResponse) {
	return &AppraiseResponse{
		BaseResponse: &responses.BaseResponse{},
	}
}
| {
"pile_set_name": "Github"
} |
Description of the TNO AND CENTAUR DIAMETERS, ALBEDOS, AND DENSITIES bundle V1.0
================================================================================
Bundle Generation Date: 2018-08-27
Peer Review: 2018a Asteroid Review
Discipline node: Small Bodies Node
Content description for the TNO AND CENTAUR DIAMETERS, ALBEDOS, AND DENSITIES bundle
====================================================================================
OVERVIEW
This data set is a compilation of published diameters, albedos, and
densities for Transneptunian Objects (TNOs) and Centaurs. A total of 194
objects are listed, many with more than one entry. This version covers
published values through 31 March 2018.
DATA
Data were collected from the published literature for TNOs and Centaurs,
with many objects having data reported by more than one group. Reported
values for Pluto and Charon are limited to those published after 1985.
Entries are listed by object permanent number, then by provisional
designation, then by year of publication, then alphabetically by
author(s). Users are referred to the respective papers regarding details
of observations and methodology.
TABLES
The table 'tno_centaur_diam_alb_dens' lists data for objects with the
following columns:
* object number, name, and provisional designation;
* object heliocentric semimajor axis (AU), eccentricity, and
inclination;
* object dynamical type;
* number of known companions;
* MPC-reported absolute magnitude (as of 31 March 2016);
* absolute magnitude value used in albedo calculation with error bar;
* effective diameter (km) with upper/lower error bars and note code;
* primary diameter (km) with upper/lower error bars and note code;
* companion diameter (km) with upper/lower error bars and note code;
* albedo with upper/lower error bars, note code, and color code;
* system mass density (g/cm^3) with upper/lower error bars and note
code;
* methods used for diameter/albedo determination and density
determination;
* and reference code.
Dynamical type codes are as follows:
1:2 = Neptune 1:2 resonance object,
2:5 = Neptune 2:5 resonance object,
2:7 = Neptune 2:7 resonance object,
3:5 = Neptune 3:5 resonance object,
3:8 = Neptune 3:8 resonance object,
3:10 = Neptune 3:10 resonance object,
4:7 = Neptune 4:7 resonance object,
CEN = Centaur,
CUB = Cubewano,
HAU = Haumea family,
NL4 = Neptune trojan, L4 position,
PLU = Plutino (= Neptune 2:3 resonance object),
SAT = satellite,
SDO = scattered disk object, and
TNO = other Transneptunian Object.
Dynamical types are based on the Minor Planet Center 'Unusual Minor
Planet' listings and on classifications by the Deep Ecliptic Survey Team
[http://www.boulder.swri.edu/~buie/kbo/desclass.html] (see Elliot et al.,
2005). Haumea family members are those listed by Snodgrass et al., 2010.
For single objects, effective diameter is the measured diameter and the
component diameter columns have null values. For objects with more than
one component, effective diameter is the combined effective diameter
(where D_eff^2 = D_1^2 + D_2^2 for binaries, etc.) and individual
component diameters are provided in the respective columns. In general,
albedo and density values are the system averages; if they are specific to
a component, they include a note code specifying which component (1, 2, 3,
...) in a separate entry for that component. Separate entries are also
provided for additional components after the first two, again with note
codes specifying which component (3, 4, ...).
Note codes indicate comments on data values as follows (more than one code
may apply):
1, 2, 3, ... = component diameter for first, second, third, etc.,
system component;
A = assumed, applies to adopted values used from other publications or
to assumed densities used for dynamical determinations of
diameter/albedo;
D = derived by the compiler from values reported by the respective
source;
G = greater than;
L = less than;
Albedo color codes indicate color reported by the source: B = blue, R =
red, V = visual, and * = geometric (no color specified).
Method codes for diameter/albedo determinations are as follows (more than
one code may apply):
E = mutual events;
I = direct imaging;
M = multiple or modified methods (see respective source for details);
O = stellar occultation;
T = thermal/radiometric observations/modeling;
Y = dynamical (based on system mass from orbit determination, observed
magnitude, and assumed density);
# = cited source reported only system mass, values given here are
derived;
* = reanalysis of previously published results.
Method codes for density determinations are as follows (more than one code
may apply):
H = hydrodynamic model (based on observed/derived oblateness);
M = multiple or modified methods (see respective source for details);
Y = dynamical (based on system mass from orbit determination and
independently measured diameter);
* = reanalysis of previously published results.
Reference codes refer to full reference information listed in the
'references' table. This table has columns for reference code and full
reference.
Regarding uncertainties on measured values, readers are referred to the
respective references. Dashes or invalid values (usually -9.99) indicate
unassigned or unknown values.
MODIFICATION HISTORY
Differences between the current database version and the previous version
(2016) are:
* A total of 194 objects/systems are represented, or 4 more than the
previous release.
* A total of 652 entries are included, or 70 more than the previous
release.
* Heliocentric orbit semimajor axis, eccentricity, and inclination are
updated from the MPC Orbit (MPCORB) Database, and outer solar system
dynamical types are supplemented by classifications from the Deep
Ecliptic Survey Team.
Modifications in successive versions are summarized as follows:
* V1.0 in PDS3 (31 March 2013) contained 341 entries representing 125 objects/
systems and included object number, name, and provisional designation;
heliocentric semimajor axis, eccentricity, inclination, and dynamical
type; number of known companions; absolute magnitude from MPC and as
used in albedo calculations; effective diameter and (for binary/
multiple objects) primary and secondary diameters; albedo, system mass
density, codes for methods used, and reference codes.
* V2.0 (31 March 2014) contained 519 entries representing 178 objects/
systems.
* V3.0 (31 March 2015) contained 549 entries representing 181 objects/
systems.
* V4.0 (31 March 2016) contained 582 entries representing 190 objects/
systems.
* V1.0 in PDS4 (31 March 2018) contains 652 entries representing 194 objects/
systems.
JOURNALS USED
Cited sources include articles from the following journals: Astronomy and
Astrophysics; Astronomical Journal; Astrophysical Journal; Astrophysical
Journal Letters; Bulletin of the American Astronomical Society; Earth,
Moon and Planets; Icarus; Monthly Notices of the Royal Astronomical
Society; Nature; Publications of the Astronomical Society of the Pacific;
and Science. Other sources are from conference proceedings, IAUCs, and
book contributions.
REFERENCES
Elliot, J. L., et al., 2005, The Deep Ecliptic Survey: A search for
Kuiper belt objects and Centaurs. II. Dynamical classification, the Kuiper
belt plane, and the core population, Astron. J., 129:1117-1162.
Snodgrass, C., B. Carry, C. Dumas, and O. Hainaut, 2010, Characterisation
of candidate members of (136108) Haumea's family, Astron. & Astrophys.,
511:A72.
Caveats to the data user
========================
The Astrophysics Data System (ADS) was used to collect the data presented in these tables. Users are referred to the individual papers in the ancillary document for the confidence level of each item.
| {
"pile_set_name": "Github"
} |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SET ISO8859-1
TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
NOSUGGEST !
# ordinal numbers
COMPOUNDMIN 1
# only in compounds: 1th, 2th, 3th
ONLYINCOMPOUND c
# compound rules:
# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
COMPOUNDRULE 2
COMPOUNDRULE n*1t
COMPOUNDRULE n*mp
WORDCHARS 0123456789
PFX A Y 1
PFX A 0 re .
PFX I Y 1
PFX I 0 in .
PFX U Y 1
PFX U 0 un .
PFX C Y 1
PFX C 0 de .
PFX E Y 1
PFX E 0 dis .
PFX F Y 1
PFX F 0 con .
PFX K Y 1
PFX K 0 pro .
SFX V N 2
SFX V e ive e
SFX V 0 ive [^e]
SFX N Y 3
SFX N e ion e
SFX N y ication y
SFX N 0 en [^ey]
SFX X Y 3
SFX X e ions e
SFX X y ications y
SFX X 0 ens [^ey]
SFX H N 2
SFX H y ieth y
SFX H 0 th [^y]
SFX Y Y 1
SFX Y 0 ly .
SFX G Y 2
SFX G e ing e
SFX G 0 ing [^e]
SFX J Y 2
SFX J e ings e
SFX J 0 ings [^e]
SFX D Y 4
SFX D 0 d e
SFX D y ied [^aeiou]y
SFX D 0 ed [^ey]
SFX D 0 ed [aeiou]y
SFX T N 4
SFX T 0 st e
SFX T y iest [^aeiou]y
SFX T 0 est [aeiou]y
SFX T 0 est [^ey]
SFX R Y 4
SFX R 0 r e
SFX R y ier [^aeiou]y
SFX R 0 er [aeiou]y
SFX R 0 er [^ey]
SFX Z Y 4
SFX Z 0 rs e
SFX Z y iers [^aeiou]y
SFX Z 0 ers [aeiou]y
SFX Z 0 ers [^ey]
SFX S Y 4
SFX S y ies [^aeiou]y
SFX S 0 s [aeiou]y
SFX S 0 es [sxzh]
SFX S 0 s [^sxzhy]
SFX P Y 3
SFX P y iness [^aeiou]y
SFX P 0 ness [aeiou]y
SFX P 0 ness [^y]
SFX M Y 1
SFX M 0 's .
SFX B Y 3
SFX B 0 able [^aeiou]
SFX B 0 able ee
SFX B e able [^aeiou]e
SFX L Y 1
SFX L 0 ment .
REP 90
REP a ei
REP ei a
REP a ey
REP ey a
REP ai ie
REP ie ai
REP alot a_lot
REP are air
REP are ear
REP are eir
REP air are
REP air ere
REP ere air
REP ere ear
REP ere eir
REP ear are
REP ear air
REP ear ere
REP eir are
REP eir ere
REP ch te
REP te ch
REP ch ti
REP ti ch
REP ch tu
REP tu ch
REP ch s
REP s ch
REP ch k
REP k ch
REP f ph
REP ph f
REP gh f
REP f gh
REP i igh
REP igh i
REP i uy
REP uy i
REP i ee
REP ee i
REP j di
REP di j
REP j gg
REP gg j
REP j ge
REP ge j
REP s ti
REP ti s
REP s ci
REP ci s
REP k cc
REP cc k
REP k qu
REP qu k
REP kw qu
REP o eau
REP eau o
REP o ew
REP ew o
REP oo ew
REP ew oo
REP ew ui
REP ui ew
REP oo ui
REP ui oo
REP ew u
REP u ew
REP oo u
REP u oo
REP u oe
REP oe u
REP u ieu
REP ieu u
REP ue ew
REP ew ue
REP uff ough
REP oo ieu
REP ieu oo
REP ier ear
REP ear ier
REP ear air
REP air ear
REP w qu
REP qu w
REP z ss
REP ss z
REP shun tion
REP shun sion
REP shun cion
REP size cise
| {
"pile_set_name": "Github"
} |
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
)
// TestSignedURL verifies that SignedURL produces the expected URL when
// signing with an RSA private key and a fixed expiry time.
func TestSignedURL(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("rsa"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		// Fail fast: comparing the empty url below would only add a
		// confusing secondary failure.
		t.Fatal(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"ZMw18bZVhySNYAMEX87RMyuZCUMtGLVi%2B2zU2ByiQ0Rxgij%2BhFZ5LsT" +
		"5ZPIH5h3QXB%2BiSb1URJnZo3aF0exVP%2FYR1hpg2e65w9HHt7yYjIqcg" +
		"%2FfAOIyxriFtgRYk3oAv%2FFLF62fI8iF%2BCp0fWSm%2FHggz22blVnQz" +
		"EtSP%2BuRhFle4172L%2B710sfMDtyQLKTz6W4TmRjC9ymTi8mVj95dZgyF" +
		"RXbibTdtw0JzndE0Ig4c6pU4xDPPiyaziUSVDMIpzZDJH1GYOGHxbFasba4" +
		"1rRoWWkdBnsMtHm2ck%2FsFD2leL6u8q0OpVAc4ZdxseucL4OpCy%2BCLhQ" +
		"JFQT5bqSljP0g%3D%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
// TestSignedURL_PEMPrivateKey verifies SignedURL output when the private key
// is supplied in PEM form.
func TestSignedURL_PEMPrivateKey(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		// Fail fast instead of comparing an empty url below.
		t.Fatal(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"gHlh63sOxJnNj22X%2B%2F4kwOSNMeqwXWr4udEfrzJPQcq1xzxA8ovMM5SOrOc%" +
		"2FuE%2Ftc9%2Bq7a42CDBwZff1PsvuJMBDaPbluU257h%2Bvxx8lHMnb%2Bg1wD1" +
		"99FiCE014MRH9TlIg%2FdXRkErosVWTy4GqAgZemmKHo0HwDGT6IovB9mdg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
// TestSignedURL_SignBytes verifies that a caller-supplied SignBytes function
// is used for signing: the fake signer returns "signed", whose base64
// encoding must appear as the Signature query parameter.
func TestSignedURL_SignBytes(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		SignBytes: func(b []byte) ([]byte, error) {
			return []byte("signed"), nil
		},
		Method:      "GET",
		MD5:         "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:     expires,
		ContentType: "application/json",
		Headers:     []string{"x-header1", "x-header2"},
	})
	if err != nil {
		// Fail fast instead of comparing an empty url below.
		t.Fatal(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"c2lnbmVk" // base64('signed') == 'c2lnbmVk'
	if url != want {
		t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want)
	}
}
// TestSignedURL_URLUnsafeObjectName verifies that object names containing
// spaces and non-ASCII characters are percent-encoded in the signed URL.
func TestSignedURL_URLUnsafeObjectName(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		// Fail fast instead of comparing an empty url below.
		t.Fatal(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"LSxs1YwXNKOa7mQv1ZAI2ao0Fuv6yXLLU7%2BQ97z2B7hYZ57OiFwQ72EdGXSiIM" +
		"JwLisEKkwoSlYCMm3uuTdgJtXXVi7SYXMfdeKaonyQwMv531KETCBTSewt8CW%2B" +
		"FaUJ5SEYG44SeJCiqeIr3GF7t90UNWs6TdFXDaKShpQzBGg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
// TestSignedURL_MissingOptions checks that SignedURL rejects option structs
// that are missing required fields or combine mutually exclusive ones,
// returning an error containing the expected message fragment.
func TestSignedURL_MissingOptions(t *testing.T) {
	t.Parallel()
	pk := dummyKey("rsa")
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	var tests = []struct {
		opts   *SignedURLOptions
		errMsg string
	}{
		{
			&SignedURLOptions{},
			"missing required GoogleAccessID",
		},
		{
			&SignedURLOptions{GoogleAccessID: "access_id"},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			// Both PrivateKey and SignBytes set: also invalid.
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
				PrivateKey:     pk,
			},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
			},
			"missing required expires",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
				Expires:        expires,
				MD5:            "invalid",
			},
			"invalid MD5 checksum",
		},
	}
	for _, test := range tests {
		_, err := SignedURL("bucket", "name", test.opts)
		if err == nil {
			// Guard against a nil error: calling err.Error() below would
			// panic and abort the whole test instead of reporting a failure.
			t.Errorf("expected err containing %q, got nil", test.errMsg)
			continue
		}
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("expected err: %v, found: %v", test.errMsg, err)
		}
	}
}
// dummyKey loads a fake private key of the given kind ("rsa" or "pem")
// from the testdata directory. It aborts the test binary on read failure.
func dummyKey(kind string) []byte {
	data, err := ioutil.ReadFile("./testdata/dummy_" + kind)
	if err != nil {
		log.Fatal(err)
	}
	return data
}
// TestCopyToMissingFields checks that a copy between objects fails with a
// "name is empty" error whenever a source/destination bucket or object name
// is missing.
func TestCopyToMissingFields(t *testing.T) {
	t.Parallel()
	var tests = []struct {
		srcBucket, srcName, destBucket, destName string
		errMsg                                   string
	}{
		{
			"mybucket", "", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcname", "mybucket", "",
			"name is empty",
		},
		{
			"", "srcfile", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcfile", "", "destname",
			"name is empty",
		},
	}
	ctx := context.Background()
	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
	if err != nil {
		// Use the testing API to report setup failure instead of panicking,
		// so other tests in the binary still run.
		t.Fatal(err)
	}
	for i, test := range tests {
		src := client.Bucket(test.srcBucket).Object(test.srcName)
		dst := client.Bucket(test.destBucket).Object(test.destName)
		_, err := dst.CopierFrom(src).Run(ctx)
		if err == nil {
			// Guard against a nil error: err.Error() below would panic.
			t.Errorf("CopyTo test #%v: got nil error, want err containing %q", i, test.errMsg)
			continue
		}
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg)
		}
	}
}
// TestObjectNames checks that SignedURL percent-encodes unusual object names
// correctly: each test case's encoded form must appear in the generated URL
// path as "/bucket-name/" + want.
func TestObjectNames(t *testing.T) {
t.Parallel()
// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
const maxLegalLength = 1024
type testT struct {
name, want string
}
tests := []testT{
// Embedded characters important in URLs.
{"foo % bar", "foo%20%25%20bar"},
{"foo ? bar", "foo%20%3F%20bar"},
{"foo / bar", "foo%20/%20bar"},
{"foo %?/ bar", "foo%20%25%3F/%20bar"},
// Non-Roman scripts
{"타코", "%ED%83%80%EC%BD%94"},
{"世界", "%E4%B8%96%E7%95%8C"},
// Longest legal name
{strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)},
// Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode
{"foo \u000b bar", "foo%20%0B%20bar"},
{"foo \u000c bar", "foo%20%0C%20bar"},
{"foo \u0085 bar", "foo%20%C2%85%20bar"},
{"foo \u2028 bar", "foo%20%E2%80%A8%20bar"},
{"foo \u2029 bar", "foo%20%E2%80%A9%20bar"},
// Null byte.
{"foo \u0000 bar", "foo%20%00%20bar"},
// Non-control characters that are discouraged, but not forbidden, according to the documentation.
{"foo # bar", "foo%20%23%20bar"},
{"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"},
// Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/
{"foo \u212b bar", "foo%20%E2%84%AB%20bar"},
{"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"},
{"foo \u00c5 bar", "foo%20%C3%85%20bar"},
// Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10)
{"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"},
{"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"},
{"foo \uac00 bar", "foo%20%EA%B0%80%20bar"},
}
// C0 control characters not forbidden by the docs.
// Build one name containing every C0 byte except LF and CR.
var runes []rune
for r := rune(0x01); r <= rune(0x1f); r++ {
if r != '\u000a' && r != '\u000d' {
runes = append(runes, r)
}
}
tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"})
// C1 control characters, plus DEL.
runes = nil
for r := rune(0x7f); r <= rune(0x9f); r++ {
runes = append(runes, r)
}
tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"})
// Fixed signing options; only the object name varies per case.
opts := &SignedURLOptions{
GoogleAccessID: "xxx@clientid",
PrivateKey: dummyKey("rsa"),
Method: "GET",
MD5: "ICy5YqxZB1uWSwcVLSNLcA==",
Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC),
ContentType: "application/json",
Headers: []string{"x-header1", "x-header2"},
}
for _, test := range tests {
g, err := SignedURL("bucket-name", test.name, opts)
if err != nil {
// NOTE(review): on error the loop still runs the substring check below
// against an empty g, producing a second (redundant) failure message.
t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err)
}
if w := "/bucket-name/" + test.want; !strings.Contains(g, w) {
t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w)
}
}
}
// TestCondition checks that generation/metageneration preconditions on an
// ObjectHandle are translated into the expected HTTP method, path, and query
// parameters. A fake server records each request on gotReq for inspection.
func TestCondition(t *testing.T) {
t.Parallel()
gotReq := make(chan *http.Request, 1)
// NOTE: "close" here shadows the builtin close() for the rest of this scope.
hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
io.Copy(ioutil.Discard, r.Body)
gotReq <- r
w.WriteHeader(200)
})
defer close()
ctx := context.Background()
c, err := NewClient(ctx, option.WithHTTPClient(hc))
if err != nil {
t.Fatal(err)
}
obj := c.Bucket("buck").Object("obj")
dst := c.Bucket("dstbuck").Object("dst")
// Each case performs one client operation and states the request line
// ("METHOD URI") the fake server should observe.
tests := []struct {
fn func()
want string
}{
{
func() { obj.Generation(1234).NewReader(ctx) },
"GET /buck/obj?generation=1234",
},
{
func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) },
"GET /buck/obj?ifGenerationMatch=1234",
},
{
func() { obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx) },
"GET /buck/obj?ifGenerationNotMatch=1234",
},
{
func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) },
"GET /buck/obj?ifMetagenerationMatch=1234",
},
{
func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) },
"GET /buck/obj?ifMetagenerationNotMatch=1234",
},
{
func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) },
"GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full",
},
{
func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) },
"PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full",
},
{
func() { obj.Generation(1234).Delete(ctx) },
"DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234",
},
{
func() {
w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx)
w.ContentType = "text/plain"
w.Close()
},
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart",
},
{
// DoesNotExist should be expressed as ifGenerationMatch=0.
func() {
w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx)
w.ContentType = "text/plain"
w.Close()
},
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart",
},
{
// Source conditions are prefixed with "Source" in the rewrite call.
func() {
dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx)
},
"POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full",
},
}
for i, tt := range tests {
tt.fn()
select {
case r := <-gotReq:
got := r.Method + " " + r.RequestURI
if got != tt.want {
t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
}
case <-time.After(5 * time.Second):
t.Fatalf("%d. timeout", i)
}
// NOTE(review): err is never reassigned inside this loop, so this check
// can only re-report the NewClient error already handled above — it
// appears to be dead code.
if err != nil {
t.Fatal(err)
}
}
// Test an error, too:
err = obj.Generation(1234).NewWriter(ctx).Close()
if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") {
t.Errorf("want error about unsupported generation; got %v", err)
}
}
// TestConditionErrors verifies that Conditions.validate rejects zero-valued
// and mutually contradictory condition combinations.
func TestConditionErrors(t *testing.T) {
	t.Parallel()
	badConds := []Conditions{
		{GenerationMatch: 0},
		{DoesNotExist: false}, // same as above, actually
		{GenerationMatch: 1, GenerationNotMatch: 2},
		{GenerationNotMatch: 2, DoesNotExist: true},
		{MetagenerationMatch: 1, MetagenerationNotMatch: 2},
	}
	for _, conds := range badConds {
		if err := conds.validate(""); err == nil {
			t.Errorf("%+v: got nil, want error", conds)
		}
	}
}
// Test object compose.
// TestObjectCompose checks that ComposerFrom builds the expected compose
// request URL and JSON body, and that invalid source/destination
// combinations are rejected before any request is sent. A fake server
// records the request URL and body on channels for inspection.
func TestObjectCompose(t *testing.T) {
t.Parallel()
gotURL := make(chan string, 1)
gotBody := make(chan []byte, 1)
// NOTE: "close" here shadows the builtin close() for the rest of this scope.
hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
body, _ := ioutil.ReadAll(r.Body)
gotURL <- r.URL.String()
gotBody <- body
w.Write([]byte("{}"))
})
defer close()
ctx := context.Background()
c, err := NewClient(ctx, option.WithHTTPClient(hc))
if err != nil {
t.Fatal(err)
}
testCases := []struct {
desc string
dst *ObjectHandle
srcs []*ObjectHandle
attrs *ObjectAttrs
wantReq raw.ComposeRequest
wantURL string
wantErr bool
}{
{
desc: "basic case",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
c.Bucket("foo").Object("quux"),
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{Bucket: "foo"},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{Name: "baz"},
{Name: "quux"},
},
},
},
{
desc: "with object attrs",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
c.Bucket("foo").Object("quux"),
},
attrs: &ObjectAttrs{
Name: "not-bar",
ContentType: "application/json",
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{
Bucket: "foo",
Name: "not-bar",
ContentType: "application/json",
},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{Name: "baz"},
{Name: "quux"},
},
},
},
{
// Destination conditions become query parameters; source conditions
// are carried in the request body as ObjectPreconditions.
desc: "with conditions",
dst: c.Bucket("foo").Object("bar").If(Conditions{
GenerationMatch: 12,
MetagenerationMatch: 34,
}),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz").Generation(56),
c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}),
},
wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34",
wantReq: raw.ComposeRequest{
Destination: &raw.Object{Bucket: "foo"},
SourceObjects: []*raw.ComposeRequestSourceObjects{
{
Name: "baz",
Generation: 56,
},
{
Name: "quux",
ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{
IfGenerationMatch: 78,
},
},
},
},
},
// The remaining cases are error cases: Run must fail locally.
{
desc: "no sources",
dst: c.Bucket("foo").Object("bar"),
wantErr: true,
},
{
desc: "destination, no bucket",
dst: c.Bucket("").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "destination, no object",
dst: c.Bucket("foo").Object(""),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "source, different bucket",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("otherbucket").Object("baz"),
},
wantErr: true,
},
{
desc: "source, no object",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object(""),
},
wantErr: true,
},
{
desc: "destination, bad condition",
dst: c.Bucket("foo").Object("bar").Generation(12),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz"),
},
wantErr: true,
},
{
desc: "source, bad condition",
dst: c.Bucket("foo").Object("bar"),
srcs: []*ObjectHandle{
c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}),
},
wantErr: true,
},
}
for _, tt := range testCases {
composer := tt.dst.ComposerFrom(tt.srcs...)
if tt.attrs != nil {
composer.ObjectAttrs = *tt.attrs
}
_, err := composer.Run(ctx)
if gotErr := err != nil; gotErr != tt.wantErr {
t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr)
continue
}
if tt.wantErr {
// Error cases send no request, so there is nothing on the channels.
continue
}
url, body := <-gotURL, <-gotBody
if url != tt.wantURL {
t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL)
}
var req raw.ComposeRequest
if err := json.Unmarshal(body, &req); err != nil {
t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body)
}
if !testutil.Equal(req, tt.wantReq) {
// Print to JSON.
wantReq, _ := json.Marshal(tt.wantReq)
t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq)
}
}
}
// Test that ObjectIterator's Next and NextPage methods correctly terminate
// if there is nothing to iterate over. The fake server replies with an empty
// JSON object, i.e. no items and no next page token.
func TestEmptyObjectIterator(t *testing.T) {
	t.Parallel()
	httpClient, stop := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(ioutil.Discard, r.Body)
		fmt.Fprintf(w, "{}")
	})
	defer stop()
	ctx := context.Background()
	client, err := NewClient(ctx, option.WithHTTPClient(httpClient))
	if err != nil {
		t.Fatal(err)
	}
	it := client.Bucket("b").Objects(ctx, nil)
	if _, err := it.Next(); err != iterator.Done {
		t.Errorf("got %v, want Done", err)
	}
}
// Test that BucketIterator's Next method correctly terminates if there is
// nothing to iterate over. The fake server replies with an empty JSON
// object, i.e. no buckets and no next page token.
func TestEmptyBucketIterator(t *testing.T) {
	t.Parallel()
	httpClient, stop := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(ioutil.Discard, r.Body)
		fmt.Fprintf(w, "{}")
	})
	defer stop()
	ctx := context.Background()
	client, err := NewClient(ctx, option.WithHTTPClient(httpClient))
	if err != nil {
		t.Fatal(err)
	}
	it := client.Buckets(ctx, "project")
	if _, err := it.Next(); err != iterator.Done {
		t.Errorf("got %v, want Done", err)
	}
}
// TestCodecUint32 round-trips representative uint32 values (including both
// extremes) through encodeUint32/decodeUint32 and checks they survive intact.
func TestCodecUint32(t *testing.T) {
	t.Parallel()
	inputs := []uint32{0, 1, 256, 0xFFFFFFFF}
	for _, want := range inputs {
		encoded := encodeUint32(want)
		got, err := decodeUint32(encoded)
		if err != nil {
			t.Fatal(err)
		}
		if got != want {
			t.Errorf("got %d, want input %d", got, want)
		}
	}
}
// TestBucketAttrs verifies that BucketAttrs.toRawBucket converts lifecycle
// configuration into the equivalent raw (JSON API) bucket representation.
func TestBucketAttrs(t *testing.T) {
	for _, c := range []struct {
		attrs BucketAttrs
		raw   raw.Bucket
	}{{
		// Three rules exercising: SetStorageClass with a full condition,
		// Delete with a full condition, and Delete keyed only on liveness.
		attrs: BucketAttrs{
			Lifecycle: Lifecycle{
				Rules: []LifecycleRule{{
					Action: LifecycleAction{
						Type:         SetStorageClassAction,
						StorageClass: "NEARLINE",
					},
					Condition: LifecycleCondition{
						AgeInDays:             10,
						Liveness:              Live,
						CreatedBefore:         time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC),
						MatchesStorageClasses: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"},
						NumNewerVersions:      3,
					},
				}, {
					Action: LifecycleAction{
						Type: DeleteAction,
					},
					Condition: LifecycleCondition{
						AgeInDays:             30,
						Liveness:              Live,
						CreatedBefore:         time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC),
						MatchesStorageClasses: []string{"NEARLINE"},
						NumNewerVersions:      10,
					},
				}, {
					Action: LifecycleAction{
						Type: DeleteAction,
					},
					Condition: LifecycleCondition{
						Liveness: Archived,
					},
				}},
			},
		},
		// Expected raw form: note CreatedBefore collapses to a date string,
		// and Liveness maps to the *bool IsLive (Live=true, Archived=false).
		raw: raw.Bucket{
			Lifecycle: &raw.BucketLifecycle{
				Rule: []*raw.BucketLifecycleRule{{
					Action: &raw.BucketLifecycleRuleAction{
						Type:         SetStorageClassAction,
						StorageClass: "NEARLINE",
					},
					Condition: &raw.BucketLifecycleRuleCondition{
						Age:                 10,
						IsLive:              googleapi.Bool(true),
						CreatedBefore:       "2017-01-02",
						MatchesStorageClass: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"},
						NumNewerVersions:    3,
					},
				}, {
					Action: &raw.BucketLifecycleRuleAction{
						Type: DeleteAction,
					},
					Condition: &raw.BucketLifecycleRuleCondition{
						Age:                 30,
						IsLive:              googleapi.Bool(true),
						CreatedBefore:       "2017-01-02",
						MatchesStorageClass: []string{"NEARLINE"},
						NumNewerVersions:    10,
					},
				}, {
					Action: &raw.BucketLifecycleRuleAction{
						Type: DeleteAction,
					},
					Condition: &raw.BucketLifecycleRuleCondition{
						IsLive: googleapi.Bool(false),
					},
				}},
			},
		},
	}} {
		if got := c.attrs.toRawBucket(); !testutil.Equal(*got, c.raw) {
			t.Errorf("toRawBucket: got %v, want %v", *got, c.raw)
		}
	}
}
// newTestServer starts a TLS httptest server running handler and returns an
// http.Client wired to it, plus a teardown func that closes both.
// Certificate verification is disabled and every dial is redirected to the
// test server's listener, regardless of the requested address.
func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) {
	server := httptest.NewTLSServer(http.HandlerFunc(handler))
	conf := &tls.Config{InsecureSkipVerify: true}
	transport := &http.Transport{
		TLSClientConfig: conf,
		// Ignore netw/addr: all traffic goes to the test server.
		DialTLS: func(netw, addr string) (net.Conn, error) {
			return tls.Dial("tcp", server.Listener.Addr().String(), conf)
		},
	}
	teardown := func() {
		transport.CloseIdleConnections()
		server.Close()
	}
	return &http.Client{Transport: transport}, teardown
}
| {
"pile_set_name": "Github"
} |
/*
* Private peripheral timer/watchdog blocks for ARM 11MPCore and A9MP
*
* Copyright (c) 2006-2007 CodeSourcery.
* Copyright (c) 2011 Linaro Limited
* Written by Paul Brook, Peter Maydell
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "hw/ptimer.h"
#include "hw/timer/arm_mptimer.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qom/cpu.h"
/* ptimer behaviour flags used for every timer block in this device.
 * NOTE(review): these policies appear chosen to match MPCore hardware
 * counter semantics (wrap/trigger/reload corner cases) — confirm against
 * the ptimer.h documentation before changing.
 */
#define PTIMER_POLICY \
    (PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | \
     PTIMER_POLICY_CONTINUOUS_TRIGGER |  \
     PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | \
     PTIMER_POLICY_NO_IMMEDIATE_RELOAD | \
     PTIMER_POLICY_NO_COUNTER_ROUND_DOWN)
/* This device implements the per-cpu private timer and watchdog block
 * which is used in both the ARM11MPCore and Cortex-A9MP.
 */
/* Return the index of the CPU performing the current access, or 0 when
 * there is no current CPU (e.g. access from the monitor). Aborts via
 * hw_error() if the accessing CPU's index exceeds the configured num-cpu,
 * since there is no timer block for it.
 */
static inline int get_current_cpu(ARMMPTimerState *s)
{
    int cpu_id = current_cpu ? current_cpu->cpu_index : 0;

    if (cpu_id >= s->num_cpu) {
        hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n",
                 s->num_cpu, cpu_id);
    }

    return cpu_id;
}
/* Drive the timer's IRQ line: asserted only while an event is pending
 * (status set) and control register bit 2 (interrupt enable) is set.
 */
static inline void timerblock_update_irq(TimerBlock *tb)
{
    int level = (tb->control & 4) && tb->status;

    qemu_set_irq(tb->irq, level);
}
/* Return conversion factor from mpcore timer ticks to qemu timer ticks.
 * The prescaler lives in control bits [15:8]; each timer tick takes
 * (prescaler + 1) * 10 qemu ticks.
 */
static inline uint32_t timerblock_scale(uint32_t control)
{
    uint32_t prescaler = (control >> 8) & 0xff;

    return 10 * (prescaler + 1);
}
/* Write *count into the ptimer, emulating the MPCore quirk: a plain
 * ptimer fires immediately when a periodic timer's counter is set to 0,
 * whereas the MPCore only reloads. For an enabled periodic timer with a
 * zero prescaler, substitute the reload (limit) value for the zero write
 * so the ptimer keeps counting. *count is updated in place so callers
 * see the value actually programmed.
 */
static inline void timerblock_set_count(struct ptimer_state *timer,
                                        uint32_t control, uint64_t *count)
{
    int periodic_enabled = (control & 3) == 3;
    int prescaler_zero = (control & 0xff00) == 0;

    if (periodic_enabled && prescaler_zero && *count == 0) {
        *count = ptimer_get_limit(timer);
    }
    ptimer_set_count(timer, *count);
}
/* Start the ptimer if the enable bit (control bit 0) is set, unless the
 * timer would spin uselessly: a zero load with no prescaler programmed
 * is left stopped. Control bit 1 selects periodic (0 arg) versus
 * one-shot (1 arg) operation.
 */
static inline void timerblock_run(struct ptimer_state *timer,
                                  uint32_t control, uint32_t load)
{
    int has_prescaler = (control & 0xff00) != 0;

    if ((control & 1) && (has_prescaler || load != 0)) {
        ptimer_run(timer, (control & 2) ? 0 : 1);
    }
}
/* ptimer expiry callback: latch the event into the status register and
 * update the IRQ line. A periodic timer whose reload value is 0 with no
 * prescaler re-triggers exactly once and is then stopped, instead of
 * stopping or wrapping as other configurations do.
 */
static void timerblock_tick(void *opaque)
{
    TimerBlock *tb = opaque;
    int periodic = tb->control & 2;
    int prescaler_zero = (tb->control & 0xff00) == 0;

    if (periodic && prescaler_zero && ptimer_get_limit(tb->timer) == 0) {
        ptimer_stop(tb->timer);
    }
    tb->status = 1;
    timerblock_update_irq(tb);
}
/* MMIO read for one timer block. Register map (byte offsets):
 * 0x0 Load, 0x4 Counter, 0x8 Control, 0xc Interrupt status.
 * Unknown offsets read as zero.
 */
static uint64_t timerblock_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    TimerBlock *tb = (TimerBlock *)opaque;
    switch (addr) {
    case 0: /* Load */
        return ptimer_get_limit(tb->timer);
    case 4: /* Counter.  */
        return ptimer_get_count(tb->timer);
    case 8: /* Control.  */
        return tb->control;
    case 12: /* Interrupt status.  */
        return tb->status;
    default:
        return 0;
    }
}
/* MMIO write for one timer block. Offsets as in timerblock_read.
 * Control register layout used below: bit 0 enable, bit 1 periodic,
 * bit 2 IT enable, bits [15:8] prescaler. The helpers encode the
 * MPCore corner cases around zero counts with a zero prescaler.
 */
static void timerblock_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size)
{
    TimerBlock *tb = (TimerBlock *)opaque;
    uint32_t control = tb->control;
    switch (addr) {
    case 0: /* Load */
        /* Setting load to 0 stops the timer without doing the tick if
         * prescaler = 0.
         */
        if ((control & 1) && (control & 0xff00) == 0 &&  value == 0) {
            ptimer_stop(tb->timer);
        }
        /* Writing Load also reloads the counter (third arg = 1).  */
        ptimer_set_limit(tb->timer, value, 1);
        timerblock_run(tb->timer, control, value);
        break;
    case 4: /* Counter.  */
        /* Setting counter to 0 stops the one-shot timer, or periodic with
         * load = 0, without doing the tick if prescaler = 0.
         */
        if ((control & 1) && (control & 0xff00) == 0 && value == 0 &&
                (!(control & 2) || ptimer_get_limit(tb->timer) == 0)) {
            ptimer_stop(tb->timer);
        }
        timerblock_set_count(tb->timer, control, &value);
        timerblock_run(tb->timer, control, value);
        break;
    case 8: /* Control.  */
        /* Mode change (enable/periodic bits) restarts from a stopped state. */
        if ((control & 3) != (value & 3)) {
            ptimer_stop(tb->timer);
        }
        /* Reprogram the period only when the prescaler field changed.  */
        if ((control & 0xff00) != (value & 0xff00)) {
            ptimer_set_period(tb->timer, timerblock_scale(value));
        }
        if (value & 1) {
            uint64_t count = ptimer_get_count(tb->timer);
            /* Re-load periodic timer counter if needed.  */
            if ((value & 2) && count == 0) {
                timerblock_set_count(tb->timer, value, &count);
            }
            timerblock_run(tb->timer, value, count);
        }
        tb->control = value;
        break;
    case 12: /* Interrupt status.  */
        /* Write-one-to-clear, then recompute the IRQ line.  */
        tb->status &= ~value;
        timerblock_update_irq(tb);
        break;
    }
}
/* Wrapper functions to implement the "read timer/watchdog for
 * the current CPU" memory regions.
 */
/* Forward a "this CPU" region read to the accessing CPU's timer block. */
static uint64_t arm_thistimer_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    ARMMPTimerState *s = (ARMMPTimerState *)opaque;
    int id = get_current_cpu(s);
    return timerblock_read(&s->timerblock[id], addr, size);
}

/* Forward a "this CPU" region write to the accessing CPU's timer block. */
static void arm_thistimer_write(void *opaque, hwaddr addr,
                                uint64_t value, unsigned size)
{
    ARMMPTimerState *s = (ARMMPTimerState *)opaque;
    int id = get_current_cpu(s);
    timerblock_write(&s->timerblock[id], addr, value, size);
}

/* MMIO ops for the banked "timer for this core" region: 32-bit only. */
static const MemoryRegionOps arm_thistimer_ops = {
    .read = arm_thistimer_read,
    .write = arm_thistimer_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* MMIO ops for a specific per-CPU timer block region: 32-bit only. */
static const MemoryRegionOps timerblock_ops = {
    .read = timerblock_read,
    .write = timerblock_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Reset one timer block to its power-on state: control/status cleared,
 * ptimer stopped with limit 0 and the period for a zero prescaler.
 * The tb->timer check allows reset before realize has created the ptimer.
 */
static void timerblock_reset(TimerBlock *tb)
{
    tb->control = 0;
    tb->status = 0;
    if (tb->timer) {
        ptimer_stop(tb->timer);
        ptimer_set_limit(tb->timer, 0, 1);
        ptimer_set_period(tb->timer, timerblock_scale(0));
    }
}
/* Device-level reset: reset every timer block, including ones beyond
 * num_cpu, so stale state can never survive a reset.
 */
static void arm_mptimer_reset(DeviceState *dev)
{
    ARMMPTimerState *s = ARM_MPTIMER(dev);
    size_t n;

    for (n = 0; n < ARRAY_SIZE(s->timerblock); n++) {
        timerblock_reset(&s->timerblock[n]);
    }
}
/* Instance init: create only the banked "timer for this core" region here;
 * the per-CPU regions depend on the num-cpu property and are created in
 * realize.
 */
static void arm_mptimer_init(Object *obj)
{
    ARMMPTimerState *s = ARM_MPTIMER(obj);

    memory_region_init_io(&s->iomem, obj, &arm_thistimer_ops, s,
                          "arm_mptimer_timer", 0x20);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
}
/* Realize: validate the num-cpu property, then create one ptimer-backed
 * timer block per CPU with its own IRQ line and MMIO region.
 */
static void arm_mptimer_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMMPTimerState *s = ARM_MPTIMER(dev);
    int i;

    if (s->num_cpu < 1 || s->num_cpu > ARM_MPTIMER_MAX_CPUS) {
        error_setg(errp, "num-cpu must be between 1 and %d",
                   ARM_MPTIMER_MAX_CPUS);
        return;
    }
    /* We implement one timer block per CPU, and expose multiple MMIO regions:
     *  * region 0 is "timer for this core"
     *  * region 1 is "timer for core 0"
     *  * region 2 is "timer for core 1"
     * and so on.
     * The outgoing interrupt lines are
     *  * timer for core 0
     *  * timer for core 1
     * and so on.
     */
    for (i = 0; i < s->num_cpu; i++) {
        TimerBlock *tb = &s->timerblock[i];
        /* Expiry runs from a bottom half, via timerblock_tick.  */
        QEMUBH *bh = qemu_bh_new(timerblock_tick, tb);
        tb->timer = ptimer_init(bh, PTIMER_POLICY);
        sysbus_init_irq(sbd, &tb->irq);
        memory_region_init_io(&tb->iomem, OBJECT(s), &timerblock_ops, tb,
                              "arm_mptimer_timerblock", 0x20);
        sysbus_init_mmio(sbd, &tb->iomem);
    }
}
/* Migration state for a single timer block (control, status, ptimer). */
static const VMStateDescription vmstate_timerblock = {
    .name = "arm_mptimer_timerblock",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(control, TimerBlock),
        VMSTATE_UINT32(status, TimerBlock),
        VMSTATE_PTIMER(timer, TimerBlock),
        VMSTATE_END_OF_LIST()
    }
};

/* Device migration state: num_cpu timer blocks. */
static const VMStateDescription vmstate_arm_mptimer = {
    .name = "arm_mptimer",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_UINT32(timerblock, ARMMPTimerState, num_cpu,
                                     3, vmstate_timerblock, TimerBlock),
        VMSTATE_END_OF_LIST()
    }
};

/* num-cpu must be set by the creator; the default 0 fails realize. */
static Property arm_mptimer_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", ARMMPTimerState, num_cpu, 0),
    DEFINE_PROP_END_OF_LIST()
};

static void arm_mptimer_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = arm_mptimer_realize;
    dc->vmsd = &vmstate_arm_mptimer;
    dc->reset = arm_mptimer_reset;
    dc->props = arm_mptimer_properties;
}

static const TypeInfo arm_mptimer_info = {
    .name          = TYPE_ARM_MPTIMER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(ARMMPTimerState),
    .instance_init = arm_mptimer_init,
    .class_init    = arm_mptimer_class_init,
};

static void arm_mptimer_register_types(void)
{
    type_register_static(&arm_mptimer_info);
}

type_init(arm_mptimer_register_types)
| {
"pile_set_name": "Github"
} |
<header>Anonym användare</header>
Om en root på en klient som inte finns på <a href="root_access">Root-tillgång</a>-listan försöker komma åt filer på denna delade resurs, kommer denna root att ha samma rättigheter som den användare du angivit här.
<hr>
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.junit;
import static org.apache.activemq.command.ActiveMQDestination.QUEUE_TYPE;

import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Map;

import javax.jms.BytesMessage;
import javax.jms.Connection;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import javax.jms.Message;
import javax.jms.MessageProducer;
import javax.jms.ObjectMessage;
import javax.jms.Session;
import javax.jms.StreamMessage;
import javax.jms.TextMessage;

import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerFactory;
import org.apache.activemq.broker.BrokerPlugin;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.region.Destination;
import org.apache.activemq.broker.region.policy.PolicyEntry;
import org.apache.activemq.broker.region.policy.PolicyMap;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.plugin.StatisticsBrokerPlugin;
import org.apache.activemq.pool.PooledConnectionFactory;
import org.junit.rules.ExternalResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A JUnit Rule that embeds an ActiveMQ broker into a test.
*/
public class EmbeddedActiveMQBroker extends ExternalResource {
    // Logger bound to the runtime class, so subclasses log under their own name.
    Logger log = LoggerFactory.getLogger(this.getClass());
    // The embedded broker managed by this rule; created in a constructor.
    BrokerService brokerService;
    // Internal JMS client used by the create/push message helpers; created in start().
    InternalClient internalClient;
    /**
     * Create an embedded ActiveMQ broker using defaults.
     * <p>
     * The defaults are:
     * - the broker name is 'embedded-broker'
     * - JMX is enabled but no management connector is created.
     * - Persistence is disabled
     */
    public EmbeddedActiveMQBroker() {
        brokerService = new BrokerService();
        brokerService.setUseJmx(true);
        // JMX on, but without exposing a management connector.
        brokerService.getManagementContext().setCreateConnector(false);
        // The rule controls shutdown explicitly via stop()/after().
        brokerService.setUseShutdownHook(false);
        brokerService.setPersistent(false);
        brokerService.setBrokerName("embedded-broker");
    }
    /**
     * Create an embedded ActiveMQ broker from a configuration URI string
     * (e.g. an xbean: or broker: URI understood by BrokerFactory).
     */
    public EmbeddedActiveMQBroker(String configurationURI) {
        try {
            brokerService = BrokerFactory.createBroker(configurationURI);
        } catch (Exception ex) {
            throw new RuntimeException("Exception encountered creating embedded ActiveMQ broker from configuration URI: " + configurationURI, ex);
        }
    }

    /**
     * Create an embedded ActiveMQ broker from a configuration URI.
     */
    public EmbeddedActiveMQBroker(URI configurationURI) {
        try {
            brokerService = BrokerFactory.createBroker(configurationURI);
        } catch (Exception ex) {
            throw new RuntimeException("Exception encountered creating embedded ActiveMQ broker from configuration URI: " + configurationURI, ex);
        }
    }
public static void setMessageProperties(Message message, Map<String, Object> properties) {
if (properties != null && properties.size() > 0) {
for (Map.Entry<String, Object> property : properties.entrySet()) {
try {
message.setObjectProperty(property.getKey(), property.getValue());
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException(String.format("Failed to set property {%s = %s}", property.getKey(), property.getValue().toString()), jmsEx);
}
}
}
}
    /**
     * Customize the configuration of the embedded ActiveMQ broker.
     * <p>
     * Called from start() before the embedded ActiveMQ broker is started;
     * override this method to customize the broker configuration.
     * The default implementation does nothing.
     */
    protected void configure() {
    }
/**
* Start the embedded ActiveMQ broker, blocking until the broker has successfully started.
* <p/>
* The broker will normally be started by JUnit using the before() method. This method allows the broker to
* be started manually to support advanced testing scenarios.
*/
public void start() {
try {
this.configure();
brokerService.start();
internalClient = new InternalClient();
internalClient.start();
} catch (Exception ex) {
throw new RuntimeException("Exception encountered starting embedded ActiveMQ broker: {}" + this.getBrokerName(), ex);
}
brokerService.waitUntilStarted();
}
/**
* Stop the embedded ActiveMQ broker, blocking until the broker has stopped.
* <p/>
* The broker will normally be stopped by JUnit using the after() method. This method allows the broker to
* be stopped manually to support advanced testing scenarios.
*/
public void stop() {
if (internalClient != null) {
internalClient.stop();
internalClient = null;
}
if (!brokerService.isStopped()) {
try {
brokerService.stop();
} catch (Exception ex) {
log.warn("Exception encountered stopping embedded ActiveMQ broker: {}" + this.getBrokerName(), ex);
}
}
brokerService.waitUntilStopped();
}
    /**
     * Start the embedded ActiveMQ Broker.
     * <p/>
     * Invoked by JUnit to setup the resource before each test.
     */
    @Override
    protected void before() throws Throwable {
        log.info("Starting embedded ActiveMQ broker: {}", this.getBrokerName());

        this.start();

        super.before();
    }

    /**
     * Stop the embedded ActiveMQ Broker.
     * <p/>
     * Invoked by JUnit to tear down the resource after each test.
     */
    @Override
    protected void after() {
        log.info("Stopping Embedded ActiveMQ Broker: {}", this.getBrokerName());

        super.after();

        this.stop();
    }
/**
* Create an ActiveMQConnectionFactory for the embedded ActiveMQ Broker
*
* @return a new ActiveMQConnectionFactory
*/
public ActiveMQConnectionFactory createConnectionFactory() {
ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory();
connectionFactory.setBrokerURL(getVmURL());
return connectionFactory;
}
/**
* Create an PooledConnectionFactory for the embedded ActiveMQ Broker
*
* @return a new PooledConnectionFactory
*/
public PooledConnectionFactory createPooledConnectionFactory() {
ActiveMQConnectionFactory connectionFactory = createConnectionFactory();
PooledConnectionFactory pooledConnectionFactory = new PooledConnectionFactory(connectionFactory);
return pooledConnectionFactory;
}
    /**
     * Get the BrokerService for the embedded ActiveMQ broker.
     * <p/>
     * This may be required for advanced configuration of the BrokerService.
     *
     * @return the embedded ActiveMQ broker
     */
    public BrokerService getBrokerService() {
        return brokerService;
    }

    /**
     * Get the failover VM URL for the embedded ActiveMQ Broker.
     * <p/>
     * NOTE: The create=false option is appended to the URL to avoid the automatic creation of brokers
     * and the resulting duplicate broker errors
     *
     * @return the failover VM URL for the embedded broker
     */
    public String getVmURL() {
        return getVmURL(true);
    }
/**
* Get the VM URL for the embedded ActiveMQ Broker
* <p/>
* NOTE: The create=false option is appended to the URL to avoid the automatic creation of brokers
* and the resulting duplicate broker errors
*
* @param failoverURL if true a failover URL will be returned
* @return the VM URL for the embedded broker
*/
public String getVmURL(boolean failoverURL) {
if (failoverURL) {
return String.format("failover:(%s?create=false)", brokerService.getVmConnectorURI().toString());
}
return brokerService.getVmConnectorURI().toString() + "?create=false";
}
    /**
     * Get the failover VM URI for the embedded ActiveMQ Broker.
     * <p/>
     * NOTE: The create=false option is appended to the URI to avoid the automatic creation of brokers
     * and the resulting duplicate broker errors
     *
     * @return the failover VM URI for the embedded broker
     */
    public URI getVmURI() {
        return getVmURI(true);
    }
/**
* Get the VM URI for the embedded ActiveMQ Broker
* <p/>
* NOTE: The create=false option is appended to the URI to avoid the automatic creation of brokers
* and the resulting duplicate broker errors
*
* @param failoverURI if true a failover URI will be returned
* @return the VM URI for the embedded broker
*/
public URI getVmURI(boolean failoverURI) {
URI result;
try {
result = new URI(getVmURL(failoverURI));
} catch (URISyntaxException uriEx) {
throw new RuntimeException("Unable to create failover URI", uriEx);
}
return result;
}
    /**
     * Get the name of the embedded ActiveMQ Broker.
     *
     * @return name of the embedded broker
     */
    public String getBrokerName() {
        return brokerService.getBrokerName();
    }

    /** Set the name of the embedded ActiveMQ Broker (before start). */
    public void setBrokerName(String brokerName) {
        brokerService.setBrokerName(brokerName);
    }
public boolean isStatisticsPluginEnabled() {
BrokerPlugin[] plugins = brokerService.getPlugins();
if (null != plugins) {
for (BrokerPlugin plugin : plugins) {
if (plugin instanceof StatisticsBrokerPlugin) {
return true;
}
}
}
return false;
}
public void enableStatisticsPlugin() {
if (!isStatisticsPluginEnabled()) {
BrokerPlugin[] newPlugins;
BrokerPlugin[] currentPlugins = brokerService.getPlugins();
if (null != currentPlugins && 0 < currentPlugins.length) {
newPlugins = new BrokerPlugin[currentPlugins.length + 1];
System.arraycopy(currentPlugins, 0, newPlugins, 0, currentPlugins.length);
} else {
newPlugins = new BrokerPlugin[1];
}
newPlugins[newPlugins.length - 1] = new StatisticsBrokerPlugin();
brokerService.setPlugins(newPlugins);
}
}
public void disableStatisticsPlugin() {
if (isStatisticsPluginEnabled()) {
BrokerPlugin[] currentPlugins = brokerService.getPlugins();
if (1 < currentPlugins.length) {
BrokerPlugin[] newPlugins = new BrokerPlugin[currentPlugins.length - 1];
int i = 0;
for (BrokerPlugin plugin : currentPlugins) {
if (!(plugin instanceof StatisticsBrokerPlugin)) {
newPlugins[i++] = plugin;
}
}
brokerService.setPlugins(newPlugins);
} else {
brokerService.setPlugins(null);
}
}
}
    // --- Advisory topic toggles -------------------------------------------
    // Each pair below reads or flips one advisory flag on the broker's
    // default destination policy entry (created on demand by
    // getDefaultPolicyEntry()).

    public boolean isAdvisoryForDeliveryEnabled() {
        return getDefaultPolicyEntry().isAdvisoryForDelivery();
    }

    public void enableAdvisoryForDelivery() {
        getDefaultPolicyEntry().setAdvisoryForDelivery(true);
    }

    public void disableAdvisoryForDelivery() {
        getDefaultPolicyEntry().setAdvisoryForDelivery(false);
    }

    public boolean isAdvisoryForConsumedEnabled() {
        return getDefaultPolicyEntry().isAdvisoryForConsumed();
    }

    public void enableAdvisoryForConsumed() {
        getDefaultPolicyEntry().setAdvisoryForConsumed(true);
    }

    public void disableAdvisoryForConsumed() {
        getDefaultPolicyEntry().setAdvisoryForConsumed(false);
    }

    public boolean isAdvisoryForDiscardingMessagesEnabled() {
        return getDefaultPolicyEntry().isAdvisoryForDiscardingMessages();
    }

    public void enableAdvisoryForDiscardingMessages() {
        getDefaultPolicyEntry().setAdvisoryForDiscardingMessages(true);
    }

    public void disableAdvisoryForDiscardingMessages() {
        getDefaultPolicyEntry().setAdvisoryForDiscardingMessages(false);
    }

    public boolean isAdvisoryForFastProducersEnabled() {
        return getDefaultPolicyEntry().isAdvisoryForFastProducers();
    }

    public void enableAdvisoryForFastProducers() {
        getDefaultPolicyEntry().setAdvisoryForFastProducers(true);
    }

    public void disableAdvisoryForFastProducers() {
        getDefaultPolicyEntry().setAdvisoryForFastProducers(false);
    }

    public boolean isAdvisoryForSlowConsumersEnabled() {
        return getDefaultPolicyEntry().isAdvisoryForSlowConsumers();
    }

    public void enableAdvisoryForSlowConsumers() {
        getDefaultPolicyEntry().setAdvisoryForSlowConsumers(true);
    }

    public void disableAdvisoryForSlowConsumers() {
        getDefaultPolicyEntry().setAdvisoryForSlowConsumers(false);
    }
    /**
     * Get the number of messages in a specific JMS Destination.
     * <p/>
     * The full name of the JMS destination including the prefix should be provided - i.e. queue://myQueue
     * or topic://myTopic.  If the destination type prefix is not included in the destination name, a prefix
     * of "queue://" is assumed.
     *
     * @param destinationName the full name of the JMS Destination
     * @return the number of messages in the JMS Destination
     * @throws IllegalStateException if the broker has not been created yet
     * @throws RuntimeException      if the destination does not exist on the broker
     */
    public long getMessageCount(String destinationName) {
        if (null == brokerService) {
            throw new IllegalStateException("BrokerService has not yet been created - was before() called?");
        }

        // TODO: Figure out how to do this for Topics
        Destination destination = getDestination(destinationName);
        if (destination == null) {
            throw new RuntimeException("Failed to find destination: " + destinationName);
        }

        // Use the destination statistics rather than the message store count.
        // return destination.getMessageStore().getMessageCount();
        return destination.getDestinationStatistics().getMessages().getCount();
    }
    /**
     * Get the ActiveMQ destination.
     * <p/>
     * The full name of the JMS destination including the prefix should be provided - i.e. queue://myQueue
     * or topic://myTopic.  If the destination type prefix is not included in the destination name, a prefix
     * of "queue://" is assumed.
     *
     * @param destinationName the full name of the JMS Destination
     * @return the ActiveMQ destination, null if not found
     * @throws IllegalStateException if the broker has not been created yet
     */
    public Destination getDestination(String destinationName) {
        if (null == brokerService) {
            throw new IllegalStateException("BrokerService has not yet been created - was before() called?");
        }

        Destination destination = null;
        try {
            destination = brokerService.getDestination(ActiveMQDestination.createDestination(destinationName, QUEUE_TYPE));
        } catch (RuntimeException runtimeEx) {
            // Propagate runtime failures untouched.
            throw runtimeEx;
        } catch (Exception ex) {
            // Wrap checked exceptions from the broker API.
            throw new EmbeddedActiveMQBrokerException("Unexpected exception getting destination from broker", ex);
        }

        return destination;
    }
private PolicyEntry getDefaultPolicyEntry() {
PolicyMap destinationPolicy = brokerService.getDestinationPolicy();
if (null == destinationPolicy) {
destinationPolicy = new PolicyMap();
brokerService.setDestinationPolicy(destinationPolicy);
}
PolicyEntry defaultEntry = destinationPolicy.getDefaultEntry();
if (null == defaultEntry) {
defaultEntry = new PolicyEntry();
destinationPolicy.setDefaultEntry(defaultEntry);
}
return defaultEntry;
}
    // --- Message factories -------------------------------------------------
    // Empty-message factories delegate to the internal client's JMS session;
    // the createMessage(body) overloads delegate to the body+properties forms
    // below with a null property map.

    public BytesMessage createBytesMessage() {
        return internalClient.createBytesMessage();
    }

    public TextMessage createTextMessage() {
        return internalClient.createTextMessage();
    }

    public MapMessage createMapMessage() {
        return internalClient.createMapMessage();
    }

    public ObjectMessage createObjectMessage() {
        return internalClient.createObjectMessage();
    }

    public StreamMessage createStreamMessage() {
        return internalClient.createStreamMessage();
    }

    public BytesMessage createMessage(byte[] body) {
        return this.createMessage(body, null);
    }

    public TextMessage createMessage(String body) {
        return this.createMessage(body, null);
    }

    public MapMessage createMessage(Map<String, Object> body) {
        return this.createMessage(body, null);
    }

    public ObjectMessage createMessage(Serializable body) {
        return this.createMessage(body, null);
    }
    // --- Message factories with body and properties ------------------------
    // Each overload builds an empty message of the matching type, writes the
    // body into it (a null body is allowed and skipped), then applies the
    // property map via setMessageProperties.

    public BytesMessage createMessage(byte[] body, Map<String, Object> properties) {
        BytesMessage message = this.createBytesMessage();
        if (body != null) {
            try {
                message.writeBytes(body);
            } catch (JMSException jmsEx) {
                throw new EmbeddedActiveMQBrokerException(String.format("Failed to set body {%s} on BytesMessage", new String(body)), jmsEx);
            }
        }

        setMessageProperties(message, properties);

        return message;
    }

    public TextMessage createMessage(String body, Map<String, Object> properties) {
        TextMessage message = this.createTextMessage();
        if (body != null) {
            try {
                message.setText(body);
            } catch (JMSException jmsEx) {
                throw new EmbeddedActiveMQBrokerException(String.format("Failed to set body {%s} on TextMessage", body), jmsEx);
            }
        }

        setMessageProperties(message, properties);

        return message;
    }

    public MapMessage createMessage(Map<String, Object> body, Map<String, Object> properties) {
        MapMessage message = this.createMapMessage();
        if (body != null) {
            // Each map entry becomes a named object in the MapMessage body.
            for (Map.Entry<String, Object> entry : body.entrySet()) {
                try {
                    message.setObject(entry.getKey(), entry.getValue());
                } catch (JMSException jmsEx) {
                    throw new EmbeddedActiveMQBrokerException(String.format("Failed to set body entry {%s = %s} on MapMessage", entry.getKey(), entry.getValue().toString()), jmsEx);
                }
            }
        }

        setMessageProperties(message, properties);

        return message;
    }

    public ObjectMessage createMessage(Serializable body, Map<String, Object> properties) {
        ObjectMessage message = this.createObjectMessage();
        if (body != null) {
            try {
                message.setObject(body);
            } catch (JMSException jmsEx) {
                throw new EmbeddedActiveMQBrokerException(String.format("Failed to set body {%s} on ObjectMessage", body.toString()), jmsEx);
            }
        }

        setMessageProperties(message, properties);

        return message;
    }
public void pushMessage(String destinationName, Message message) {
if (destinationName == null) {
throw new IllegalArgumentException("pushMessage failure - destination name is required");
} else if (message == null) {
throw new IllegalArgumentException("pushMessage failure - a Message is required");
}
ActiveMQDestination destination = ActiveMQDestination.createDestination(destinationName, ActiveMQDestination.QUEUE_TYPE);
internalClient.pushMessage(destination, message);
}
    // --- Push convenience overloads ----------------------------------------
    // Each overload builds a message of the matching type from the body (and
    // optional properties), sends it to the named destination, and returns
    // the message that was sent.

    public BytesMessage pushMessage(String destinationName, byte[] body) {
        BytesMessage message = createMessage(body, null);
        pushMessage(destinationName, message);
        return message;
    }

    public TextMessage pushMessage(String destinationName, String body) {
        TextMessage message = createMessage(body, null);
        pushMessage(destinationName, message);
        return message;
    }

    public MapMessage pushMessage(String destinationName, Map<String, Object> body) {
        MapMessage message = createMessage(body, null);
        pushMessage(destinationName, message);
        return message;
    }

    public ObjectMessage pushMessage(String destinationName, Serializable body) {
        ObjectMessage message = createMessage(body, null);
        pushMessage(destinationName, message);
        return message;
    }

    public BytesMessage pushMessageWithProperties(String destinationName, byte[] body, Map<String, Object> properties) {
        BytesMessage message = createMessage(body, properties);
        pushMessage(destinationName, message);
        return message;
    }

    public TextMessage pushMessageWithProperties(String destinationName, String body, Map<String, Object> properties) {
        TextMessage message = createMessage(body, properties);
        pushMessage(destinationName, message);
        return message;
    }

    public MapMessage pushMessageWithProperties(String destinationName, Map<String, Object> body, Map<String, Object> properties) {
        MapMessage message = createMessage(body, properties);
        pushMessage(destinationName, message);
        return message;
    }

    public ObjectMessage pushMessageWithProperties(String destinationName, Serializable body, Map<String, Object> properties) {
        ObjectMessage message = createMessage(body, properties);
        pushMessage(destinationName, message);
        return message;
    }
public Message peekMessage(String destinationName) {
if (null == brokerService) {
throw new NullPointerException("peekMessage failure - BrokerService is null");
}
if (destinationName == null) {
throw new IllegalArgumentException("peekMessage failure - destination name is required");
}
ActiveMQDestination destination = ActiveMQDestination.createDestination(destinationName, ActiveMQDestination.QUEUE_TYPE);
Destination brokerDestination = null;
try {
brokerDestination = brokerService.getDestination(destination);
} catch (Exception ex) {
throw new EmbeddedActiveMQBrokerException("peekMessage failure - unexpected exception getting destination from BrokerService", ex);
}
if (brokerDestination == null) {
throw new IllegalStateException(String.format("peekMessage failure - destination %s not found in broker %s", destination.toString(), brokerService.getBrokerName()));
}
org.apache.activemq.command.Message[] messages = brokerDestination.browse();
if (messages != null && messages.length > 0) {
return (Message) messages[0];
}
return null;
}
    // --- Typed peek helpers ------------------------------------------------
    // Thin casts over peekMessage(); they throw ClassCastException if the
    // head message is not of the expected type.

    public BytesMessage peekBytesMessage(String destinationName) {
        return (BytesMessage) peekMessage(destinationName);
    }

    public TextMessage peekTextMessage(String destinationName) {
        return (TextMessage) peekMessage(destinationName);
    }

    public MapMessage peekMapMessage(String destinationName) {
        return (MapMessage) peekMessage(destinationName);
    }

    public ObjectMessage peekObjectMessage(String destinationName) {
        return (ObjectMessage) peekMessage(destinationName);
    }

    public StreamMessage peekStreamMessage(String destinationName) {
        return (StreamMessage) peekMessage(destinationName);
    }
    /** Unchecked wrapper for failures raised by this rule's broker helpers. */
    public static class EmbeddedActiveMQBrokerException extends RuntimeException {
        public EmbeddedActiveMQBrokerException(String message) {
            super(message);
        }

        public EmbeddedActiveMQBrokerException(String message, Exception cause) {
            super(message, cause);
        }
    }
private class InternalClient {
        // JMS plumbing shared by the message helpers; all created in start().
        ActiveMQConnectionFactory connectionFactory;
        Connection connection;
        Session session;
        MessageProducer producer;

        public InternalClient() {
        }
void start() {
connectionFactory = createConnectionFactory();
try {
connection = connectionFactory.createConnection();
session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
producer = session.createProducer(null);
connection.start();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Internal Client creation failure", jmsEx);
}
}
void stop() {
if (null != connection) {
try {
connection.close();
} catch (JMSException jmsEx) {
log.warn("JMSException encounter closing InternalClient connection - ignoring", jmsEx);
}
}
}
public BytesMessage createBytesMessage() {
checkSession();
try {
return session.createBytesMessage();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Failed to create BytesMessage", jmsEx);
}
}
public TextMessage createTextMessage() {
checkSession();
try {
return session.createTextMessage();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Failed to create TextMessage", jmsEx);
}
}
public MapMessage createMapMessage() {
checkSession();
try {
return session.createMapMessage();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Failed to create MapMessage", jmsEx);
}
}
public ObjectMessage createObjectMessage() {
checkSession();
try {
return session.createObjectMessage();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Failed to create ObjectMessage", jmsEx);
}
}
public StreamMessage createStreamMessage() {
checkSession();
try {
return session.createStreamMessage();
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException("Failed to create StreamMessage", jmsEx);
}
}
public void pushMessage(ActiveMQDestination destination, Message message) {
if (producer == null) {
throw new IllegalStateException("JMS MessageProducer is null - has the InternalClient been started?");
}
try {
producer.send(destination, message);
} catch (JMSException jmsEx) {
throw new EmbeddedActiveMQBrokerException(String.format("Failed to push %s to %s", message.getClass().getSimpleName(), destination.toString()), jmsEx);
}
}
void checkSession() {
if (session == null) {
throw new IllegalStateException("JMS Session is null - has the InternalClient been started?");
}
}
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using UnityEngine;
/// <summary>
/// Deprecated backward-compatibility facade: GvrController was replaced by
/// GvrControllerInput, and every static member here simply forwards to the
/// member of the same name on GvrControllerInput.
/// </summary>
/// <remarks>
/// Members use the <c>new</c> modifier to hide the inherited statics so that
/// legacy call sites of the form <c>GvrController.X</c> keep compiling.
/// </remarks>
[System.Obsolete("Replaced by GvrControllerInput.")]
[AddComponentMenu("")]
public class GvrController : GvrControllerInput {
// Connection / API status.
public new static GvrConnectionState State {
get {
return GvrControllerInput.State;
}
}
public new static GvrControllerApiStatus ApiStatus {
get {
return GvrControllerInput.ApiStatus;
}
}
// Pose and IMU readings.
public new static Quaternion Orientation {
get {
return GvrControllerInput.Orientation;
}
}
public new static Vector3 Gyro {
get {
return GvrControllerInput.Gyro;
}
}
public new static Vector3 Accel {
get {
return GvrControllerInput.Accel;
}
}
// Touchpad state.
public new static bool IsTouching {
get {
return GvrControllerInput.IsTouching;
}
}
public new static bool TouchDown {
get {
return GvrControllerInput.TouchDown;
}
}
public new static bool TouchUp {
get {
return GvrControllerInput.TouchUp;
}
}
public new static Vector2 TouchPos {
get {
return GvrControllerInput.TouchPos;
}
}
public new static bool Recentered {
get {
return GvrControllerInput.Recentered;
}
}
// Button state (click, app, home).
public new static bool ClickButton {
get {
return GvrControllerInput.ClickButton;
}
}
public new static bool ClickButtonDown {
get {
return GvrControllerInput.ClickButtonDown;
}
}
public new static bool ClickButtonUp {
get {
return GvrControllerInput.ClickButtonUp;
}
}
public new static bool AppButton {
get {
return GvrControllerInput.AppButton;
}
}
public new static bool AppButtonDown {
get {
return GvrControllerInput.AppButtonDown;
}
}
public new static bool AppButtonUp {
get {
return GvrControllerInput.AppButtonUp;
}
}
public new static bool HomeButtonDown {
get {
return GvrControllerInput.HomeButtonDown;
}
}
public new static bool HomeButtonState {
get {
return GvrControllerInput.HomeButtonState;
}
}
public new static string ErrorDetails {
get {
return GvrControllerInput.ErrorDetails;
}
}
// Returns the GVR C library controller state pointer (gvr_controller_state*).
public new static IntPtr StatePtr {
get {
return GvrControllerInput.StatePtr;
}
}
// Battery state.
public new static bool IsCharging {
get {
return GvrControllerInput.IsCharging;
}
}
public new static GvrControllerBatteryLevel BatteryLevel {
get {
return GvrControllerInput.BatteryLevel;
}
}
}
| {
"pile_set_name": "Github"
} |
// Generated rxjs-compat shim: importing this module patches the `sample`
// operator onto Observable.prototype purely via the require() side effect.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
require("rxjs-compat/add/operator/sample");
//# sourceMappingURL=sample.js.map | {
"pile_set_name": "Github"
} |
<?php
/**
* FecShop file.
*
* @link http://www.fecshop.com/
* @copyright Copyright (c) 2016 FecShop Software LLC
* @license http://www.fecshop.com/license/
*/
return [
// NOTE(review): exact duplicate entries (same key, same value) have been
// removed, since PHP silently lets the later entry override the earlier
// one anyway: 'Captcha', 'My Order', 'Order Status', 'pending' (x2),
// 'Dashboard', 'Add Coupon'.
'fecshop' => 'zh_CN app fecshop',
// Footer links section
// 'Follow Us' => '关注我们',
// 'Follow us in social media' => '在社交平台关注我们',
// 'Sign up for newsletter' => '订阅邮件',
// 'General Links' => '文字条款',
'Enter your email adress' =>'输入您的邮箱',
'Contact Us' => '联系我们',
'Shopping Cart' => '购物车',
'Return Policy' => '退款条约',
'Privacy Policy' => '隐私条约',
'About Us' => '关于我们',
'My Favorite' => '我的收藏',
'My Reviews' => '我的评论',
'My Order' => '我的订单',
'My Account' => '我的账户',
'Site Map' => '网站地图',
'Wx Micro Program' => '微信小程序',
'Feature Product' => '人气推荐',
// 'Follow' => '关注我们',
// 'Newsletter' => '订阅邮件',
// 'All rights reserved' => '保留所有权利',
//'Copyright Notice' => '版权声明',
'Captcha can not empty' => '验证码不能为空',
// Home page
'Language' => '语言',
'Currency' => '货币',
// Page header section
'Welcome!' => '欢迎您!',
'Logout' => '退出',
'Sign In / Join Free' => '登录账户',
'My Orders' => '我的订单',
'My Favorites' => '我的收藏',
'My Review' => '我的评论',
'Products keyword' => '搜索产品',
// Custom menu translations
'custom menu' => '自定义菜单',
'my custom menu 2' => '我的自定义菜单2',
'my custom menu 3' => '我的自定义菜单3',
'Home' => '首页',
// Home page product list blocks
'best seller' => '热销产品',
'featured products' => '特色产品',
'more' => '更多',
// Category page translations
'Sort By' => '排序',
'Sort' => '排序',
'Filter' => '过滤',
'Hot' => '销量',
'Review' => '评论',
'Favorite' => '收藏',
'New' => '上架时间',
'$ Low to High' => '¥ 价格由低到高',
'$ High to Low' => '¥ 价格由高到低',
'Refine By' => '过滤选项',
'clear all' => '清空所有过滤',
// Category sidebar attribute filters
'style' => '风格',
'dresses-length' => '裙长',
'Sexy & Club' => '性感&俱乐部',
// Customer login page
'Login or Create an Account' => '登录 创建用户',
'New Customers' => '新用户',
'By creating an account with our store, you will be able to move through the checkout process faster, store multiple shipping addresses, view and track your orders in your account and more.' => '在我们的店铺里面注册账户,您可以快速的下单,保存您的货运地址,查看或追踪您的订单信息,等等',
'Register' => '注册',
'Login' => '登录',
'E-mail' => '邮箱地址',
'Registered Customers' => '已注册用户',
'If you have an account with us, please log in.' => '如果您已经注册了一个用户,请直接登录',
'Email Address' => '邮箱地址',
'Password' => '密码',
'Captcha' => '验证码',
'click refresh' => '点击刷新',
'Sign In' => '登录',
'Forgot Your Password?' => '忘记密码?',
'user password is not correct' => '用户的账号密码不正确',
// newsletter
'ERROR,Your email address has subscribe , Please do not repeat the subscription' => '您的电子邮件地址已订阅,请不要重复订阅',
'newsletter email address is empty' => '邮件订阅邮箱地址为空',
'The email address format is incorrect!' => '电子邮件地址格式不正确!',
'Your subscribed email was successful, You can {urlB} click Here to Home Page {urlE}, Thank You.' => '您的订阅电子邮件已成功,您可以{urlB}点击此处访问主页{urlE},谢谢。',
// Customer registration page
'Create an Account' => '创建新账户',
'Personal Information' => '个人账户信息',
'First Name' => '名',
'Last Name' => '姓',
'Sign Up for Newsletter' => '订阅邮件',
'Login Information' => '登录信息',
'Confirm Password' => '确认密码',
'Submit' => '提交',
'Back' => '返回',
'This is a required field.' => '这是一个必填选项',
'Please enter a valid email address. For example [email protected].' => '请填写一个正确的邮箱,譬如:[email protected]。',
'first name length must between' => '名字的长度范围:',
'last name length must between'=> '姓的长度范围',
'Please enter 6 or more characters. Leading or trailing spaces will be ignored.' => '请输入6个或更多字符。 前导或尾随空格将被忽略。',
'Please make sure your passwords match. ' => '请确保您的密码和确认密码一致。',
'Captcha is not right' => '验证码不正确',
'Email is not a valid email address.' => '您输入的邮箱格式格式不正确',
// Customer account dashboard
'My Dashboard' => '我的信息中心',
'Hello' => '你好',
'From your My Account Dashboard you have the ability to view a snapshot of your recent account activity and update your account information. Select a link below to view or edit information.' => '您可以在我的帐户信息中心查看最近帐户活动的快照,并更新您的帐户信息。 选择以下链接可查看或编辑信息。',
'Contact Information' => '联系信息',
'Edit' => '编辑',
'My Address Book' => '我的货运地址',
'You Can Manager Your Address' => '您可以编辑您的货运地址',
'Manager Addresses' => '编辑货运地址',
'You Can View Your Order' => '您可以查看您的订单信息',
'View' => '查看',
'Account Dashboard' => '帐户中心',
'Account Information' => '账户信息',
'Address Book' => '货运地址',
'My Product Reviews' => '我的产品评论',
// Account center: password recovery
'Forgot Password' => '密码找回',
'Your Email Address' => '您的邮箱地址',
'Confirm your identity to reset password' => '确认您的身份',
'Send Authorization Code' => '发送授权码',
'Reset Password' => '重置密码',
'Reset Password Success' => '重置密码成功',
'Confirm your identity to reset password ,If you still can\'t find it, click {logUrlB} support center {logUrlE} for help' => '确认您的身份以重置密码,如果您仍旧没有发现邮件,点击{logUrlB}帮助中心{logUrlE}寻求帮助',
'email is not exist' => '该邮箱不存在',
'We\'ve sent a message to the email address' => '我们已经发送了一封邮件给这个邮箱地址',
'Please follow the instructions provided in the message to reset your password.' => '请按照消息中提供的说明重置密码。',
'Didn\'t receive the mail from us?' => '没有收到我们的邮件?',
'Check your bulk or junk email folder.' => '检查您的批量或垃圾电子邮件文件夹。',
'click here to retry' => '点击这里重试',
'click here' => '点击这里',
'Email address do not exist, please {logUrlB} click here {logUrlE} to re-enter!' => '邮件地址不存在,请{logUrlB}点击这里{logUrlE}重新进入',
'Select your new password' => '请填写您的新密码',
'Reset you account success, you can {logUrlB} click here {logUrlE} to login .' => '重置密码成功,您可以{logUrlB}点击这里{logUrlE}进行登录',
'Your Reset Password Token is Expired, You can {logUrlB} click here {logUrlE} to retrieve it ' => '您重置密码的token已经过期,您可以{logUrlB}点击这里{logUrlE}重新发送',
// Account center - account info - edit
'Edit Account' => '编辑账号',
'Change Password' => '更改密码',
'Current Password' => '当前密码',
'New Password' => '新密码',
'Confirm New Password' => '确认新密码',
'edit account info success' => '编辑账号信息成功',
// Account center - shipping address - edit
'Country' => '国家',
'State' => '省',
'City' => '城市',
'street1' => '街道1',
'street2' => '街道2',
'Zip Code' => '邮政编码',
'Is Default' => '默认',
'Save Address' => '保存地址',
'You Must Fill All Field' => '您必须需填写所有的字段',
'Please select region, state or province' => '请选择地区,州或省',
'Edit Address' => '编辑收货地址',
// Account center - shipping address - list
'Customer Address' => '客户地址',
'Address' => '地址',
'Operation' => '操作',
'Delete' => '删除',
'Default' => '默认',
'Add New Address' => '增加新地址',
'Modify' => '修改',
// Account center - my orders - list
'Page:' => '页数:',
'Reorder' => '重新下单',
'View Order' => '查看订单',
'Customer Order' => '我的订单',
'Order #' => '订单编号',
'Date' => '日期',
'Ship To' => '收货人',
'Order Total' => '订单总额',
'Order Status' => '订单状态',
// Account center - my orders - detail
'Order#' => '订单编号',
'Order Date' => '订单日期',
'pending' => '未付款',
'suspected_fraud' => '涉嫌欺诈',
'processing' => '已付款',
'Shipping Address' => '货运地址',
'T:' => '手机/电话:',
'Payment Method' => '支付方式',
'Shipping Method' => '货运方式',
'Items Ordered' => '订单产品列表',
'Product Name' => '产品名称',
'Product Image' => '产品图片',
'Sku' => '产品编码',
'Price' => '价格',
'Qty' => '个数',
'Subtotal' => '小计',
'Shipping Cost' => '运费',
'Discount' => '折扣优惠',
'Grand Total' => '总额',
'Product Info' => '产品信息',
// Account center - my product reviews
'Your Review is accept.' => '您的评论已通过',
'Your Review is refused.' => '您的评论已被拒绝',
'Your Review is awaiting moderation...' => '您的评论在等待审核',
// Account center - my favorites
'Favorite Date:' => '收藏时间:',
// Contacts page
'Name' => '名字',
'Telephone' => '电话',
'Comment' => '留言内容',
'Contacts' => '联系我们',
'Contact us Send Success' => '联系我们发送成功',
// Product detail page - color & size
'My Color:' => '我的颜色:',
'My Size:' => '我的尺码:',
'My Size2:' => '我的尺码2:',
'My Size3:' => '我的尺码3:',
'Color:' => '颜色:',
'color:' => '颜色:',
'color' => '颜色',
'size' => '尺码',
'Size:' => '尺码:',
//'color' => '颜色',
//'size' => '尺码',
'one-color' => '单色',
'red' => '红色',
'white' => '白色',
'black' => '黑色',
'blue' => '蓝色',
'green' => '绿色',
'yellow' => '黄色',
'gray' => '灰色',
'khaki' => '黄褐色',
'ivory' => '象牙色',
'beige' => '米色',
'orange' => '橙色',
'cyan' => '青色',
'leopard' => '豹纹',
'camouflage' => '伪装色',
'silver' => '银白色',
'pink' => '粉红色',
'purple' => '紫色',
'brown' => '棕色',
'golden' => '金色',
'multicolor' => '彩色',
'white & blue' => '白 & 蓝',
'white & black' => '白 & 黑',
// Product detail page - description section
'price' => '产品价格',
'You already favorite this product' => '您已经收藏该产品',
'Average rating' => '平均评分',
'reviews' => '评论',
'Item Code:' => '产品编号:',
'Qty:' => '个数:',
'Add To Cart' => '加入购物车',
'Add to Favorites' => '收藏该产品',
'Description' => '产品描述',
'Reviews' => '评论',
'Shipping & Payment' => '货运 & 支付',
'Customers Who Bought This Item Also Bought' => '买个这个产品的用户还买的产品',
'weight' => '重量',
'long' => '长',
'width' => '宽',
'high' => '高',
'volume weight' => '体积重',
// Product detail page - payment description
'Payment Methods:' => '支付方式:',
'FECSHOP.com accepts PayPal, Credit Card, Western Union and Wire Transfer as secure payment methods:' => 'FECSHOP.com接受 贝宝,信用卡,西联汇款和电汇作为安全支付方式:',
'Global:' => '全部:',
'1. PayPal' => '1. 贝宝',
'1) Login To Your Account or use Credit Card Express.' => '1)登录到您的帐户或使用信用卡。',
'2) Enter your Card Details, the order will be shipped to your PayPal address. And click "Submit".' => '2)输入您的信用卡详情,订单将发送到您的贝宝地址。 然后点击“提交”。',
'3) Your Payment will be processed and a receipt will be sent to your email inbox.' => '3)您的付款将被处理,并且收据将发送到您的电子邮件收件箱。',
'1) Choose your shipping address OR create a new one.' => '1)选择您的送货地址或创建一个新的。',
'2) Enter your Card Details and click "Submit".' => '2)输入您的信用卡详细信息,然后单击“提交”。',
'2. Credit Card' => '2. 信用卡',
// Product review section
'Rate' => '评分',
'Your Name' => '您的名字',
'Review content can not empty' => '评论内容不能为空',
'Summary of Your Review' => '您的产品评论概要',
'Your review content' => '您的产品评论内容',
'Summary can not empty' => '评论概要不能为空',
'Average rating :' => '平均评分',
'Product Review' => '产品评论',
'By' => '由',
'Your comment is awaiting moderation' => '您的评论正在等待审核',
'Add Review' => '添加评论',
'View All Review' => '查看所有评论',
'Wholesale Prices :' => '批发价格:',
'Price:' => '价格:',
'Summary' => '概要',
// Product search page
'Show Per Page:' => '每页个数',
'Search results for \'{searchText}\' returns no results' => '搜索词 \'{searchText}\',找不到搜索结果',
// Shopping cart
'Your Shopping Cart is empty' => '您的购物车为空',
'Start shopping now!' => '现在开始购物',
'Please {urlB}log in{urlE} to view the products you have previously added to your Shopping Cart.' => '请{urlB}登录{urlE}您的账户来查看你之前加入购物车的产品',
'You have no items in your favorite.' => '您没有收藏过产品',
'You have submitted no reviews'=> '您没有提交过评论',
'Unit Price' => '单价',
'Discount Codes' => '优惠券',
'Enter your coupon code if you have one.' => '如果您有优惠券,请在下面填写',
'Cancel Coupon' => '取消优惠券',
'Add Coupon' => '添加优惠券',
'Dashboard' => '我的信息中心',
'OR' => '或',
'Proceed to Pay' => '继续支付',
// Shopping cart coupons
'Coupon is not available or has expired' => '优惠券不可用或已过期',
'add coupon fail' => '添加优惠券失败',
'The coupon can not be used if the product amount in the shopping cart is less than {conditions} dollars' => '如果您的购物车金额小于{conditions}美元,该优惠券是不可用的',
// Checkout page
'Checkout' => '支付',
'Welcome to the checkout,Fill in the fields below to complete your purchase' => '欢迎结帐,填写以下字段以完成购买',
'Already registered? Click here to login' => '已经注册? 点击此处登录',
'Coupon codes (optional)' => '优惠券代码(可选)',
'Place order now' => '支付订单',
'Please wait, processing your order...' => '正在处理您的订单,请稍候...',
'Create an account for later use' => '创建帐户以供以后使用',
'Billing address' => '帐单地址',
'Street' => '街道',
'New Address' => '新地址',
'Check / Money Order' => '现金支付',
'product: [ {product_name} ] is stock out' => '产品:[ {product_name} ] 没有足够的库存',
'Off-line Money Payments' => '线下现金支付',
'PayPal Website Payments Standard' => '贝宝标准支付',
'You will be redirected to the PayPal website when you place an order. ' => '当您下订单时,您将被重定向到PayPal网站。',
'Free shipping( 7-20 work days)' => '免邮费( 7-20 工作日)',
'Fast Shipping( 5-10 work days)' => '快速邮寄( 5-10 工作日)',
'Review your order' => '订单详细',
'you must login your account before you use coupon' => '您必须在使用优惠券之前登录您',
'email address is empty, you must Fill in email' => '电子邮件地址为空,您必须填写电子邮件',
'email address format is incorrect' => '电子邮件地址格式不正确',
'This email is registered , you must fill in another email' => '此电子邮件已注册,您必须填写另一封电子邮件',
'Password length must be greater than or equal to {passwdMinLength}' => '密码长度必须大于或等于{passwdMinLength}',
'Password length must be less than or equal to {passwdMaxLength}' => '密码长度必须小于或等于{passwdMaxLength}',
'The passwords are inconsistent' => '密码不一致',
'All Category' => '全部分类',
'Product Name: {product_name}, The product is not in stock' => '产品名称: {product_name}, 产品没有库存了',
'Follow' => '关注我们',
'Brand' => '品牌',
'this email is exist!' => '该邮箱地址已经被注册',
// PayPal redirect interstitial page
'Click here if you are not redirected within 10 seconds ...' => '如果您在10秒内未重定向,请点击此处...',
'You will be redirected to the PayPal website in a few seconds...' => '您将在几秒钟后重定向到PayPal网站...',
// Order success page
'Continue Shopping' => '继续购物',
'Your order has been received,Thank you for your purchase!' => '您的订单已收到,感谢您的购买!',
'Your order # is:' => '您的订单#是:',
'You will receive an order confirmation email with details of your order and a link to track its progress.' => '您将收到一封订单确认电子邮件,其中包含您订单的详细信息以及用于跟踪其进度的链接。',
// 404 page
'We Couldn’t Find this Page' => '我们无法找到这个页面',
'Please contact us if you think this is a server error, Thank you.' => '如果您认为这是服务器错误,请与我们联系,谢谢。',
'Bring me back Home' => '带我去首页',
];
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<meta content="text/html; charset=iso-8859-1" http-equiv="content-type">
<title>Rename file</title>
<link rel="stylesheet" type="text/css" href="Help.css">
</head>
<body>
<h1>Rename file</h1>
<p>This tool will rename a user-specified input raster or vector data set,
including all associated files.</p>
<h2 class="SeeAlso">See Also:</h2>
<ul>
<li>None</li>
</ul>
<h2 class="SeeAlso">Scripting:</h2>
<p>This is an example of a Python script using this tool:</p>
<p style="background-color: rgb(240,240,240);">
<code>
wd = pluginHost.getWorkingDirectory() <br>
input_file = wd + "input.dep" <br>
output_file = wd + "output.dep" <br>
args = [input_file, output_file] <br>
pluginHost.runPlugin("RenameFile", args, False) <br>
</code>
</p>
<p>And the following is a Groovy script also using this tool:</p>
<p style="background-color: rgb(240,240,240);">
<code>
def wd = pluginHost.getWorkingDirectory() <br>
def inputFile = wd + "input.dep" <br>
def outputFile = wd + "output.dep" <br>
String[] args = [inputFile, outputFile] <br>
pluginHost.runPlugin("RenameFile", args, false) <br>
</code>
</p>
<h2 class="SeeAlso">Credits:</h2>
<ul>
<li>John Lindsay ([email protected], 2013)</li>
</ul>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<?
IncludeModuleLangFile(__FILE__);
/*
RegisterModuleDependences('intranet', 'OnPlannerInit', 'calendar', 'CCalendarEventHandlers', 'OnPlannerInit');
RegisterModuleDependences('intranet', 'OnPlannerAction', 'calendar', 'CCalendarEventHandlers', 'OnPlannerAction');
*/
/**
 * Bitrix event handlers that feed calendar data to the intranet "planner"
 * widget (see the commented RegisterModuleDependences() calls above for the
 * events these handlers are meant to be attached to).
 */
class CCalendarEventHandlers
{
/**
 * OnPlannerInit handler: collects the user's calendar events for today.
 *
 * @param array $params Optional USER_ID (defaults to the logged-in user)
 *                      and FULL flag (when truthy, per-event details are
 *                      returned in DATA[EVENTS]; otherwise only the time
 *                      of the next upcoming event is computed).
 * @return array|false Planner payload (DATA + SCRIPTS), or false when no
 *                     user id can be determined.
 */
public static function OnPlannerInit($params)
{
global $USER, $DB, $CACHE_MANAGER;
if (!isset($params['USER_ID']) || intval($params['USER_ID']) <= 0)
{
if (!is_object($USER))
{
return false;
}
$userId = $USER->GetID();
}
else
{
$userId = $params['USER_ID'];
}
// Tag the cached page so it can be invalidated per user.
$CACHE_MANAGER->RegisterTag('calendar_user_'.$userId);
// Build today's window [00:00:00 .. 23:59:59] adjusted by the user's
// UTC offset (GetDayLen() is assumed to be the number of seconds in a
// day - TODO confirm).
$date_from = CCalendar::Date(time() - date('Z', time()) + CCalendar::GetCurrentOffsetUTC($userId), false);
$ts_date_from = CCalendar::Timestamp($date_from) - CCalendar::GetCurrentOffsetUTC($userId);
$date_from = CCalendar::Date($ts_date_from);
$ts_date_to = $ts_date_from + CCalendar::GetDayLen() - 1;
$date_to = $date_from;
$arEvents = array();
// Timestamp of the next upcoming event; -1 means "none found yet".
$eventTime = -1;
$arNewEvents = CCalendarEvent::GetList(array(
'arFilter' => array(
"OWNER_ID" => $userId,
"FROM_LIMIT" => $date_from,
"TO_LIMIT" => $date_to
),
'arOrder' => Array('DATE_FROM_TS_UTC' => 'asc'),
'parseRecursion' => true,
'preciseLimits' => true,
'userId' => $userId,
'skipDeclined' => true,
'fetchAttendees' => false,
'fetchMeetings' => true
));
if (count($arNewEvents) > 0)
{
$now = time() + CTimeZone::GetOffset($userId);
$today = ConvertTimeStamp($now, 'SHORT');
$format = $DB->dateFormatToPHP(IsAmPmMode() ? 'H:MI T' : 'HH:MI');
foreach ($arNewEvents as $arEvent)
{
// Skip meetings this user has declined.
if ($arEvent['IS_MEETING'] && $arEvent['MEETING_STATUS'] == 'N')
continue;
$fromTo = CCalendarEvent::GetEventFromToForUser($arEvent, $userId);
$ts_from = $fromTo['TS_FROM'];
//$ts_to = $fromTo['TS_TO'];
$ts_from_utc = $arEvent['DATE_FROM_TS_UTC'];
$ts_to_utc = $arEvent['DATE_TO_TS_UTC'];
if ($arEvent['RRULE'])
{
// Recurring event: recompute UTC bounds from this occurrence.
$ts_from_utc = $fromTo['TS_FROM'] - CCalendar::GetCurrentOffsetUTC($userId);
$ts_to_utc = $ts_from_utc + $arEvent['DT_LENGTH'];
}
// Drop recurring occurrences that do not intersect today's window.
if ($arEvent['RRULE'] && ($ts_to_utc <= $ts_date_from || $ts_from_utc >= $ts_date_to))
continue;
// Track the earliest event start that is still in the future.
if(($eventTime < 0 || $eventTime > $ts_from) && $ts_from >= $now)
$eventTime = $ts_from;
if($params['FULL'])
{
$arEvents[] = array(
'ID' => $arEvent['ID'],
'OWNER_ID' => $userId,
'CREATED_BY' => $arEvent['CREATED_BY'],
'NAME' => $arEvent['NAME'],
'DATE_FROM' => $fromTo['DATE_FROM'],
'DATE_TO' => $fromTo['DATE_TO'],
'TIME_FROM' => FormatDate($format, $fromTo['TS_FROM']),
'TIME_TO' => FormatDate($format, $fromTo['TS_TO']),
'IMPORTANCE' => $arEvent['IMPORTANCE'],
'ACCESSIBILITY' => $arEvent['ACCESSIBILITY'],
'DATE_FROM_TODAY' => $today == ConvertTimeStamp($fromTo['TS_FROM'], 'SHORT'),
'DATE_TO_TODAY' => $today == ConvertTimeStamp($fromTo['TS_TO'], 'SHORT'),
'SORT' => $fromTo['TS_FROM']
);
}
}
}
// Sort
usort($arEvents, array('CCalendarEventHandlers', 'DateSort'));
CJSCore::RegisterExt('calendar_planner_handler', array(
'js' => '/bitrix/js/calendar/core_planner_handler.js',
'css' => '/bitrix/js/calendar/core_planner_handler.css',
'lang' => BX_ROOT.'/modules/calendar/lang/'.LANGUAGE_ID.'/core_planner_handler.php',
'rel' => array('date', 'timer')
));
return array(
'DATA' => array(
'CALENDAR_ENABLED' => true,
'EVENTS' => $arEvents,
'EVENT_TIME' => $eventTime < 0 ? '' : (FormatDate(IsAmPmMode() ? "g:i a" : "H:i", $eventTime)),
),
'SCRIPTS' => array('calendar_planner_handler')
);
}
/**
 * OnPlannerAction handler: dispatches the planner's AJAX actions.
 * 'calendar_add' creates a quick event from $_REQUEST fields;
 * 'calendar_show' returns details for one event. Unknown actions return
 * null implicitly.
 *
 * @param string $action action identifier.
 * @param array  $params request context (SITE_ID is used by calendar_show).
 */
public static function OnPlannerAction($action, $params)
{
switch($action)
{
case 'calendar_add':
return self::plannerActionAdd(array(
'NAME' => $_REQUEST['name'],
'FROM' => $_REQUEST['from'],
'TO' => $_REQUEST['to'],
'ABSENCE' => $_REQUEST['absence']
));
// NOTE(review): this break is unreachable - the return above exits first.
break;
case 'calendar_show':
return self::plannerActionShow(array(
'ID' => intval($_REQUEST['id']),
'SITE_ID' => $params['SITE_ID']
));
// NOTE(review): unreachable, as above.
break;
}
}
/**
 * Loads a single event by id, restricted to today's window, and maps it to
 * the planner's detail structure (including the guest list for meetings).
 *
 * @param array $arParams ID of the event and SITE_ID for name formatting.
 * @return array|null Event details, or null (implicit) when nothing matches.
 */
protected static function getEvent($arParams)
{
global $USER;
$userId = $USER->GetID();
// Same "today" window computation as in OnPlannerInit, but for the
// current user only.
$date_from = CCalendar::Date(time() - date('Z', time()) + CCalendar::GetCurrentOffsetUTC(), false);
$ts_date_from = CCalendar::Timestamp($date_from) - CCalendar::GetCurrentOffsetUTC();
$date_from = CCalendar::Date($ts_date_from);
$date_to = $date_from;
$ts_date_to = $ts_date_from + CCalendar::GetDayLen() - 1;
$res = CCalendarEvent::GetList(
array(
'arFilter' => array(
"ID" => $arParams['ID'],
"FROM_LIMIT" => $date_from,
"TO_LIMIT" => $date_to
),
'parseRecursion' => true,
'fetchAttendees' => true,
'checkPermissions' => true,
'skipDeclined' => true
)
);
$arEvents = array();
foreach ($res as $arEvent)
{
// Skip declined meetings and recurring occurrences outside today.
if ($arEvent['IS_MEETING'] && $arEvent['MEETING_STATUS'] == 'N')
continue;
$fromTo = CCalendarEvent::GetEventFromToForUser($arEvent, $userId);
if ($arEvent['RRULE'])
{
$ts_from_utc = $fromTo['TS_FROM'] - CCalendar::GetCurrentOffsetUTC();
$ts_to_utc = $ts_from_utc + $arEvent['DT_LENGTH'];
if ($ts_to_utc <= $ts_date_from || $ts_from_utc >= $ts_date_to)
continue;
}
$arEvents[] = $arEvent;
}
if (is_array($arEvents) && count($arEvents) > 0)
{
// Only the first surviving event/occurrence is returned.
$arEvent = $arEvents[0];
$arEvent['GUESTS'] = array();
if ($arEvent['IS_MEETING'] && is_array($arEvent['~ATTENDEES']))
{
$arGuests = $arEvent['~ATTENDEES'];
foreach ($arGuests as $guest)
{
$arEvent['GUESTS'][] = array(
'id' => $guest['USER_ID'],
'name' => CUser::FormatName(CSite::GetNameFormat(null, $arParams['SITE_ID']), $guest, true),
'status' => $guest['STATUS'],
'accessibility' => $guest['ACCESSIBILITY'],
'bHost' => $guest['USER_ID'] == $arEvent['MEETING_HOST'],
);
// Surface the current user's own attendance status on the event.
if ($guest['USER_ID'] == $USER->GetID())
{
$arEvent['STATUS'] = $guest['STATUS'];
}
}
}
// Build the link to the event in the owner's personal calendar.
$set = CCalendar::GetSettings();
$url = str_replace(
'#user_id#', $arEvent['CREATED_BY'], $set['path_to_user_calendar']
).'?EVENT_ID='.$arEvent['ID'];
$fromTo = CCalendarEvent::GetEventFromToForUser($arEvent, $USER->GetID());
return array(
'ID' => $arEvent['ID'],
'NAME' => $arEvent['NAME'],
'DETAIL_TEXT' => $arEvent['DESCRIPTION'],
'DATE_FROM' => $fromTo['DATE_FROM'],
'DATE_TO' => $fromTo['DATE_TO'],
'ACCESSIBILITY' => $arEvent['ACCESSIBILITY'],
'IMPORTANCE' => $arEvent['IMPORTANCE'],
'STATUS' => $arEvent['STATUS'],
'IS_MEETING' => $arEvent['IS_MEETING'] ? 'Y' : 'N',
'GUESTS' => $arEvent['GUESTS'],
'UF_WEBDAV_CAL_EVENT' => $arEvent['UF_WEBDAV_CAL_EVENT'],
'URL' => $url,
);
}
}
/**
 * Combines a date string and a wall-clock time into a site FORMAT_DATETIME
 * string, honoring the site's 12h/24h mode.
 */
protected static function MakeDateTime($date, $time)
{
global $DB;
if (!IsAmPmMode())
{
// NOTE(review): this first assignment is dead code - it is
// immediately overwritten by the FormatDate() call below.
$date_start = $date.' '.$time.':00';
$date_start = FormatDate(
$DB->DateFormatToPhp(FORMAT_DATETIME),
MakeTimeStamp(
$date.' '.$time,
FORMAT_DATE.' HH:MI'
)
);
}
else
{
$date_start = FormatDate(
$DB->DateFormatToPhp(FORMAT_DATETIME),
MakeTimeStamp(
$date.' '.$time,
FORMAT_DATE.' H:MI T'
)
);
}
return $date_start;
}
/**
 * Creates a calendar event for the current user on today's date from the
 * planner quick-add form; marks the user "absent" when ABSENCE == 'Y'.
 */
protected static function plannerActionAdd($arParams)
{
global $USER;
$today = ConvertTimeStamp(time()+CTimeZone::GetOffset(), 'SHORT');
$data = array(
'CAL_TYPE' => 'user',
'OWNER_ID' => $USER->GetID(),
'NAME' => $arParams['NAME'],
'DT_FROM' => self::MakeDateTime($today, $arParams['FROM']),
'DT_TO' => self::MakeDateTime($today, $arParams['TO']),
);
if ($arParams['ABSENCE'] == 'Y')
{
$data['ACCESSIBILITY'] = 'absent';
}
CCalendar::SaveEvent(array(
'arFields' => $data,
'userId' => $USER->GetID(),
'autoDetectSection' => true,
'autoCreateSection' => true
));
}
/**
 * Returns display-ready details for one event: formatted date labels,
 * truncated description and host/guest lists grouped by accept status.
 *
 * @return array|false array('EVENT' => ...) on success,
 *                     array('error' => ...) when ID <= 0,
 *                     false when the event cannot be loaded.
 */
protected static function plannerActionShow($arParams)
{
global $DB, $USER;
$res = false;
if($arParams['ID'] > 0)
{
$event = self::getEvent(array(
'ID' => $arParams['ID'],
'SITE_ID' => $arParams['SITE_ID']
));
if ($event)
{
$today = ConvertTimeStamp(time() + CTimeZone::GetOffset(), 'SHORT');
$now = time();
$res = array(
'ID' => $event['ID'],
'NAME' => $event['NAME'],
'DESCRIPTION' => CCalendarEvent::ParseText($event['DETAIL_TEXT'], $event['ID'], $event['UF_WEBDAV_CAL_EVENT']),
'URL' => '/company/personal/user/'.$USER->GetID().'/calendar/?EVENT_ID=' .$event['ID'],
'DATE_FROM' => MakeTimeStamp($event['DATE_FROM']),
'DATE_TO' => MakeTimeStamp($event['DATE_TO']),
'STATUS' => $event['STATUS'],
);
$res['DATE_FROM_TODAY'] = ConvertTimeStamp($res['DATE_FROM'],'SHORT') == $today;
$res['DATE_TO_TODAY'] = ConvertTimeStamp($res['DATE_TO'], 'SHORT') == $today;
if ($res['DATE_FROM_TODAY'])
{
if (IsAmPmMode())
{
$res['DATE_F'] = FormatDate("today g:i a", $res['DATE_FROM']);
$res['DATE_T'] = FormatDate("g:i a", $res['DATE_TO']);
}
else
{
$res['DATE_F'] = FormatDate("today H:i", $res['DATE_FROM']);
$res['DATE_T'] = FormatDate("H:i", $res['DATE_TO']);
}
if ($res['DATE_TO_TODAY'])
$res['DATE_F'] .= ' - '.$res['DATE_T'];
if ($res['DATE_FROM'] > $now)
{
// NOTE(review): time()*2 - X equals now + (now - X); combined
// with the 'Hdiff' format this appears to render the interval
// remaining until the event starts - verify against
// FormatDate('Hdiff') semantics.
$res['DATE_F_TO'] = GetMessage('TM_IN').' '.FormatDate('Hdiff', time()*2-($res['DATE_FROM'] - CTimeZone::GetOffset()));
}
}
else if ($res['DATE_TO_TODAY'])
{
$res['DATE_F'] = FormatDate(str_replace(
array('#today#', '#time#'),
array('today', 'H:i'),
GetMessage('TM_TILL')
), $res['DATE_TO']);
}
else
{
// Neither endpoint is today: show full date/time, without seconds.
$fmt = preg_replace('/:s$/', '', $DB->DateFormatToPHP(CSite::GetDateFormat("FULL")));
$res['DATE_F'] = FormatDate($fmt, $res['DATE_FROM']);
$res['DATE_F_TO'] = FormatDate($fmt, $res['DATE_TO']);
}
if ($event['IS_MEETING'] == 'Y')
{
// Split attendees: host first, then guests grouped by status
// (Y = accepted, N = declined, Q = not answered).
$arGuests = array('Y' => array(), 'N' => array(), 'Q' => array());
foreach ($event['GUESTS'] as $key => $guest)
{
$guest['url'] = str_replace(
array('#ID#', '#USER_ID#'),
$guest['id'],
COption::GetOptionString('intranet', 'path_user', '/company/personal/user/#USER_ID#/', $arParams['SITE_ID'])
);
if ($guest['bHost'])
{
$res['HOST'] = $guest;
}
else
{
$arGuests[$guest['status']][] = $guest;
}
}
$res['GUESTS'] = array_merge($arGuests['Y'], $arGuests['N'], $arGuests['Q']);
}
// Trim long descriptions to ~150 chars and close any HTML tags the
// cut may have left open.
if (strlen($res['DESCRIPTION']) > 150)
{
$res['DESCRIPTION'] = CUtil::closetags(substr($res['DESCRIPTION'], 0, 150)).'...';
}
$res = array('EVENT' => $res);
}
}
else
{
$res = array('error' => 'event not found');
}
return $res;
}
/**
 * usort() comparator ordering planner events by their SORT timestamp
 * (ascending).
 */
private static function DateSort($a, $b)
{
if ($a['SORT'] == $b['SORT'])
return 0;
if ($a['SORT'] < $b['SORT'])
return -1;
return 1;
}
}
?> | {
"pile_set_name": "Github"
} |
package getter
// Storage is an interface that knows how to lookup downloaded directories
// as well as download and update directories from their sources into the
// proper location.
type Storage interface {
	// Dir returns the directory on local disk where the directory with the
	// given key can be loaded from, along with a boolean reporting whether
	// that directory exists.
	Dir(key string) (string, bool, error)

	// Get downloads the directory identified by key from the given source,
	// optionally updating an already-downloaded copy when update is true.
	Get(key string, source string, update bool) error
}
| {
"pile_set_name": "Github"
} |
{
"navigationBarTitleText": "上传文件"
}
| {
"pile_set_name": "Github"
} |
16
comment:c
C -1.2131910 0.4206030 -0.9562340
C -1.1325450 0.0776310 1.3883179
C 0.1584410 0.0736640 1.7848459
C 1.5433220 0.4122250 -0.1100640
C 1.0358320 -0.5288610 -0.9406110
C -0.3323900 -0.5247450 -1.3605950
H -2.2736931 0.3303150 -1.1767660
H -1.7445281 0.9714180 1.4741040
H 0.6217310 0.9638990 2.2014670
H 2.5444241 0.3157350 0.3022330
H 1.6198460 -1.4262190 -1.1389530
H -0.7101280 -1.4191900 -1.8534880
H 0.6934610 -0.8563960 1.9413540
H -1.6682709 -0.8491300 1.2156010
H 1.0958930 1.3952990 -0.0208070
H -0.8868340 1.4015840 -0.6307570
| {
"pile_set_name": "Github"
} |
#ifndef CORRECT_FEC_H
#define CORRECT_FEC_H
// libcorrect's libfec shim header
// this is a partial implementation of libfec
// header signatures derived from found usages of libfec -- some things may be different
#include <correct.h>
// Reed-Solomon
// Create a Reed-Solomon codec instance.
// NOTE(review): parameter semantics are assumed to mirror libfec's
// init_rs_char -- verify against the libfec documentation.
void *init_rs_char(int symbol_size, int primitive_polynomial, int first_consecutive_root,
int root_gap, int number_roots, unsigned int pad);
// Release a codec created by init_rs_char.
void free_rs_char(void *rs);
// Encode msg, writing parity symbols into parity.
void encode_rs_char(void *rs, const unsigned char *msg, unsigned char *parity);
// Decode (and correct) block in place, optionally using known erasure positions.
void decode_rs_char(void *rs, unsigned char *block, int *erasure_locations, int num_erasures);
// Convolutional Codes
// Polynomials
// These have been determined via find_conv_libfec_poly.c
// We could just make up new ones, but we use libfec's here so that
// codes encoded by this library can be decoded by the original libfec
// and vice-versa
// (All values below are octal constants, matching libfec's convention.)
#define V27POLYA 0155
#define V27POLYB 0117
#define V29POLYA 0657
#define V29POLYB 0435
#define V39POLYA 0755
#define V39POLYB 0633
#define V39POLYC 0447
#define V615POLYA 042631
#define V615POLYB 047245
#define V615POLYC 056507
#define V615POLYD 073363
#define V615POLYE 077267
#define V615POLYF 064537
// Convolutional Methods
// For each constraint length/rate pair there is a create/init/update/
// chainback/delete quintet.  The _mystery parameters are unidentified
// libfec arguments kept for signature compatibility (see header note above).
void *create_viterbi27(int num_decoded_bits);
int init_viterbi27(void *vit, int _mystery);
int update_viterbi27_blk(void *vit, unsigned char *encoded_soft, int n_encoded_groups);
int chainback_viterbi27(void *vit, unsigned char *decoded, unsigned int n_decoded_bits, unsigned int _mystery);
void delete_viterbi27(void *vit);
void *create_viterbi29(int num_decoded_bits);
int init_viterbi29(void *vit, int _mystery);
int update_viterbi29_blk(void *vit, unsigned char *encoded_soft, int n_encoded_groups);
int chainback_viterbi29(void *vit, unsigned char *decoded, unsigned int n_decoded_bits, unsigned int _mystery);
void delete_viterbi29(void *vit);
void *create_viterbi39(int num_decoded_bits);
int init_viterbi39(void *vit, int _mystery);
int update_viterbi39_blk(void *vit, unsigned char *encoded_soft, int n_encoded_groups);
int chainback_viterbi39(void *vit, unsigned char *decoded, unsigned int n_decoded_bits, unsigned int _mystery);
void delete_viterbi39(void *vit);
void *create_viterbi615(int num_decoded_bits);
int init_viterbi615(void *vit, int _mystery);
int update_viterbi615_blk(void *vit, unsigned char *encoded_soft, int n_encoded_groups);
int chainback_viterbi615(void *vit, unsigned char *decoded, unsigned int n_decoded_bits, unsigned int _mystery);
void delete_viterbi615(void *vit);
// Misc other
// Even/odd parity of x, delegating to the GCC/Clang builtin.
static inline int parity(unsigned int x) { return __builtin_parity(x); }
#endif
| {
"pile_set_name": "Github"
} |
#region License
/*
* Copyright (C) 1999-2020 John Källén.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#endregion
using Reko.Core;
using Reko.Core.Lib;
using Reko.Core.Machine;
using Reko.Gui;
using Reko.Gui.Forms;
using Reko.Scanning;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Windows.Forms;
namespace Reko.UserInterfaces.WindowsForms.Forms
{
/// <summary>
/// Mediates between a <c>JumpTableDialog</c> and the Reko scanning machinery:
/// it wires up the dialog's control events, rebuilds the preview of jump-table
/// entries as the user edits, and converts the final dialog state into a
/// <c>UserIndirectJump</c>.
/// </summary>
public class JumpTableInteractor
{
// The dialog this interactor drives; set once in Attach.
private JumpTableDialog dlg;
/// <summary>
/// Subscribes to the dialog's control events so this interactor reacts
/// to user edits. Must be called before the dialog is shown.
/// </summary>
public void Attach(JumpTableDialog dlg)
{
this.dlg = dlg;
dlg.IsIndirectTable.CheckedChanged += IsIndirectTable_CheckedChanged;
dlg.Load += Dlg_Load;
dlg.FormClosing += Dlg_FormClosing;
dlg.EntryCount.ValueChanged += EntryCount_ValueChanged;
dlg.Entries.SelectedIndexChanged += Entries_SelectedIndexChanged;
dlg.FarAddress.CheckedChanged += FarAddress_CheckedChanged;
dlg.RelativeAddress.CheckedChanged += RelativeAddress_CheckedChanged;
dlg.SegmentOffsets.CheckedChanged += Offsets_CheckedChanged;
}
// Enables the indirect-table controls only when "is indirect" is checked.
private void EnableControls()
{
dlg.IndirectTable.Enabled = dlg.IsIndirectTable.Checked;
dlg.IndirectLabel.Enabled = dlg.IsIndirectTable.Checked;
}
// Re-reads the table start address and entry count from the dialog and
// regenerates the list of table entries shown in the Entries control.
private void BuildAddressTable()
{
var vectorBuilder = new VectorBuilder(null, dlg.Program, null);
var addresses = new List<Address>();
if (dlg.Program.Platform.TryParseAddress(dlg.JumpTableStartAddress.Text, out Address addrTable))
{
var stride = TableStride();
var state = dlg.Program.Architecture.CreateProcessorState();
state.InstructionPointer = dlg.Instruction.Address;
addresses = vectorBuilder.BuildTable(addrTable, stride * (int)dlg.EntryCount.Value, null, stride, state);
}
// An unparseable address leaves the list empty; SelectedIndex then
// becomes -1, i.e. "no selection".
dlg.Entries.DataSource = addresses;
dlg.Entries.SelectedIndex = addresses.Count - 1;
}
// Populates the segment list when the program's base address is segmented.
private void EnableSegmentedPanel(bool hasValue)
{
//foreach (Control control in dlg.SegmentedAddressPanel.Controls)
//{
// control.Enabled = hasValue;
//}
if (hasValue)
{
dlg.SegmentList.DataSource = dlg.Program.SegmentMap.Segments.Values
.Select(seg => new ListOption(seg.Name, seg.Address))
.ToList();
}
}
/// <summary>
/// Builds the <c>UserIndirectJump</c> described by the dialog's current
/// state. Intended to be called after the dialog closes with OK.
/// </summary>
public UserIndirectJump GetResults()
{
var vb = new VectorBuilder(dlg.Services, dlg.Program, new DirectedGraphImpl<object>());
var stride = 4; //$TODO: get from dialog
var entries = vb.BuildTable(dlg.VectorAddress, stride * (int)dlg.EntryCount.Value, null, stride, null);
var table = new ImageMapVectorTable(dlg.VectorAddress, entries.ToArray(), 0);
return new UserIndirectJump
{
Address = dlg.Instruction.Address,
Table = table,
IndexRegister = dlg.Program.Architecture.GetRegister(dlg.IndexRegister.SelectedValue.ToString())
};
}
// Selects the radio button matching the dialog's stride: pointer-sized
// strides mean far addresses, frame-pointer-sized strides mean offsets.
private void SetRadioButtons()
{
if (dlg.Stride == dlg.Program.Platform.PointerType.Size)
{
dlg.FarAddress.Checked = true;
}
else if (dlg.Stride == dlg.Program.Platform.FramePointerType.Size)
{
dlg.SegmentOffsets.Checked = true;
}
}
// Size in bytes of one table entry, derived from the selected radio
// button: platform pointer size for far/relative addresses, 2 otherwise.
private int TableStride()
{
if (dlg.FarAddress.Checked || dlg.RelativeAddress.Checked)
{
return dlg.Program.Platform.PointerType.Size;
}
else
{
return 2;
}
}
// One-time initialization when the dialog loads: fill in captions,
// data sources, radio buttons, and the initial entry preview.
private void Dlg_Load(object sender, EventArgs e)
{
dlg.CaptionLabel.Text = $"Jump table for {dlg.Instruction.Address}";
dlg.JumpInstructionAddress.Text = dlg.Instruction.Address.ToString();
dlg.InstructionLabel.Text = dlg.Instruction.ToString().Replace('\t', ' ');
if (dlg.VectorAddress != null)
{
dlg.JumpTableStartAddress.Text = dlg.VectorAddress.ToString();
}
EnableSegmentedPanel(dlg.Program.SegmentMap.BaseAddress.Selector.HasValue);
dlg.IndexRegister.DataSource = dlg.Program.Architecture.GetRegisters().ToList();
dlg.SegmentList.DataSource = dlg.Program.SegmentMap.Segments.Values
.Select(s => s.Name)
.OrderBy(s => s)
.ToList();
SetRadioButtons();
BuildAddressTable();
EnableControls();
}
// On OK, persist the (re-)parsed table start address back into the dialog.
private void Dlg_FormClosing(object sender, FormClosingEventArgs e)
{
if ((Gui.DialogResult)dlg.DialogResult != Gui.DialogResult.OK)
return;
if (dlg.Program.Platform.TryParseAddress(dlg.JumpTableStartAddress.Text, out Address addr))
{
dlg.VectorAddress = addr;
}
}
private void EntryCount_ValueChanged(object sender, EventArgs e)
{
BuildAddressTable();
}
// Shows a disassembly preview of the selected entry, stopping at the
// first control-transfer instruction (capped at 400 instructions).
private void Entries_SelectedIndexChanged(object sender, EventArgs e)
{
var addr = (Address)dlg.Entries.SelectedItem;
string text;
if (addr != null)
{
var dasm = dlg.Program.CreateDisassembler(dlg.Program.Architecture, addr);
text = string.Join(
Environment.NewLine,
dasm.TakeWhile(i => (i.InstructionClass & InstrClass.Transfer) == 0)
.Take(400)
.Select(i => i.ToString()));
}
else
{
text = "";
}
dlg.Disassembly.Text = text;
}
private void IsIndirectTable_CheckedChanged(object sender, EventArgs e)
{
BuildAddressTable();
EnableControls();
}
private void FarAddress_CheckedChanged(object sender, EventArgs e)
{
BuildAddressTable();
EnableControls();
}
private void RelativeAddress_CheckedChanged(object sender, EventArgs e)
{
BuildAddressTable();
EnableControls();
}
private void Offsets_CheckedChanged(object sender, EventArgs e)
{
BuildAddressTable();
EnableControls();
}
}
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<!-- Layout test: a 100px-tall column flexbox in which .first (flex: 1 1 auto)
     absorbs the free space and .test (flex: 0 0 auto) should shrink-wrap its
     table child. check-layout.js compares each .test element's computed
     height against its data-expected-height attribute (50 below). -->
<style>
.container {
display: flex;
flex-direction: column;
height: 100px;
width: 50px;
}
.first {
flex: 1 1 auto;
}
.test {
flex: 0 0 auto;
background-color: green;
display: flex;
}
td {
padding: 23px;
}
</style>
</head>
<script src="../../resources/check-layout.js"></script>
<body onload="checkLayout('.test')">
<div class="container">
<div class="first">
</div>
<div class="test" data-expected-height=50>
<table>
<tr><td></td></tr>
</table>
</div>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/* Generate attribute information (insn-attr.h) from machine description.
Copyright (C) 1991-2019 Free Software Foundation, Inc.
Contributed by Richard Kenner ([email protected])
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "bconfig.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "errors.h"
#include "read-md.h"
#include "gensupport.h"
/* CONST attributes seen so far, and all define_insn_reservations read
   from the machine description; both are consulted by find_tune_attr.  */
static vec<rtx> const_attrs, reservations;
/* Emit into insn-attr.h the declarations for one define_attr or
   define_enum_attr held in INFO: a HAVE_ATTR_* macro, the get_attr_*
   accessor prototype, and (for the special `length' attribute) the
   branch-shortening helpers.  */
static void
gen_attr (md_rtx_info *info)
{
const char *p;
rtx attr = info->def;
int is_const = GET_CODE (XEXP (attr, 2)) == CONST;
if (is_const)
const_attrs.safe_push (attr);
printf ("#define HAVE_ATTR_%s 1\n", XSTR (attr, 0));
/* If numeric attribute, don't need to write an enum.  */
if (GET_CODE (attr) == DEFINE_ENUM_ATTR)
printf ("extern enum %s get_attr_%s (%s);\n\n",
XSTR (attr, 1), XSTR (attr, 0),
(is_const ? "void" : "rtx_insn *"));
else
{
p = XSTR (attr, 1);
/* An empty value string marks a numeric attribute.  */
if (*p == '\0')
printf ("extern int get_attr_%s (%s);\n", XSTR (attr, 0),
(is_const ? "void" : "rtx_insn *"));
else
printf ("extern enum attr_%s get_attr_%s (%s);\n\n",
XSTR (attr, 0), XSTR (attr, 0),
(is_const ? "void" : "rtx_insn *"));
}
/* If `length' attribute, write additional function definitions and define
variables used by `insn_current_length'.  */
if (! strcmp (XSTR (attr, 0), "length"))
{
puts ("\
extern void shorten_branches (rtx_insn *);\n\
extern int insn_default_length (rtx_insn *);\n\
extern int insn_min_length (rtx_insn *);\n\
extern int insn_variable_length_p (rtx_insn *);\n\
extern int insn_current_length (rtx_insn *);\n\n\
#include \"insn-addr.h\"\n");
}
}
/* Check that attribute NAME is used in define_insn_reservation condition
   EXP.  Return true if it is.  */
static bool
check_tune_attr (const char *name, rtx exp)
{
  enum rtx_code code = GET_CODE (exp);
  /* Either arm of an AND may establish the attribute test.  */
  if (code == AND)
    return (check_tune_attr (name, XEXP (exp, 0))
	    || check_tune_attr (name, XEXP (exp, 1)));
  /* For an IOR both alternatives must test the attribute.  */
  if (code == IOR)
    return (check_tune_attr (name, XEXP (exp, 0))
	    && check_tune_attr (name, XEXP (exp, 1)));
  if (code == EQ_ATTR)
    return strcmp (XSTR (exp, 0), name) == 0;
  return false;
}
/* Try to find a const attribute (usually cpu or tune) that is used
   in all define_insn_reservation conditions.  Returns true when EXP
   mentions such an attribute (checked against every reservation
   recorded in the `reservations' vector).  */
static bool
find_tune_attr (rtx exp)
{
unsigned int i;
rtx attr;
switch (GET_CODE (exp))
{
case AND:
case IOR:
/* Search both operands; either may name the tune attribute.  */
if (find_tune_attr (XEXP (exp, 0)))
return true;
return find_tune_attr (XEXP (exp, 1));
case EQ_ATTR:
/* `alternative' is a per-insn pseudo-attribute, never a tune key.  */
if (strcmp (XSTR (exp, 0), "alternative") == 0)
return false;
/* EXP tests some attribute; accept it only if it is a recorded CONST
attribute that appears in every reservation's condition.  */
FOR_EACH_VEC_ELT (const_attrs, i, attr)
if (strcmp (XSTR (attr, 0), XSTR (exp, 0)) == 0)
{
unsigned int j;
rtx resv;
FOR_EACH_VEC_ELT (reservations, j, resv)
if (! check_tune_attr (XSTR (attr, 0), XEXP (resv, 2)))
return false;
return true;
}
return false;
default:
return false;
}
}
/* Driver: read the machine description from the files named on the
   command line and print the contents of insn-attr.h to stdout.  */
int
main (int argc, const char **argv)
{
bool have_annul_true = false;
bool have_annul_false = false;
int num_insn_reservations = 0;
int i;
progname = "genattr";
if (!init_rtx_reader_args (argc, argv))
return (FATAL_EXIT_CODE);
puts ("/* Generated automatically by the program `genattr'");
puts (" from the machine description file `md'. */\n");
puts ("#ifndef GCC_INSN_ATTR_H");
puts ("#define GCC_INSN_ATTR_H\n");
puts ("#include \"insn-attr-common.h\"\n");
/* Read the machine description.  */
md_rtx_info info;
while (read_md_rtx (&info))
{
rtx def = info.def;
switch (GET_CODE (def))
{
case DEFINE_ATTR:
case DEFINE_ENUM_ATTR:
gen_attr (&info);
break;
case DEFINE_DELAY:
/* Each delay slot is described by a triple; elements 1 and 2 are
the annul-if-true / annul-if-false conditions.  */
for (i = 0; i < XVECLEN (def, 1); i += 3)
{
if (XVECEXP (def, 1, i + 1))
have_annul_true = true;
if (XVECEXP (def, 1, i + 2))
have_annul_false = true;
}
break;
case DEFINE_INSN_RESERVATION:
num_insn_reservations++;
reservations.safe_push (def);
break;
default:
break;
}
}
/* Delay-slot interface.  */
printf ("extern int num_delay_slots (rtx_insn *);\n");
printf ("extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);\n\n");
printf ("extern int const_num_delay_slots (rtx_insn *);\n\n");
printf ("#define ANNUL_IFTRUE_SLOTS %d\n", have_annul_true);
printf ("extern int eligible_for_annul_true (rtx_insn *, int, rtx_insn *, int);\n");
printf ("#define ANNUL_IFFALSE_SLOTS %d\n", have_annul_false);
printf ("extern int eligible_for_annul_false (rtx_insn *, int, rtx_insn *, int);\n");
if (num_insn_reservations > 0)
{
bool has_tune_attr
= find_tune_attr (XEXP (reservations[0], 2));
/* Output interface for pipeline hazards recognition based on
DFA (deterministic finite state automata).  */
printf ("\n/* DFA based pipeline interface. */");
printf ("\n#ifndef AUTOMATON_ALTS\n");
printf ("#define AUTOMATON_ALTS 0\n");
printf ("#endif\n\n");
printf ("\n#ifndef AUTOMATON_STATE_ALTS\n");
printf ("#define AUTOMATON_STATE_ALTS 0\n");
printf ("#endif\n\n");
printf ("#ifndef CPU_UNITS_QUERY\n");
printf ("#define CPU_UNITS_QUERY 0\n");
printf ("#endif\n\n");
/* Interface itself: */
if (has_tune_attr)
{
/* With a tune attribute the DFA entry points are function pointers
initialized at run time by init_sched_attrs.  */
printf ("/* Initialize fn pointers for internal_dfa_insn_code\n");
printf ("   and insn_default_latency.  */\n");
printf ("extern void init_sched_attrs (void);\n\n");
printf ("/* Internal insn code number used by automata.  */\n");
printf ("extern int (*internal_dfa_insn_code) (rtx_insn *);\n\n");
printf ("/* Insn latency time defined in define_insn_reservation.  */\n");
printf ("extern int (*insn_default_latency) (rtx_insn *);\n\n");
}
else
{
printf ("#define init_sched_attrs() do { } while (0)\n\n");
printf ("/* Internal insn code number used by automata.  */\n");
printf ("extern int internal_dfa_insn_code (rtx_insn *);\n\n");
printf ("/* Insn latency time defined in define_insn_reservation.  */\n");
printf ("extern int insn_default_latency (rtx_insn *);\n\n");
}
printf ("/* Return nonzero if there is a bypass for given insn\n");
printf ("   which is a data producer.  */\n");
printf ("extern int bypass_p (rtx_insn *);\n\n");
printf ("/* Insn latency time on data consumed by the 2nd insn.\n");
printf ("   Use the function if bypass_p returns nonzero for\n");
printf ("   the 1st insn. */\n");
printf ("extern int insn_latency (rtx_insn *, rtx_insn *);\n\n");
printf ("/* Maximal insn latency time possible of all bypasses for this insn.\n");
printf ("   Use the function if bypass_p returns nonzero for\n");
printf ("   the 1st insn. */\n");
printf ("extern int maximal_insn_latency (rtx_insn *);\n\n");
printf ("\n#if AUTOMATON_ALTS\n");
printf ("/* The following function returns number of alternative\n");
printf ("   reservations of given insn.  It may be used for better\n");
printf ("   insns scheduling heuristics. */\n");
printf ("extern int insn_alts (rtx);\n\n");
printf ("#endif\n\n");
printf ("/* Maximal possible number of insns waiting results being\n");
printf ("   produced by insns whose execution is not finished. */\n");
printf ("extern const int max_insn_queue_index;\n\n");
printf ("/* Pointer to data describing current state of DFA.  */\n");
printf ("typedef void *state_t;\n\n");
printf ("/* Size of the data in bytes.  */\n");
printf ("extern int state_size (void);\n\n");
printf ("/* Initiate given DFA state, i.e. Set up the state\n");
printf ("   as all functional units were not reserved.  */\n");
printf ("extern void state_reset (state_t);\n");
printf ("/* The following function returns negative value if given\n");
printf ("   insn can be issued in processor state described by given\n");
printf ("   DFA state.  In this case, the DFA state is changed to\n");
printf ("   reflect the current and future reservations by given\n");
printf ("   insn.  Otherwise the function returns minimal time\n");
printf ("   delay to issue the insn.  This delay may be zero\n");
printf ("   for superscalar or VLIW processors.  If the second\n");
printf ("   parameter is NULL the function changes given DFA state\n");
printf ("   as new processor cycle started.  */\n");
printf ("extern int state_transition (state_t, rtx);\n");
printf ("\n#if AUTOMATON_STATE_ALTS\n");
printf ("/* The following function returns number of possible\n");
printf ("   alternative reservations of given insn in given\n");
printf ("   DFA state.  It may be used for better insns scheduling\n");
printf ("   heuristics.  By default the function is defined if\n");
printf ("   macro AUTOMATON_STATE_ALTS is defined because its\n");
printf ("   implementation may require much memory.  */\n");
printf ("extern int state_alts (state_t, rtx);\n");
printf ("#endif\n\n");
printf ("extern int min_issue_delay (state_t, rtx_insn *);\n");
printf ("/* The following function returns nonzero if no one insn\n");
printf ("   can be issued in current DFA state. */\n");
printf ("extern int state_dead_lock_p (state_t);\n");
printf ("/* The function returns minimal delay of issue of the 2nd\n");
printf ("   insn after issuing the 1st insn in given DFA state.\n");
printf ("   The 1st insn should be issued in given state (i.e.\n");
printf ("   state_transition should return negative value for\n");
printf ("   the insn and the state).  Data dependencies between\n");
printf ("   the insns are ignored by the function.  */\n");
printf ("extern int "
"min_insn_conflict_delay (state_t, rtx_insn *, rtx_insn *);\n");
printf ("/* The following function outputs reservations for given\n");
printf ("   insn as they are described in the corresponding\n");
printf ("   define_insn_reservation.  */\n");
printf ("extern void print_reservation (FILE *, rtx_insn *);\n");
printf ("\n#if CPU_UNITS_QUERY\n");
printf ("/* The following function returns code of functional unit\n");
printf ("   with given name (see define_cpu_unit). */\n");
printf ("extern int get_cpu_unit_code (const char *);\n");
printf ("/* The following function returns nonzero if functional\n");
printf ("   unit with given code is currently reserved in given\n");
printf ("   DFA state. */\n");
printf ("extern int cpu_unit_reservation_p (state_t, int);\n");
printf ("#endif\n\n");
printf ("/* The following function returns true if insn\n");
printf ("   has a dfa reservation. */\n");
printf ("extern bool insn_has_dfa_reservation_p (rtx_insn *);\n\n");
printf ("/* Clean insn code cache.  It should be called if there\n");
printf ("   is a chance that condition value in a\n");
printf ("   define_insn_reservation will be changed after\n");
printf ("   last call of dfa_start.  */\n");
printf ("extern void dfa_clean_insn_cache (void);\n\n");
printf ("extern void dfa_clear_single_insn_cache (rtx_insn *);\n\n");
printf ("/* Initiate and finish work with DFA.  They should be\n");
printf ("   called as the first and the last interface\n");
printf ("   functions.  */\n");
printf ("extern void dfa_start (void);\n");
printf ("extern void dfa_finish (void);\n");
}
else
{
/* Otherwise we do no scheduling, but we need these typedefs
in order to avoid uglifying other code with more ifdefs.  */
printf ("typedef void *state_t;\n\n");
}
/* Special-purpose attributes should be tested with if, not #ifdef.  */
const char * const special_attrs[] = { "length", "enabled",
"preferred_for_size",
"preferred_for_speed", 0 };
for (const char * const *p = special_attrs; *p; p++)
{
printf ("#ifndef HAVE_ATTR_%s\n"
"#define HAVE_ATTR_%s 0\n"
"#endif\n", *p, *p);
}
/* We make an exception here to provide stub definitions for
insn_*_length* / get_attr_enabled functions.  */
puts ("#if !HAVE_ATTR_length\n"
"extern int hook_int_rtx_insn_unreachable (rtx_insn *);\n"
"#define insn_default_length hook_int_rtx_insn_unreachable\n"
"#define insn_min_length hook_int_rtx_insn_unreachable\n"
"#define insn_variable_length_p hook_int_rtx_insn_unreachable\n"
"#define insn_current_length hook_int_rtx_insn_unreachable\n"
"#include \"insn-addr.h\"\n"
"#endif\n"
"extern int hook_int_rtx_1 (rtx);\n"
"#if !HAVE_ATTR_enabled\n"
"#define get_attr_enabled hook_int_rtx_1\n"
"#endif\n"
"#if !HAVE_ATTR_preferred_for_size\n"
"#define get_attr_preferred_for_size hook_int_rtx_1\n"
"#endif\n"
"#if !HAVE_ATTR_preferred_for_speed\n"
"#define get_attr_preferred_for_speed hook_int_rtx_1\n"
"#endif\n");
/* Output flag masks for use by reorg.
Flags are used to hold branch direction for use by eligible_for_...  */
printf ("\n#define ATTR_FLAG_forward\t0x1\n");
printf ("#define ATTR_FLAG_backward\t0x2\n");
puts ("\n#endif /* GCC_INSN_ATTR_H */");
if (ferror (stdout) || fflush (stdout) || fclose (stdout))
return FATAL_EXIT_CODE;
return SUCCESS_EXIT_CODE;
}
| {
"pile_set_name": "Github"
} |
# Kurdish translation for gala
# Copyright (c) 2017 Rosetta Contributors and Canonical Ltd 2017
# This file is distributed under the same license as the gala package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2017.
#
msgid ""
msgstr ""
"Project-Id-Version: gala\n"
"Report-Msgid-Bugs-To: https://github.com/elementary/gala/issues\n"
"POT-Creation-Date: 2020-08-19 23:19+0000\n"
"PO-Revision-Date: 2017-05-04 19:50+0000\n"
"Last-Translator: Rokar ✌ <Unknown>\n"
"Language-Team: Kurdish <[email protected]>\n"
"Language: ku\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2017-05-06 05:41+0000\n"
"X-Generator: Launchpad (build 18366)\n"
#: src/ScreenshotManager.vala:251
msgid "Screenshots"
msgstr ""
#: src/WindowManager.vala:2191
msgid "Does the display look OK?"
msgstr ""
#: src/WindowManager.vala:2194
msgid "Keep This Configuration"
msgstr ""
#: src/WindowManager.vala:2195
msgid "Restore Previous Configuration"
msgstr ""
#: daemon/MenuDaemon.vala:111
msgid "Minimize"
msgstr "Biçûk bike"
#: daemon/MenuDaemon.vala:124
msgid "Move"
msgstr "Guhestin"
#: daemon/MenuDaemon.vala:132
msgid "Resize"
msgstr "Mezinahîyê biguherîne"
#: daemon/MenuDaemon.vala:140
msgid "Always on Top"
msgstr ""
#: daemon/MenuDaemon.vala:148
msgid "Always on Visible Workspace"
msgstr ""
#: daemon/MenuDaemon.vala:156
msgid "Move to Workspace Left"
msgstr ""
#: daemon/MenuDaemon.vala:164
msgid "Move to Workspace Right"
msgstr ""
#: daemon/MenuDaemon.vala:172
msgid "Close"
msgstr "Bigire"
#: daemon/MenuDaemon.vala:205
msgid "Unmaximize"
msgstr "na ewperî dibe"
#: daemon/MenuDaemon.vala:205
msgid "Maximize"
msgstr "Mezintirîn"
#: daemon/MenuDaemon.vala:268
msgid "Change Wallpaper…"
msgstr ""
#: daemon/MenuDaemon.vala:285
msgid "Display Settings…"
msgstr ""
#: daemon/MenuDaemon.vala:302
msgid "System Settings…"
msgstr ""
#: data/gala.appdata.xml.in:7
msgid "Multitasking & Window Management"
msgstr ""
#: data/gala.appdata.xml.in:8
msgid "A window & compositing manager for Pantheon"
msgstr ""
#: data/gala.appdata.xml.in:10
msgid ""
"A window & compositing manager based on libmutter and designed by "
"elementary for use with Pantheon."
msgstr ""
#: data/gala.appdata.xml.in:15 data/gala.appdata.xml.in:26
#: data/gala.appdata.xml.in:36 data/gala.appdata.xml.in:45
#: data/gala.appdata.xml.in:56
msgid "Improvements:"
msgstr ""
#: data/gala.appdata.xml.in:17
msgid "Close the Alt + Tab switcher by pressing Esc without releasing Alt"
msgstr ""
#: data/gala.appdata.xml.in:18
msgid "Increase maximum zoom level and provide feedback when unable to zoom"
msgstr ""
#: data/gala.appdata.xml.in:19
msgid "Show a context menu when secondary clicking the background"
msgstr ""
#: data/gala.appdata.xml.in:20 data/gala.appdata.xml.in:30
#: data/gala.appdata.xml.in:39 data/gala.appdata.xml.in:50
msgid "Updated translations"
msgstr ""
#: data/gala.appdata.xml.in:28
msgid ""
"Fix “Always on Visible Workspace” windows disappearing when changing "
"workspaces"
msgstr ""
#: data/gala.appdata.xml.in:29
msgid ""
"Fix windows on non-primary displays disappearing when changing workspaces"
msgstr ""
#: data/gala.appdata.xml.in:38
msgid "Fix crash when changing workspaces while a transient window is opening"
msgstr ""
#: data/gala.appdata.xml.in:47
msgid "Don't show Gala Background Services in the dock"
msgstr ""
#: data/gala.appdata.xml.in:48
msgid "Fixes for media key handling"
msgstr ""
#: data/gala.appdata.xml.in:49
msgid "Show keyboard shortcuts in HeaderBar menus"
msgstr ""
#: data/gala.appdata.xml.in:58
msgid "Improve window shadows for legacy apps"
msgstr ""
#: data/gala.appdata.xml.in:59
msgid "Fix potential crash when taking screenshots"
msgstr ""
#: data/gala.appdata.xml.in:60
msgid "Fix notification position after changing DPI"
msgstr ""
#: data/gala.appdata.xml.in:61
msgid "Fix animations for moving and maximizing tiled windows"
msgstr ""
#: data/gala.appdata.xml.in:67
msgid "elementary, Inc."
msgstr ""
#: data/gala-multitaskingview.desktop.in:4
#: data/gala-multitaskingview.desktop.in:6
msgid "Multitasking View"
msgstr ""
#: data/gala-multitaskingview.desktop.in:5
msgid "View all open windows and workspaces"
msgstr ""
#: data/gala-multitaskingview.desktop.in:7
msgid "multitasking-view"
msgstr ""
#: data/gala-other.desktop.in:4
msgid "Other"
msgstr "Wekî Din"
#: data/gala-other.desktop.in:5
msgid "Fallback desktop file for notifications from outdated applications."
msgstr ""
#: data/gala-other.desktop.in:6
msgid "applications-other"
msgstr ""
#~ msgid "Zoom in"
#~ msgstr "Nêzîk bike"
#~ msgid "Zoom out"
#~ msgstr "Dûr bike"
#~ msgid "Arrangement of buttons on the titlebar"
#~ msgstr "Pergala bişkokên ku di darikê sernavan de ye"
#~ msgid "Enable Animations"
#~ msgstr "Derbasbariya dîmenên livkar"
#~ msgid ""
#~ "Whether animations should be displayed. Note: This is a global key, it "
#~ "changes the behaviour of the window manager, the panel etc."
#~ msgstr ""
#~ "Heke xebitandina wêneyên livkar pêwist be. Nişe: Ev mifteya giştî ye, "
#~ "rewşa gerînendeyê paceyê, panelê hwd diguherîne."
| {
"pile_set_name": "Github"
} |
<div class="basic-diacritics">
<p>
Lores ipsum dolòr sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor
invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero
eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren,
no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor
sit ảmet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt
ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et
accusam et justò duo dolores et ea rebum. Stet clita kasd gubergren, no sea
takimata sanctus est Lorem ipsum dolor sit amet.
</p>
</div>
| {
"pile_set_name": "Github"
} |
#! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pylint: disable=invalid-name
"""
Generate vocabulary for a tokenized text file.

Reads a tokenized text file (or stdin), counts token frequencies, and
prints "token<TAB>count" lines sorted by descending frequency (ties
broken lexically), optionally filtered by --min_frequency and truncated
to --max_vocab_size.
"""
import sys
import argparse
import collections
import logging


def str2bool(value):
  """Parse a boolean command-line value such as "true"/"False"/"1"/"0".

  BUG FIX: the original used ``type=bool``, a well-known argparse pitfall:
  any non-empty string is truthy, so ``--downcase False`` yielded True.
  This converter interprets the usual spellings explicitly and rejects
  anything else with a clear error.
  """
  lowered = str(value).strip().lower()
  if lowered in ("true", "t", "yes", "y", "1"):
    return True
  if lowered in ("false", "f", "no", "n", "0"):
    return False
  raise argparse.ArgumentTypeError("expected a boolean value, got %r" % value)


parser = argparse.ArgumentParser(
    description="Generate vocabulary for a tokenized text file.")
parser.add_argument(
    "--min_frequency",
    dest="min_frequency",
    type=int,
    default=0,
    help="Minimum frequency of a word to be included in the vocabulary.")
parser.add_argument(
    "--max_vocab_size",
    dest="max_vocab_size",
    type=int,
    help="Maximum number of tokens in the vocabulary")
parser.add_argument(
    "--downcase",
    dest="downcase",
    type=str2bool,
    help="If set to true, downcase all text before processing.",
    default=False)
parser.add_argument(
    "infile",
    nargs="?",
    type=argparse.FileType("r"),
    default=sys.stdin,
    help="Input tokenized text file to be processed.")
parser.add_argument(
    "--delimiter",
    dest="delimiter",
    type=str,
    default=" ",
    help="Delimiter character for tokenizing. Use \" \" and \"\" for word and char level respectively."
)
args = parser.parse_args()

# Counter over every token seen in the input.
cnt = collections.Counter()

for line in args.infile:
  if args.downcase:
    line = line.lower()
  if args.delimiter == "":
    # Character-level vocabulary: every character is its own token.
    tokens = list(line.strip())
  else:
    tokens = line.strip().split(args.delimiter)
  # Drop empty strings produced by consecutive delimiters.
  tokens = [_ for _ in tokens if len(_) > 0]
  cnt.update(tokens)

logging.info("Found %d unique tokens in the vocabulary.", len(cnt))

# Filter tokens below the frequency threshold.  NOTE(review): the filter is
# strictly `>` (a token must occur MORE than min_frequency times), which
# matches the log message below rather than the flag's help text; kept as-is
# to preserve existing output.
if args.min_frequency > 0:
  filtered_tokens = [(w, c) for w, c in cnt.most_common()
                     if c > args.min_frequency]
  cnt = collections.Counter(dict(filtered_tokens))
  logging.info("Found %d unique tokens with frequency > %d.",
               len(cnt), args.min_frequency)

# Sort tokens by 1. frequency 2. lexically to break ties.
word_with_counts = cnt.most_common()
word_with_counts = sorted(
    word_with_counts, key=lambda x: (x[1], x[0]), reverse=True)

# Keep only the most frequent max_vocab_size tokens.
if args.max_vocab_size is not None:
  word_with_counts = word_with_counts[:args.max_vocab_size]

for word, count in word_with_counts:
  print("{}\t{}".format(word, count))
| {
"pile_set_name": "Github"
} |
/*
** $Id: ldebug.c,v 2.29.1.6 2008/05/08 16:56:26 roberto Exp $
** Debug Interface
** See Copyright Notice in lua.h
*/
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#define ldebug_c
#define LUA_CORE
#include "lua.h"
#include "lapi.h"
#include "lcode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name);

/*
** Return the index of the instruction currently executing in frame CI,
** or -1 when CI is not a Lua function (C functions have no bytecode).
*/
static int currentpc (lua_State *L, CallInfo *ci) {
if (!isLua(ci)) return -1;  /* function is not a Lua function? */
if (ci == L->ci)
ci->savedpc = L->savedpc;  /* the active frame's savedpc lives in L */
return pcRel(ci->savedpc, ci_func(ci)->l.p);
}
/*
** Source line currently executing in frame CI, or -1 when no line
** information is available (C functions / inactive frames).
*/
static int currentline (lua_State *L, CallInfo *ci) {
int pc = currentpc(L, ci);
if (pc < 0)
return -1;  /* only active lua functions have current-line information */
else
return getline(ci_func(ci)->l.p, pc);
}
/*
** Install debug hook FUNC, firing on the events in MASK every COUNT
** instructions; a NULL func or zero mask turns hooks off entirely.
** Always returns 1.
** This function can be called asynchronously (e.g. during a signal).
*/
LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count) {
if (func == NULL || mask == 0) {  /* turn off hooks? */
mask = 0;
func = NULL;
}
L->hook = func;
L->basehookcount = count;
resethookcount(L);
L->hookmask = cast_byte(mask);
return 1;
}
/* Accessors for the hook state installed by lua_sethook. */
LUA_API lua_Hook lua_gethook (lua_State *L) {
return L->hook;
}
LUA_API int lua_gethookmask (lua_State *L) {
return L->hookmask;
}
LUA_API int lua_gethookcount (lua_State *L) {
return L->basehookcount;
}
/*
** Fill AR with an identifier (i_ci) for the function LEVEL levels down
** the call stack; level 0 is the currently running function.  Returns 1
** on success, 0 when the stack is not that deep.  A level that was lost
** to a tail call succeeds with i_ci == 0 as a placeholder.
*/
LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) {
int status;
CallInfo *ci;
lua_lock(L);
for (ci = L->ci; level > 0 && ci > L->base_ci; ci--) {
level--;
if (f_isLua(ci))  /* Lua function? */
level -= ci->tailcalls;  /* skip lost tail calls */
}
if (level == 0 && ci > L->base_ci) {  /* level found? */
status = 1;
ar->i_ci = cast_int(ci - L->base_ci);
}
else if (level < 0) {  /* level is of a lost tail call? */
status = 1;
ar->i_ci = 0;
}
else status = 0;  /* no such level */
lua_unlock(L);
return status;
}
/*
** Prototype of the Lua function in frame CI, or NULL when the frame
** belongs to a C function.
*/
static Proto *getluaproto (CallInfo *ci) {
  if (!isLua(ci))
    return NULL;
  return ci_func(ci)->l.p;
}
/*
** Name of local variable N in frame CI: the real name when the chunk's
** debug information has one, "(*temporary)" for a live stack slot with
** no recorded name, or NULL when N is out of range for the frame.
*/
static const char *findlocal (lua_State *L, CallInfo *ci, int n) {
const char *name;
Proto *fp = getluaproto(ci);
if (fp && (name = luaF_getlocalname(fp, n, currentpc(L, ci))) != NULL)
return name;  /* is a local variable in a Lua function */
else {
StkId limit = (ci == L->ci) ? L->top : (ci+1)->func;
if (limit - ci->base >= n && n > 0)  /* is 'n' inside 'ci' stack? */
return "(*temporary)";
else
return NULL;
}
}
/*
** Push the value of local N of the frame identified by AR and return
** its name; return NULL (pushing nothing) when there is no such local.
*/
LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) {
CallInfo *ci = L->base_ci + ar->i_ci;
const char *name = findlocal(L, ci, n);
lua_lock(L);
if (name)
luaA_pushobject(L, ci->base + (n - 1));
lua_unlock(L);
return name;
}
/*
** Assign the value on top of the stack to local N of the frame
** identified by AR and return the local's name; returns NULL when the
** local does not exist.  The value is popped in either case.
*/
LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) {
CallInfo *ci = L->base_ci + ar->i_ci;
const char *name = findlocal(L, ci, n);
lua_lock(L);
if (name)
setobjs2s(L, ci->base + (n - 1), L->top - 1);
L->top--;  /* pop value */
lua_unlock(L);
return name;
}
/*
** Fill in the source-related fields of AR (source, line range, what,
** short_src) from closure CL.
*/
static void funcinfo (lua_Debug *ar, Closure *cl) {
  if (!cl->c.isC) {  /* Lua closure: read info from its prototype */
    Proto *p = cl->l.p;
    ar->source = getstr(p->source);
    ar->linedefined = p->linedefined;
    ar->lastlinedefined = p->lastlinedefined;
    ar->what = (ar->linedefined == 0) ? "main" : "Lua";
  }
  else {  /* C function: no source information available */
    ar->source = "=[C]";
    ar->linedefined = -1;
    ar->lastlinedefined = -1;
    ar->what = "C";
  }
  luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE);
}
/*
** Fills 'ar' with placeholder values describing a tail call whose
** caller frame has been discarded.
*/
static void info_tailcall (lua_Debug *ar) {
  ar->name = ar->namewhat = "";
  ar->what = "tail";
  ar->lastlinedefined = ar->linedefined = ar->currentline = -1;
  ar->source = "=(tail call)";
  luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE);
  ar->nups = 0;
}
/*
** Pushes a table whose keys are the valid line numbers of closure 'f'
** (option 'L'); pushes nil for C functions or lost tail calls.
*/
static void collectvalidlines (lua_State *L, Closure *f) {
  if (f == NULL || f->c.isC) {
    setnilvalue(L->top);
  }
  else {
    Table *t = luaH_new(L, 0, 0);
    int *lineinfo = f->l.p->lineinfo;
    int i;
    for (i=0; i<f->l.p->sizelineinfo; i++)
      setbvalue(luaH_setnum(L, t, lineinfo[i]), 1);  /* t[line] = true */
    sethvalue(L, L->top, t);
  }
  incr_top(L);
}
/*
** Core of lua_getinfo: fills the fields of 'ar' requested by the option
** letters in 'what'.  'f' is the closure being described (NULL for a
** lost tail call); 'ci' is its activation record (may be NULL).
** Returns 0 when 'what' contains an invalid option.
*/
static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar,
                       Closure *f, CallInfo *ci) {
  int status = 1;
  if (f == NULL) {
    info_tailcall(ar);
    return status;
  }
  for (; *what; what++) {
    switch (*what) {
      case 'S': {  /* source information */
        funcinfo(ar, f);
        break;
      }
      case 'l': {  /* current line */
        ar->currentline = (ci) ? currentline(L, ci) : -1;
        break;
      }
      case 'u': {  /* number of upvalues */
        ar->nups = f->c.nupvalues;
        break;
      }
      case 'n': {  /* name of the function */
        ar->namewhat = (ci) ? getfuncname(L, ci, &ar->name) : NULL;
        if (ar->namewhat == NULL) {
          ar->namewhat = "";  /* not found */
          ar->name = NULL;
        }
        break;
      }
      case 'L':
      case 'f':  /* handled by lua_getinfo */
        break;
      default: status = 0;  /* invalid option */
    }
  }
  return status;
}
/*
** Fills 'ar' according to option string 'what'.  A leading '>' makes
** the function to inspect be popped from the top of the stack instead
** of taken from 'ar'.  Options 'f' and 'L' additionally push the
** function itself and its table of valid lines, respectively.
*/
LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) {
  int status;
  Closure *f = NULL;
  CallInfo *ci = NULL;
  lua_lock(L);
  if (*what == '>') {  /* inspect function on top of the stack */
    StkId func = L->top - 1;
    luai_apicheck(L, ttisfunction(func));
    what++;  /* skip the '>' */
    f = clvalue(func);
    L->top--;  /* pop function */
  }
  else if (ar->i_ci != 0) {  /* no tail call? */
    ci = L->base_ci + ar->i_ci;
    lua_assert(ttisfunction(ci->func));
    f = clvalue(ci->func);
  }
  status = auxgetinfo(L, what, ar, f, ci);
  if (strchr(what, 'f')) {  /* push the function itself */
    if (f == NULL) setnilvalue(L->top);
    else setclvalue(L, L->top, f);
    incr_top(L);
  }
  if (strchr(what, 'L'))  /* push table of valid lines */
    collectvalidlines(L, f);
  lua_unlock(L);
  return status;
}
/*
** {======================================================
** Symbolic Execution and code checker
** =======================================================
*/

/* abort the verification, returning 0, when condition 'x' fails */
#define check(x) if (!(x)) return 0;

/* 'pc' must be a valid instruction index within prototype 'pt' */
#define checkjump(pt,pc) check(0 <= pc && pc < pt->sizecode)

/* register 'reg' must fit within the frame of prototype 'pt' */
#define checkreg(pt,reg) check((reg) < (pt)->maxstacksize)
/*
** Checks prototype-level consistency invariants before the
** instruction-level verification done by symbexec.
*/
static int precheck (const Proto *pt) {
  check(pt->maxstacksize <= MAXSTACK);
  check(pt->numparams+(pt->is_vararg & VARARG_HASARG) <= pt->maxstacksize);
  check(!(pt->is_vararg & VARARG_NEEDSARG) ||
        (pt->is_vararg & VARARG_HASARG));
  check(pt->sizeupvalues <= pt->nups);
  check(pt->sizelineinfo == pt->sizecode || pt->sizelineinfo == 0);
  /* code must end with a RETURN instruction */
  check(pt->sizecode > 0 && GET_OPCODE(pt->code[pt->sizecode-1]) == OP_RETURN);
  return 1;
}
/* checks the instruction that follows an "open" call (one producing a
** variable number of values) */
#define checkopenop(pt,pc) luaG_checkopenop((pt)->code[(pc)+1])

/*
** An open call must be followed by an instruction that can consume a
** variable number of values (B == 0 means "up to top").
*/
int luaG_checkopenop (Instruction i) {
  switch (GET_OPCODE(i)) {
    case OP_CALL:
    case OP_TAILCALL:
    case OP_RETURN:
    case OP_SETLIST: {
      check(GETARG_B(i) == 0);
      return 1;
    }
    default: return 0;  /* invalid instruction after an open call */
  }
}
/*
** Validates instruction argument 'r' against its declared argument mode.
*/
static int checkArgMode (const Proto *pt, int r, enum OpArgMask mode) {
  switch (mode) {
    case OpArgN: check(r == 0); break;  /* argument is unused: must be 0 */
    case OpArgU: break;  /* any value is acceptable */
    case OpArgR: checkreg(pt, r); break;  /* register (or jump offset) */
    case OpArgK:  /* constant index or register */
      check(ISK(r) ? INDEXK(r) < pt->sizek : r < pt->maxstacksize);
      break;
  }
  return 1;
}
/*
** Symbolically executes the code of 'pt' up to instruction 'lastpc',
** verifying every instruction on the way.  While doing so it tracks the
** last instruction that wrote register 'reg' (pass NO_REG to verify the
** whole code without tracing) and returns that instruction; any failed
** 'check' returns 0 instead.
*/
static Instruction symbexec (const Proto *pt, int lastpc, int reg) {
  int pc;
  int last;  /* stores position of last instruction that changed `reg' */
  last = pt->sizecode-1;  /* points to final return (a `neutral' instruction) */
  check(precheck(pt));
  for (pc = 0; pc < lastpc; pc++) {
    Instruction i = pt->code[pc];
    OpCode op = GET_OPCODE(i);
    int a = GETARG_A(i);
    int b = 0;
    int c = 0;
    check(op < NUM_OPCODES);
    checkreg(pt, a);
    /* first pass: generic argument checks based on the opcode format */
    switch (getOpMode(op)) {
      case iABC: {
        b = GETARG_B(i);
        c = GETARG_C(i);
        check(checkArgMode(pt, b, getBMode(op)));
        check(checkArgMode(pt, c, getCMode(op)));
        break;
      }
      case iABx: {
        b = GETARG_Bx(i);
        if (getBMode(op) == OpArgK) check(b < pt->sizek);
        break;
      }
      case iAsBx: {
        b = GETARG_sBx(i);
        if (getBMode(op) == OpArgR) {
          int dest = pc+1+b;
          check(0 <= dest && dest < pt->sizecode);
          if (dest > 0) {
            int j;
            /* check that it does not jump to a setlist count; this
               is tricky, because the count from a previous setlist may
               have the same value of an invalid setlist; so, we must
               go all the way back to the first of them (if any) */
            for (j = 0; j < dest; j++) {
              Instruction d = pt->code[dest-1-j];
              if (!(GET_OPCODE(d) == OP_SETLIST && GETARG_C(d) == 0)) break;
            }
            /* if 'j' is even, previous value is not a setlist (even if
               it looks like one) */
            check((j&1) == 0);
          }
        }
        break;
      }
    }
    if (testAMode(op)) {
      if (a == reg) last = pc;  /* change register `a' */
    }
    if (testTMode(op)) {  /* test instructions must be followed by a jump */
      check(pc+2 < pt->sizecode);  /* check skip */
      check(GET_OPCODE(pt->code[pc+1]) == OP_JMP);
    }
    /* second pass: opcode-specific checks */
    switch (op) {
      case OP_LOADBOOL: {
        if (c == 1) {  /* does it jump? */
          check(pc+2 < pt->sizecode);  /* check its jump */
          check(GET_OPCODE(pt->code[pc+1]) != OP_SETLIST ||
                GETARG_C(pt->code[pc+1]) != 0);
        }
        break;
      }
      case OP_LOADNIL: {
        if (a <= reg && reg <= b)
          last = pc;  /* set registers from `a' to `b' */
        break;
      }
      case OP_GETUPVAL:
      case OP_SETUPVAL: {
        check(b < pt->nups);
        break;
      }
      case OP_GETGLOBAL:
      case OP_SETGLOBAL: {
        check(ttisstring(&pt->k[b]));  /* global name must be a string constant */
        break;
      }
      case OP_SELF: {
        checkreg(pt, a+1);
        if (reg == a+1) last = pc;
        break;
      }
      case OP_CONCAT: {
        check(b < c);  /* at least two operands */
        break;
      }
      case OP_TFORLOOP: {
        check(c >= 1);  /* at least one result (control variable) */
        checkreg(pt, a+2+c);  /* space for results */
        if (reg >= a+2) last = pc;  /* affect all regs above its base */
        break;
      }
      case OP_FORLOOP:
      case OP_FORPREP:
        checkreg(pt, a+3);
        /* go through */
      case OP_JMP: {
        int dest = pc+1+b;
        /* not full check and jump is forward and do not skip `lastpc'? */
        if (reg != NO_REG && pc < dest && dest <= lastpc)
          pc += b;  /* do the jump */
        break;
      }
      case OP_CALL:
      case OP_TAILCALL: {
        if (b != 0) {
          checkreg(pt, a+b-1);
        }
        c--;  /* c = num. returns */
        if (c == LUA_MULTRET) {
          check(checkopenop(pt, pc));
        }
        else if (c != 0)
          checkreg(pt, a+c-1);
        if (reg >= a) last = pc;  /* affect all registers above base */
        break;
      }
      case OP_RETURN: {
        b--;  /* b = num. returns */
        if (b > 0) checkreg(pt, a+b-1);
        break;
      }
      case OP_SETLIST: {
        if (b > 0) checkreg(pt, a + b);
        if (c == 0) {  /* real count is stored in the next code slot */
          pc++;
          check(pc < pt->sizecode - 1);
        }
        break;
      }
      case OP_CLOSURE: {
        int nup, j;
        check(b < pt->sizep);
        nup = pt->p[b]->nups;
        check(pc + nup < pt->sizecode);
        for (j = 1; j <= nup; j++) {  /* upvalue pseudo-instructions */
          OpCode op1 = GET_OPCODE(pt->code[pc + j]);
          check(op1 == OP_GETUPVAL || op1 == OP_MOVE);
        }
        if (reg != NO_REG)  /* tracing? */
          pc += nup;  /* do not 'execute' these pseudo-instructions */
        break;
      }
      case OP_VARARG: {
        check((pt->is_vararg & VARARG_ISVARARG) &&
              !(pt->is_vararg & VARARG_NEEDSARG));
        b--;
        if (b == LUA_MULTRET) check(checkopenop(pt, pc));
        checkreg(pt, a+b-1);
        break;
      }
      default: break;
    }
  }
  return pt->code[last];
}
#undef check
#undef checkjump
#undef checkreg
/* }====================================================== */
int luaG_checkcode (const Proto *pt) {
return (symbexec(pt, pt->sizecode, NO_REG) != 0);
}
/*
** Returns the textual form of constant 'c' of prototype 'p' when it is
** a string constant; otherwise returns the placeholder "?".
*/
static const char *kname (Proto *p, int c) {
  if (!ISK(c))
    return "?";
  {
    const TValue *k = &p->k[INDEXK(c)];
    return ttisstring(k) ? svalue(k) : "?";
  }
}
/*
** Tries to find a name for the value at stack slot 'stackpos' of frame
** 'ci' by symbolically executing the function's bytecode up to the
** current instruction.  On success stores the name in '*name' and
** returns its kind ("local", "global", "field", "upvalue", "method");
** returns NULL when no useful name is found.
*/
static const char *getobjname (lua_State *L, CallInfo *ci, int stackpos,
                               const char **name) {
  if (isLua(ci)) {  /* a Lua function? */
    Proto *p = ci_func(ci)->l.p;
    int pc = currentpc(L, ci);
    Instruction i;
    *name = luaF_getlocalname(p, stackpos+1, pc);
    if (*name)  /* is a local? */
      return "local";
    i = symbexec(p, pc, stackpos);  /* try symbolic execution */
    lua_assert(pc != -1);
    switch (GET_OPCODE(i)) {  /* which instruction produced the value? */
      case OP_GETGLOBAL: {
        int g = GETARG_Bx(i);  /* global index */
        lua_assert(ttisstring(&p->k[g]));
        *name = svalue(&p->k[g]);
        return "global";
      }
      case OP_MOVE: {
        int a = GETARG_A(i);
        int b = GETARG_B(i);  /* move from `b' to `a' */
        if (b < a)
          return getobjname(L, ci, b, name);  /* get name for `b' */
        break;
      }
      case OP_GETTABLE: {
        int k = GETARG_C(i);  /* key index */
        *name = kname(p, k);
        return "field";
      }
      case OP_GETUPVAL: {
        int u = GETARG_B(i);  /* upvalue index */
        *name = p->upvalues ? getstr(p->upvalues[u]) : "?";
        return "upvalue";
      }
      case OP_SELF: {
        int k = GETARG_C(i);  /* key index */
        *name = kname(p, k);
        return "method";
      }
      default: break;
    }
  }
  return NULL;  /* no useful name found */
}
/*
** Tries to find a name for the function running at 'ci' by inspecting
** the call instruction of its caller.  Returns NULL when the caller is
** not a Lua function or was lost to a tail call.
*/
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) {
  Instruction i;
  if ((isLua(ci) && ci->tailcalls > 0) || !isLua(ci - 1))
    return NULL;  /* calling function is not Lua (or is unknown) */
  ci--;  /* calling function */
  i = ci_func(ci)->l.p->code[currentpc(L, ci)];
  if (GET_OPCODE(i) == OP_CALL || GET_OPCODE(i) == OP_TAILCALL ||
      GET_OPCODE(i) == OP_TFORLOOP)
    return getobjname(L, ci, GETARG_A(i), name);
  else
    return NULL;  /* no useful name can be found */
}
/* only ANSI way to check whether a pointer points to an array: compare
** it against every slot of the frame */
static int isinstack (CallInfo *ci, const TValue *o) {
  StkId slot = ci->base;
  while (slot < ci->top) {
    if (slot == o)
      return 1;
    slot++;
  }
  return 0;
}
/*
** Raises a type error for applying operation 'op' to value 'o',
** including the value's name when it can be recovered from the stack.
*/
void luaG_typeerror (lua_State *L, const TValue *o, const char *op) {
  const char *name = NULL;
  const char *t = luaT_typenames[ttype(o)];
  /* only stack values can be traced back to a name */
  const char *kind = (isinstack(L->ci, o)) ?
                         getobjname(L, L->ci, cast_int(o - L->base), &name) :
                         NULL;
  if (kind)
    luaG_runerror(L, "attempt to %s %s " LUA_QS " (a %s value)",
                  op, kind, name, t);
  else
    luaG_runerror(L, "attempt to %s a %s value", op, t);
}
/*
** Raises an error for a concatenation between 'p1' and 'p2', blaming
** the operand that is neither a string nor a number.
*/
void luaG_concaterror (lua_State *L, StkId p1, StkId p2) {
  if (ttisstring(p1) || ttisnumber(p1)) p1 = p2;  /* p1 is fine; blame p2 */
  lua_assert(!ttisstring(p1) && !ttisnumber(p1));
  luaG_typeerror(L, p1, "concatenate");
}
/*
** Raises an error for an arithmetic operation, blaming the first
** operand that cannot be converted to a number.
*/
void luaG_aritherror (lua_State *L, const TValue *p1, const TValue *p2) {
  TValue temp;
  if (luaV_tonumber(p1, &temp) == NULL)
    p2 = p1;  /* first operand is wrong */
  luaG_typeerror(L, p2, "perform arithmetic on");
}
/*
** Raises an error for an order comparison between incompatible values.
*/
int luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) {
  const char *t1 = luaT_typenames[ttype(p1)];
  const char *t2 = luaT_typenames[ttype(p2)];
  /* cheap same-type test: compares the 3rd character of the type names
     (NOTE(review): assumes it distinguishes the types that can reach
     this point — confirm against luaT_typenames) */
  if (t1[2] == t2[2])
    luaG_runerror(L, "attempt to compare two %s values", t1);
  else
    luaG_runerror(L, "attempt to compare %s with %s", t1, t2);
  return 0;
}
/*
** When the current frame is running Lua code, pushes a new message with
** "chunkname:line: " prepended to 'msg'; otherwise pushes nothing.
*/
static void addinfo (lua_State *L, const char *msg) {
  CallInfo *ci = L->ci;
  if (isLua(ci)) {  /* is Lua code? */
    char buff[LUA_IDSIZE];  /* add file:line information */
    int line = currentline(L, ci);
    luaO_chunkid(buff, getstr(getluaproto(ci)->source), LUA_IDSIZE);
    luaO_pushfstring(L, "%s:%d: %s", buff, line, msg);
  }
}
/*
** Finishes raising a runtime error: if an error handler is installed,
** calls it with the message on top of the stack, then propagates the
** error with a throw (this function does not return).
*/
void luaG_errormsg (lua_State *L) {
  if (L->errfunc != 0) {  /* is there an error handling function? */
    StkId errfunc = restorestack(L, L->errfunc);
    if (!ttisfunction(errfunc)) luaD_throw(L, LUA_ERRERR);
    setobjs2s(L, L->top, L->top - 1);  /* move argument */
    setobjs2s(L, L->top - 1, errfunc);  /* push function */
    incr_top(L);
    luaD_call(L, L->top - 2, 1);  /* call it */
  }
  luaD_throw(L, LUA_ERRRUN);
}
/*
** Formats 'fmt' with the given arguments, adds position information
** when available, and raises a runtime error (does not return).
*/
void luaG_runerror (lua_State *L, const char *fmt, ...) {
  va_list argp;
  va_start(argp, fmt);
  addinfo(L, luaO_pushvfstring(L, fmt, argp));
  va_end(argp);
  luaG_errormsg(L);
}
| {
"pile_set_name": "Github"
} |
/*
Copyright 2007-2017 The NGenerics Team
(https://github.com/ngenerics/ngenerics/wiki/Team)
This program is licensed under the MIT License. You should
have received a copy of the license along with the source code. If not, an online copy
of the license can be found at https://opensource.org/licenses/MIT.
*/
using System.Collections;
using NGenerics.DataStructures.General;
using NUnit.Framework;
namespace NGenerics.Tests.DataStructures.General.SortedListTests
{
[TestFixture]
public class GetEnumerator
{
    /// <summary>
    /// The generic enumerator must yield the 20 inserted values, in order.
    /// </summary>
    [Test]
    public void Simple()
    {
        var sortedList = new SortedList<int>();
        for (var i = 0; i < 20; i++)
        {
            sortedList.Add(i);
        }

        var counter = 0;
        var enumerator = sortedList.GetEnumerator();
        while (enumerator.MoveNext())
        {
            // NUnit classic asserts take (expected, actual) — expected first,
            // otherwise failure messages report the values reversed.
            Assert.AreEqual(counter, enumerator.Current);
            counter++;
        }
        Assert.AreEqual(20, counter);
    }

    /// <summary>
    /// The non-generic IEnumerable enumerator must yield the same sequence.
    /// </summary>
    [Test]
    public void Interface()
    {
        var sortedList = new SortedList<int>();
        for (var i = 0; i < 20; i++)
        {
            sortedList.Add(i);
        }

        var counter = 0;
        var enumerator = ((IEnumerable)sortedList).GetEnumerator();
        while (enumerator.MoveNext())
        {
            // Non-generic Current is boxed; unbox before comparing.
            Assert.AreEqual(counter, (int)enumerator.Current);
            counter++;
        }
        Assert.AreEqual(20, counter);
    }
}
} | {
"pile_set_name": "Github"
} |
---
title: Fetching content
description: 'Easily connect your Nuxt.js application to your content hosted on Prismic'
position: 210
category: 'Examples'
version: 1.2
fullscreen: false
---
This page is meant to show you some content fetching strategies with this module, please refer to [Prismic documentation](https://prismic.io/docs/vuejs/query-the-api/how-to-query-the-api) to learn more on querying the API.
## From a Page
<code-group>
<code-block label="Quick Query Helper" active>
```javascript[pages/_uid.vue]
export default {
async asyncData({ $prismic, params, error }) {
const document = await $prismic.api.getByUID('page', params.uid)
if (document) {
return { document }
} else {
error({ statusCode: 404, message: 'Page not found' })
}
}
}
```
</code-block>
<code-block label="Predicates">
```javascript[pages/_uid.vue]
export default {
async asyncData({ $prismic, params, error }) {
const document = await $prismic.api.query(
$prismic.predicates.at('my.page.uid', params.uid)
)
if (document) {
return { document }
} else {
error({ statusCode: 404, message: 'Page not found' })
}
}
}
```
</code-block>
</code-group>
## From a Component
```javascript[components/AppHeader.vue]
export default {
async fetch() {
this.headerData = await this.$prismic.api.getSingle('header')
},
data() {
return {
headerData: {}
}
}
}
```
## From Vuex Store
```javascript[store/index.js]
export const state = () => ({
settings: {}
})
export const mutations = {
setSettings(state, settings) {
state.settings = settings
}
}
export const actions = {
async loadSettings({ commit }) {
const settings = await this.$prismic.api.getSingle('site_settings')
commit('setSettings', settings)
}
}
```
| {
"pile_set_name": "Github"
} |
/*
* Media device
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/media.h>
#include <linux/export.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
/* -----------------------------------------------------------------------------
* Userspace API
*/
/* Device node open handler: no per-open state is required. */
static int media_device_open(struct file *filp)
{
	return 0;
}
/* Device node release handler: nothing to tear down. */
static int media_device_close(struct file *filp)
{
	return 0;
}
/*
 * media_device_get_info - MEDIA_IOC_DEVICE_INFO handler
 *
 * Fills a media_device_info structure from @dev and copies it to the
 * userspace buffer @__info.  Returns 0 on success or -EFAULT when the
 * copy to userspace fails.
 */
static int media_device_get_info(struct media_device *dev,
				 struct media_device_info __user *__info)
{
	struct media_device_info info;

	memset(&info, 0, sizeof(info));

	strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver));
	strlcpy(info.model, dev->model, sizeof(info.model));
	strlcpy(info.serial, dev->serial, sizeof(info.serial));
	strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info));

	info.media_version = MEDIA_API_VERSION;
	info.hw_revision = dev->hw_revision;
	info.driver_version = dev->driver_version;

	/* copy_to_user() returns the number of bytes NOT copied; returning
	 * that value directly would leak a positive count to userspace as a
	 * bogus ioctl result.  Translate it to -EFAULT instead. */
	if (copy_to_user(__info, &info, sizeof(*__info)))
		return -EFAULT;
	return 0;
}
/*
 * find_entity - Look up an entity by ID
 *
 * When MEDIA_ENT_ID_FLAG_NEXT is set in @id, returns the first entity
 * with an ID strictly greater than the given one (enumeration helper);
 * otherwise returns the entity whose ID matches exactly.  Returns NULL
 * when no entity is found.
 */
static struct media_entity *find_entity(struct media_device *mdev, u32 id)
{
	struct media_entity *entity;
	int next = id & MEDIA_ENT_ID_FLAG_NEXT;

	id &= ~MEDIA_ENT_ID_FLAG_NEXT;

	spin_lock(&mdev->lock);

	media_device_for_each_entity(entity, mdev) {
		if ((entity->id == id && !next) ||
		    (entity->id > id && next)) {
			spin_unlock(&mdev->lock);
			return entity;
		}
	}

	spin_unlock(&mdev->lock);

	return NULL;
}
/*
 * media_device_enum_entities - MEDIA_IOC_ENUM_ENTITIES handler
 *
 * Copies the descriptor of the entity identified by the user-supplied
 * ID (possibly carrying MEDIA_ENT_ID_FLAG_NEXT) back to userspace.
 */
static long media_device_enum_entities(struct media_device *mdev,
				       struct media_entity_desc __user *uent)
{
	struct media_entity *ent;
	struct media_entity_desc u_ent;

	memset(&u_ent, 0, sizeof(u_ent));
	/* only the id field is read from userspace */
	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
		return -EFAULT;

	ent = find_entity(mdev, u_ent.id);

	if (ent == NULL)
		return -EINVAL;

	u_ent.id = ent->id;
	u_ent.name[0] = '\0';
	if (ent->name)
		strlcpy(u_ent.name, ent->name, sizeof(u_ent.name));
	u_ent.type = ent->type;
	u_ent.revision = ent->revision;
	u_ent.flags = ent->flags;
	u_ent.group_id = ent->group_id;
	u_ent.pads = ent->num_pads;
	/* report outbound links only */
	u_ent.links = ent->num_links - ent->num_backlinks;
	memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
	if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
		return -EFAULT;
	return 0;
}
/* Translate a kernel media_pad into its userspace descriptor. */
static void media_device_kpad_to_upad(const struct media_pad *kpad,
				      struct media_pad_desc *upad)
{
	upad->entity = kpad->entity->id;
	upad->index = kpad->index;
	upad->flags = kpad->flags;
}
/*
 * media_device_enum_links - MEDIA_IOC_ENUM_LINKS handler
 *
 * Copies the pad and outbound-link descriptors of the requested entity
 * into the user-provided arrays.  Either array pointer may be NULL to
 * skip that part of the enumeration.
 */
static long media_device_enum_links(struct media_device *mdev,
				    struct media_links_enum __user *ulinks)
{
	struct media_entity *entity;
	struct media_links_enum links;

	if (copy_from_user(&links, ulinks, sizeof(links)))
		return -EFAULT;

	entity = find_entity(mdev, links.entity);
	if (entity == NULL)
		return -EINVAL;

	if (links.pads) {
		unsigned int p;

		for (p = 0; p < entity->num_pads; p++) {
			struct media_pad_desc pad;

			memset(&pad, 0, sizeof(pad));
			media_device_kpad_to_upad(&entity->pads[p], &pad);
			if (copy_to_user(&links.pads[p], &pad, sizeof(pad)))
				return -EFAULT;
		}
	}

	if (links.links) {
		struct media_link_desc __user *ulink;
		unsigned int l;

		for (l = 0, ulink = links.links; l < entity->num_links; l++) {
			struct media_link_desc link;

			/* Ignore backlinks. */
			if (entity->links[l].source->entity != entity)
				continue;

			memset(&link, 0, sizeof(link));
			media_device_kpad_to_upad(entity->links[l].source,
						  &link.source);
			media_device_kpad_to_upad(entity->links[l].sink,
						  &link.sink);
			link.flags = entity->links[l].flags;
			if (copy_to_user(ulink, &link, sizeof(*ulink)))
				return -EFAULT;
			ulink++;
		}
	}
	if (copy_to_user(ulinks, &links, sizeof(*ulinks)))
		return -EFAULT;
	return 0;
}
/*
 * media_device_setup_link - MEDIA_IOC_SETUP_LINK handler
 *
 * Applies the user-requested flags to the link between the given source
 * and sink pads.  The descriptor is copied back to userspace even when
 * the setup itself fails, and that failure code is returned.
 */
static long media_device_setup_link(struct media_device *mdev,
				    struct media_link_desc __user *_ulink)
{
	struct media_link *link = NULL;
	struct media_link_desc ulink;
	struct media_entity *source;
	struct media_entity *sink;
	int ret;

	if (copy_from_user(&ulink, _ulink, sizeof(ulink)))
		return -EFAULT;

	/* Find the source and sink entities and link.
	 */
	source = find_entity(mdev, ulink.source.entity);
	sink = find_entity(mdev, ulink.sink.entity);

	if (source == NULL || sink == NULL)
		return -EINVAL;

	if (ulink.source.index >= source->num_pads ||
	    ulink.sink.index >= sink->num_pads)
		return -EINVAL;

	link = media_entity_find_link(&source->pads[ulink.source.index],
				      &sink->pads[ulink.sink.index]);
	if (link == NULL)
		return -EINVAL;

	/* Setup the link on both entities. */
	ret = __media_entity_setup_link(link, ulink.flags);

	if (copy_to_user(_ulink, &ulink, sizeof(ulink)))
		return -EFAULT;

	return ret;
}
/*
 * media_device_ioctl - Dispatch the media device ioctls
 *
 * Link enumeration and setup are serialized with graph_mutex so the
 * topology cannot change while it is being reported or modified.
 */
static long media_device_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct media_devnode *devnode = media_devnode_data(filp);
	struct media_device *dev = to_media_device(devnode);
	long ret;

	switch (cmd) {
	case MEDIA_IOC_DEVICE_INFO:
		ret = media_device_get_info(dev,
				(struct media_device_info __user *)arg);
		break;

	case MEDIA_IOC_ENUM_ENTITIES:
		ret = media_device_enum_entities(dev,
				(struct media_entity_desc __user *)arg);
		break;

	case MEDIA_IOC_ENUM_LINKS:
		mutex_lock(&dev->graph_mutex);
		ret = media_device_enum_links(dev,
				(struct media_links_enum __user *)arg);
		mutex_unlock(&dev->graph_mutex);
		break;

	case MEDIA_IOC_SETUP_LINK:
		mutex_lock(&dev->graph_mutex);
		ret = media_device_setup_link(dev,
				(struct media_link_desc __user *)arg);
		mutex_unlock(&dev->graph_mutex);
		break;

	default:
		ret = -ENOIOCTLCMD;
	}

	return ret;
}
/* File operations for the media device node. */
static const struct media_file_operations media_device_fops = {
	.owner = THIS_MODULE,
	.open = media_device_open,
	.ioctl = media_device_ioctl,
	.release = media_device_close,
};
/* -----------------------------------------------------------------------------
* sysfs
*/
/* sysfs "model" attribute: exposes the device model name followed by a
 * newline; %.*s bounds the read to the fixed-size model field. */
static ssize_t show_model(struct device *cd,
			  struct device_attribute *attr, char *buf)
{
	struct media_device *mdev = to_media_device(to_media_devnode(cd));

	return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
}

/* Read-only attribute, readable by everyone. */
static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
/* -----------------------------------------------------------------------------
* Registration/unregistration
*/
/* Devnode release callback: the media_device itself is owned and freed
 * by the driver that registered it, so there is nothing to do here. */
static void media_device_release(struct media_devnode *mdev)
{
}
/**
 * media_device_register - register a media device
 * @mdev: The media device
 *
 * The caller is responsible for initializing the media device before
 * registration. The following fields must be set:
 *
 * - dev must point to the parent device
 * - model must be filled with the device model name
 *
 * Returns 0 on success or a negative error code on failure.
 */
int __must_check media_device_register(struct media_device *mdev)
{
	int ret;

	if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0))
		return -EINVAL;

	/* entity IDs are 1-based; 0 means "allocate one" */
	mdev->entity_id = 1;
	INIT_LIST_HEAD(&mdev->entities);
	spin_lock_init(&mdev->lock);
	mutex_init(&mdev->graph_mutex);

	/* Register the device node. */
	mdev->devnode.fops = &media_device_fops;
	mdev->devnode.parent = mdev->dev;
	mdev->devnode.release = media_device_release;
	ret = media_devnode_register(&mdev->devnode);
	if (ret < 0)
		return ret;

	ret = device_create_file(&mdev->devnode.dev, &dev_attr_model);
	if (ret < 0) {
		/* roll back the devnode registration */
		media_devnode_unregister(&mdev->devnode);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(media_device_register);
/**
 * media_device_unregister - unregister a media device
 * @mdev: The media device
 *
 * Unregisters every remaining entity, removes the sysfs attribute and
 * unregisters the device node.
 */
void media_device_unregister(struct media_device *mdev)
{
	struct media_entity *entity;
	struct media_entity *next;

	/* _safe: unregistering removes the entity from the list */
	list_for_each_entry_safe(entity, next, &mdev->entities, list)
		media_device_unregister_entity(entity);

	device_remove_file(&mdev->devnode.dev, &dev_attr_model);
	media_devnode_unregister(&mdev->devnode);
}
EXPORT_SYMBOL_GPL(media_device_unregister);
/**
 * media_device_register_entity - Register an entity with a media device
 * @mdev: The media device
 * @entity: The entity
 *
 * An entity ID of 0 is replaced by a freshly allocated one; a non-zero
 * ID is kept and the allocator is advanced past it.  Always returns 0.
 */
int __must_check media_device_register_entity(struct media_device *mdev,
					      struct media_entity *entity)
{
	/* Warn if we apparently re-register an entity */
	WARN_ON(entity->parent != NULL);
	entity->parent = mdev;

	spin_lock(&mdev->lock);
	if (entity->id == 0)
		entity->id = mdev->entity_id++;
	else
		/* keep future allocations above any caller-chosen ID */
		mdev->entity_id = max(entity->id + 1, mdev->entity_id);
	list_add_tail(&entity->list, &mdev->entities);
	spin_unlock(&mdev->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
/**
 * media_device_unregister_entity - Unregister an entity
 * @entity: The entity
 *
 * If the entity has never been registered this function will return
 * immediately.
 */
void media_device_unregister_entity(struct media_entity *entity)
{
	struct media_device *mdev = entity->parent;

	if (mdev == NULL)
		return;

	spin_lock(&mdev->lock);
	list_del(&entity->list);
	spin_unlock(&mdev->lock);
	entity->parent = NULL;  /* mark as unregistered */
}
EXPORT_SYMBOL_GPL(media_device_unregister_entity);
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: a8a855bdec3a32f468b96ee8f27c52c4
TextScriptImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: [email protected] (Kenton Varda)
// Based on original Protocol Buffers design by
// Sanjay Ghemawat, Jeff Dean, and others.
//
// Defines the abstract interface implemented by each of the language-specific
// code generators.
#ifndef GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__
#define GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__
#include <google/protobuf/stubs/common.h>
#include <string>
#include <vector>
#include <utility>
namespace google {
namespace protobuf {
namespace io { class ZeroCopyOutputStream; }
class FileDescriptor;
namespace compiler {
class AccessInfoMap;
class Version;
// Defined in this file.
class CodeGenerator;
class GeneratorContext;
// The abstract interface to a class which generates code implementing a
// particular proto file in a particular language.  A number of these may
// be registered with CommandLineInterface to support various languages.
class LIBPROTOC_EXPORT CodeGenerator {
 public:
  // Trivially constructible; concrete generators carry their own state.
  inline CodeGenerator() {}
  virtual ~CodeGenerator();

  // Generates code for the given proto file, generating one or more files in
  // the given output directory.
  //
  // A parameter to be passed to the generator can be specified on the command
  // line.  This is intended to be used to pass generator specific parameters.
  // It is empty if no parameter was given.  ParseGeneratorParameter (below),
  // can be used to accept multiple parameters within the single parameter
  // command line flag.
  //
  // Returns true if successful.  Otherwise, sets *error to a description of
  // the problem (e.g. "invalid parameter") and returns false.
  virtual bool Generate(const FileDescriptor* file,
                        const string& parameter,
                        GeneratorContext* generator_context,
                        string* error) const = 0;

  // Generates code for all given proto files.
  //
  // WARNING: The canonical code generator design produces one or two output
  // files per input .proto file, and we do not wish to encourage alternate
  // designs.
  //
  // A parameter is given as passed on the command line, as in |Generate()|
  // above.
  //
  // Returns true if successful.  Otherwise, sets *error to a description of
  // the problem (e.g. "invalid parameter") and returns false.
  virtual bool GenerateAll(const std::vector<const FileDescriptor*>& files,
                           const string& parameter,
                           GeneratorContext* generator_context,
                           string* error) const;

  // This is no longer used, but this class is part of the opensource protobuf
  // library, so it has to remain to keep vtables the same for the current
  // version of the library.  When protobufs does a api breaking change, the
  // method can be removed.
  virtual bool HasGenerateAll() const { return true; }

 private:
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodeGenerator);
};
// CodeGenerators generate one or more files in a given directory.  This
// abstract interface represents the directory to which the CodeGenerator is
// to write and other information about the context in which the Generator
// runs.
class LIBPROTOC_EXPORT GeneratorContext {
 public:
  // Trivially constructible; concrete contexts supply the output target.
  inline GeneratorContext() {
  }
  virtual ~GeneratorContext();

  // Opens the given file, truncating it if it exists, and returns a
  // ZeroCopyOutputStream that writes to the file.  The caller takes ownership
  // of the returned object.  This method never fails (a dummy stream will be
  // returned instead).
  //
  // The filename given should be relative to the root of the source tree.
  // E.g. the C++ generator, when generating code for "foo/bar.proto", will
  // generate the files "foo/bar.pb.h" and "foo/bar.pb.cc"; note that
  // "foo/" is included in these filenames.  The filename is not allowed to
  // contain "." or ".." components.
  virtual io::ZeroCopyOutputStream* Open(const string& filename) = 0;

  // Similar to Open() but the output will be appended to the file if exists
  virtual io::ZeroCopyOutputStream* OpenForAppend(const string& filename);

  // Creates a ZeroCopyOutputStream which will insert code into the given file
  // at the given insertion point.  See plugin.proto (plugin.pb.h) for more
  // information on insertion points.  The default implementation
  // assert-fails -- it exists only for backwards-compatibility.
  //
  // WARNING: This feature is currently EXPERIMENTAL and is subject to change.
  virtual io::ZeroCopyOutputStream* OpenForInsert(
      const string& filename, const string& insertion_point);

  // Returns a vector of FileDescriptors for all the files being compiled
  // in this run.  Useful for languages, such as Go, that treat files
  // differently when compiled as a set rather than individually.
  virtual void ListParsedFiles(std::vector<const FileDescriptor*>* output);

  // Retrieves the version number of the protocol compiler associated with
  // this GeneratorContext.
  virtual void GetCompilerVersion(Version* version) const;

 private:
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GeneratorContext);
};
// The type GeneratorContext was once called OutputDirectory.  This typedef
// provides backward compatibility.
typedef GeneratorContext OutputDirectory;

// Several code generators treat the parameter argument as holding a
// list of options separated by commas.  This helper function parses
// a set of comma-delimited name/value pairs: e.g.,
//   "foo=bar,baz,qux=corge"
// parses to the pairs:
//   ("foo", "bar"), ("baz", ""), ("qux", "corge")
// As the "baz" example shows, a name without '=' maps to an empty value.
LIBPROTOC_EXPORT void ParseGeneratorParameter(
    const string&, std::vector<std::pair<string, string> >*);
} // namespace compiler
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__
| {
"pile_set_name": "Github"
} |
/**
 * Default model configuration
 * (sails.config.models)
 *
 * Unless you override them, the following properties will be included
 * in each of your models.
 */
module.exports.models = {

  // Your app's default connection.
  // i.e. the name of one of your app's connections (see `config/connections.js`)
  //
  // (defaults to localDiskDb)
  //
  // The SAILS_ADAPTER_NAME environment variable, when set, takes
  // precedence; otherwise the 'postgresql' connection is used.
  connection: process.env.SAILS_ADAPTER_NAME || 'postgresql'

};
| {
"pile_set_name": "Github"
} |
package au.com.codeka.warworlds.planetrender
/**
 * Signals a failure while processing a planet-render template.
 *
 * Mirrors the four conventional [Exception] constructors so callers can
 * attach a message, a cause, both, or neither.
 */
class TemplateException : Exception {
  constructor() : super()
  constructor(cause: Throwable?) : super(cause)
  constructor(message: String?) : super(message)
  constructor(message: String?, cause: Throwable?) : super(message, cause)

  companion object {
    // Stable id for Java serialization of this Throwable subtype.
    private const val serialVersionUID = 1L
  }
} | {
"pile_set_name": "Github"
} |
package com.central.oauth.service;
import com.central.common.model.PageResult;
import com.central.oauth.model.TokenVo;
import java.util.Map;
/**
* @author zlt
*/
public interface ITokensService {
	/**
	 * Query one page of OAuth access tokens (查询token列表).
	 *
	 * @param params   request parameters — presumably carries paging fields
	 *                 such as page/limit; confirm against callers
	 * @param clientId application (client) id whose tokens are listed (应用id)
	 * @return a page of {@link TokenVo} results
	 */
	PageResult<TokenVo> listTokens(Map<String, Object> params, String clientId);
}
| {
"pile_set_name": "Github"
} |
agents:
- goal: [28, 16]
name: agent0
start: [23, 15]
- goal: [21, 19]
name: agent1
start: [24, 1]
- goal: [31, 22]
name: agent2
start: [22, 22]
- goal: [5, 27]
name: agent3
start: [16, 30]
- goal: [3, 30]
name: agent4
start: [3, 27]
- goal: [18, 24]
name: agent5
start: [24, 10]
- goal: [27, 25]
name: agent6
start: [9, 8]
- goal: [10, 1]
name: agent7
start: [28, 5]
- goal: [19, 4]
name: agent8
start: [26, 26]
- goal: [6, 28]
name: agent9
start: [0, 4]
- goal: [19, 15]
name: agent10
start: [6, 10]
- goal: [27, 17]
name: agent11
start: [9, 10]
- goal: [22, 2]
name: agent12
start: [16, 25]
- goal: [16, 7]
name: agent13
start: [16, 7]
- goal: [3, 23]
name: agent14
start: [1, 9]
- goal: [22, 12]
name: agent15
start: [5, 6]
- goal: [7, 17]
name: agent16
start: [1, 4]
- goal: [0, 12]
name: agent17
start: [27, 9]
- goal: [15, 2]
name: agent18
start: [17, 17]
- goal: [21, 28]
name: agent19
start: [26, 3]
- goal: [28, 4]
name: agent20
start: [2, 22]
- goal: [2, 13]
name: agent21
start: [18, 7]
- goal: [2, 27]
name: agent22
start: [21, 1]
- goal: [9, 6]
name: agent23
start: [5, 12]
- goal: [18, 23]
name: agent24
start: [10, 18]
- goal: [10, 19]
name: agent25
start: [28, 13]
- goal: [22, 24]
name: agent26
start: [9, 22]
- goal: [22, 17]
name: agent27
start: [25, 10]
- goal: [31, 0]
name: agent28
start: [23, 5]
- goal: [20, 29]
name: agent29
start: [23, 27]
- goal: [1, 22]
name: agent30
start: [31, 22]
- goal: [21, 10]
name: agent31
start: [3, 22]
- goal: [23, 20]
name: agent32
start: [30, 8]
- goal: [4, 13]
name: agent33
start: [15, 1]
- goal: [14, 8]
name: agent34
start: [5, 2]
- goal: [14, 30]
name: agent35
start: [10, 13]
- goal: [3, 28]
name: agent36
start: [7, 15]
- goal: [13, 7]
name: agent37
start: [22, 8]
- goal: [13, 29]
name: agent38
start: [22, 14]
- goal: [25, 21]
name: agent39
start: [29, 7]
- goal: [18, 1]
name: agent40
start: [16, 0]
- goal: [26, 30]
name: agent41
start: [8, 31]
- goal: [19, 13]
name: agent42
start: [3, 15]
- goal: [7, 9]
name: agent43
start: [29, 29]
- goal: [29, 27]
name: agent44
start: [8, 18]
- goal: [19, 2]
name: agent45
start: [5, 28]
- goal: [5, 29]
name: agent46
start: [15, 17]
- goal: [23, 7]
name: agent47
start: [0, 11]
- goal: [29, 30]
name: agent48
start: [13, 20]
- goal: [25, 7]
name: agent49
start: [8, 25]
- goal: [12, 6]
name: agent50
start: [31, 14]
- goal: [7, 24]
name: agent51
start: [27, 15]
- goal: [17, 30]
name: agent52
start: [12, 3]
- goal: [27, 27]
name: agent53
start: [29, 28]
- goal: [5, 3]
name: agent54
start: [14, 0]
- goal: [13, 16]
name: agent55
start: [29, 5]
- goal: [24, 25]
name: agent56
start: [18, 16]
- goal: [11, 30]
name: agent57
start: [19, 12]
- goal: [16, 16]
name: agent58
start: [30, 15]
- goal: [22, 23]
name: agent59
start: [21, 5]
- goal: [14, 23]
name: agent60
start: [1, 1]
- goal: [27, 14]
name: agent61
start: [4, 28]
- goal: [14, 25]
name: agent62
start: [11, 7]
- goal: [18, 13]
name: agent63
start: [13, 31]
- goal: [3, 27]
name: agent64
start: [14, 17]
- goal: [26, 12]
name: agent65
start: [3, 11]
- goal: [28, 6]
name: agent66
start: [29, 12]
- goal: [23, 26]
name: agent67
start: [18, 12]
- goal: [3, 8]
name: agent68
start: [13, 30]
- goal: [28, 27]
name: agent69
start: [4, 30]
- goal: [24, 30]
name: agent70
start: [23, 8]
- goal: [26, 28]
name: agent71
start: [10, 12]
- goal: [8, 9]
name: agent72
start: [31, 15]
- goal: [1, 0]
name: agent73
start: [11, 3]
- goal: [4, 1]
name: agent74
start: [29, 2]
- goal: [22, 30]
name: agent75
start: [23, 10]
- goal: [25, 15]
name: agent76
start: [1, 20]
- goal: [9, 16]
name: agent77
start: [25, 2]
- goal: [23, 22]
name: agent78
start: [15, 24]
- goal: [8, 18]
name: agent79
start: [12, 21]
- goal: [0, 24]
name: agent80
start: [24, 19]
- goal: [24, 23]
name: agent81
start: [30, 24]
- goal: [26, 7]
name: agent82
start: [18, 3]
- goal: [17, 29]
name: agent83
start: [27, 28]
- goal: [20, 17]
name: agent84
start: [27, 10]
- goal: [25, 1]
name: agent85
start: [17, 26]
- goal: [24, 18]
name: agent86
start: [18, 24]
- goal: [29, 11]
name: agent87
start: [29, 15]
- goal: [19, 14]
name: agent88
start: [11, 29]
- goal: [18, 11]
name: agent89
start: [11, 26]
- goal: [15, 31]
name: agent90
start: [19, 10]
- goal: [4, 7]
name: agent91
start: [10, 3]
- goal: [8, 5]
name: agent92
start: [9, 19]
- goal: [11, 18]
name: agent93
start: [9, 6]
- goal: [22, 15]
name: agent94
start: [19, 6]
- goal: [21, 2]
name: agent95
start: [26, 15]
- goal: [28, 31]
name: agent96
start: [21, 3]
- goal: [5, 26]
name: agent97
start: [14, 10]
- goal: [12, 19]
name: agent98
start: [6, 30]
- goal: [20, 25]
name: agent99
start: [17, 29]
map:
dimensions: [32, 32]
obstacles:
- [27, 1]
- [15, 11]
- [23, 16]
- [26, 18]
- [26, 31]
- [28, 9]
- [14, 19]
- [7, 20]
- [4, 22]
- [6, 5]
- [8, 27]
- [6, 0]
- [26, 17]
- [21, 7]
- [3, 31]
- [22, 10]
- [1, 23]
- [17, 31]
- [9, 24]
- [10, 22]
- [10, 9]
- [30, 4]
- [1, 28]
- [3, 9]
- [20, 18]
- [14, 2]
- [8, 20]
- [27, 20]
- [20, 14]
- [1, 7]
- [3, 26]
- [6, 9]
- [11, 11]
- [5, 8]
- [17, 12]
- [6, 18]
- [5, 19]
- [6, 20]
- [4, 4]
- [1, 17]
- [2, 9]
- [17, 0]
- [16, 20]
- [3, 16]
- [19, 17]
- [5, 31]
- [21, 9]
- [24, 21]
- [16, 15]
- [11, 14]
- [7, 22]
- [7, 23]
- [4, 21]
- [25, 22]
- [9, 26]
- [20, 16]
- [27, 22]
- [13, 18]
- [7, 2]
- [21, 27]
- [25, 25]
- [15, 14]
- [0, 0]
- [12, 25]
- [31, 4]
- [10, 6]
- [23, 21]
- [19, 28]
- [2, 25]
- [1, 16]
- [0, 3]
- [12, 31]
- [10, 24]
- [21, 13]
- [1, 24]
- [19, 27]
- [15, 13]
- [18, 4]
- [8, 29]
- [15, 10]
- [21, 25]
- [24, 11]
- [24, 16]
- [12, 8]
- [0, 30]
- [18, 28]
- [13, 5]
- [18, 10]
- [21, 26]
- [13, 13]
- [15, 19]
- [8, 13]
- [7, 5]
- [27, 21]
- [18, 0]
- [20, 7]
- [31, 31]
- [30, 9]
- [27, 11]
- [18, 6]
- [21, 23]
- [13, 3]
- [16, 5]
- [14, 29]
- [11, 4]
- [15, 9]
- [12, 1]
- [16, 26]
- [24, 9]
- [19, 16]
- [29, 22]
- [27, 8]
- [31, 13]
- [15, 16]
- [12, 5]
- [15, 30]
- [28, 17]
- [12, 28]
- [23, 1]
- [11, 25]
- [1, 13]
- [26, 16]
- [25, 30]
- [7, 25]
- [5, 4]
- [10, 21]
- [31, 29]
- [15, 20]
- [30, 13]
- [5, 22]
- [17, 8]
- [3, 10]
- [28, 11]
- [2, 0]
- [27, 16]
- [26, 2]
- [15, 22]
- [24, 15]
- [19, 11]
- [18, 22]
- [0, 21]
- [7, 1]
- [12, 17]
- [9, 4]
- [26, 11]
- [20, 3]
- [9, 27]
- [19, 1]
- [13, 11]
- [12, 29]
- [29, 9]
- [14, 24]
- [0, 29]
- [17, 7]
- [13, 24]
- [24, 4]
- [29, 13]
- [6, 6]
- [10, 27]
- [15, 8]
- [30, 26]
- [28, 21]
- [26, 8]
- [17, 15]
- [5, 0]
- [4, 15]
- [14, 15]
- [6, 27]
- [17, 20]
- [19, 19]
- [28, 1]
- [16, 28]
- [29, 1]
- [3, 25]
- [24, 20]
- [24, 27]
- [9, 28]
- [15, 18]
- [22, 31]
- [8, 6]
- [7, 27]
- [5, 10]
- [21, 30]
- [9, 25]
- [30, 21]
- [12, 7]
- [1, 8]
- [10, 2]
- [23, 28]
- [29, 0]
- [13, 27]
- [0, 15]
- [13, 28]
- [9, 21]
- [2, 6]
- [29, 18]
- [17, 6]
- [25, 12]
- [10, 29]
- [29, 4]
- [17, 21]
- [24, 22]
- [9, 1]
- [5, 15]
| {
"pile_set_name": "Github"
} |
// FB Alpha PK Scramble driver module
// Based on MAME driver by David Haywood and Pierpaolo Prazzoli
#include "tiles_generic.h"
#include "m68000_intf.h"
#include "burn_ym2203.h"
static UINT8 *AllMem;
static UINT8 *RamEnd;
static UINT8 *AllRam;
static UINT8 *MemEnd;
static UINT8 *Drv68KROM;
static UINT8 *DrvGfxROM;
static UINT8 *DrvNVRAM;
static UINT8 *Drv68KRAM;
static UINT8 *DrvFgRAM;
static UINT8 *DrvMgRAM;
static UINT8 *DrvBgRAM;
static UINT8 *DrvPalRAM;
static UINT32 *DrvPalette;
static UINT8 DrvRecalc;
static UINT16 irq_enable;
static INT32 irq_line_active;
static UINT8 DrvJoy1[8];
static UINT8 DrvDips[3];
static UINT16 DrvInputs[1];
static UINT8 DrvReset;
static struct BurnInputInfo PkscrambleInputList[] = {
{"P1 Coin", BIT_DIGITAL, DrvJoy1 + 4, "p1 coin" },
{"P1 Button 1", BIT_DIGITAL, DrvJoy1 + 0, "p1 fire 1" },
{"P1 Button 2", BIT_DIGITAL, DrvJoy1 + 1, "p1 fire 2" },
{"P1 Button 3", BIT_DIGITAL, DrvJoy1 + 2, "p1 fire 3" },
{"P1 Button 4", BIT_DIGITAL, DrvJoy1 + 3, "p1 fire 4" },
{"Reset", BIT_DIGITAL, &DrvReset, "reset" },
{"Service", BIT_DIGITAL, DrvJoy1 + 7, "service" },
{"Dip A", BIT_DIPSWITCH, DrvDips + 0, "dip" },
{"Dip B", BIT_DIPSWITCH, DrvDips + 1, "dip" },
{"Dip C", BIT_DIPSWITCH, DrvDips + 2, "dip" },
};
STDINPUTINFO(Pkscramble)
static struct BurnDIPInfo PkscrambleDIPList[]=
{
{0x07, 0xff, 0xff, 0xfb, NULL },
{0x08, 0xff, 0xff, 0x49, NULL },
{0x09, 0xff, 0xff, 0x00, NULL },
{0 , 0xfe, 0 , 8, "Level" },
{0x07, 0x01, 0x07, 0x00, "0" },
{0x07, 0x01, 0x07, 0x01, "1" },
{0x07, 0x01, 0x07, 0x02, "2" },
{0x07, 0x01, 0x07, 0x03, "3" },
{0x07, 0x01, 0x07, 0x04, "4" },
{0x07, 0x01, 0x07, 0x05, "5" },
{0x07, 0x01, 0x07, 0x06, "6" },
{0x07, 0x01, 0x07, 0x07, "7" },
{0 , 0xfe, 0 , 8, "Coin to Start" },
{0x08, 0x01, 0x07, 0x01, "1" },
{0x08, 0x01, 0x07, 0x02, "2" },
{0x08, 0x01, 0x07, 0x03, "3" },
{0x08, 0x01, 0x07, 0x04, "4" },
{0x08, 0x01, 0x07, 0x05, "5" },
{0x08, 0x01, 0x07, 0x06, "6" },
{0x08, 0x01, 0x07, 0x07, "7" },
{0x08, 0x01, 0x07, 0x00, "Free Play" },
{0 , 0xfe, 0 , 8, "Coinage" },
{0x08, 0x01, 0x38, 0x08, "1 Coin 1 Credits" },
{0x08, 0x01, 0x38, 0x10, "1 Coin 2 Credits" },
{0x08, 0x01, 0x38, 0x18, "1 Coin 3 Credits" },
{0x08, 0x01, 0x38, 0x20, "1 Coin 4 Credits" },
{0x08, 0x01, 0x38, 0x28, "1 Coin 5 Credits" },
{0x08, 0x01, 0x38, 0x30, "1 Coin 6 Credits" },
{0x08, 0x01, 0x38, 0x38, "1 Coin 7 Credits" },
{0x08, 0x01, 0x38, 0x00, "No Credit" },
{0 , 0xfe, 0 , 2, "Coin Test" },
{0x08, 0x01, 0x40, 0x00, "Off" },
{0x08, 0x01, 0x40, 0x40, "On" },
};
STDDIPINFO(Pkscramble)
// 68K word-wide write handler for the I/O region.  The mask 0x7fffe drops
// bit 0, so the byte pair of each word register lands on one case.
static void __fastcall pkscramble_write_word(UINT32 address, UINT16 data)
{
	switch (address & 0x7fffe)
	{
		case 0x49008:
			// Interrupt control register.  If bit 13 (scanline IRQ enable)
			// is cleared while the IRQ line is asserted, drop the line now.
			irq_enable = data;

			if ((data & 0x2000) == 0 && irq_line_active) {
				SekSetIRQLine(1, CPU_IRQSTATUS_NONE);
				irq_line_active = 0;
			}
		return;

		case 0x4900c:
		case 0x4900e:
			// YM2203 address/data ports; (address / 2) & 1 selects the port.
			BurnYM2203Write(0, (address / 2) & 1, data);
		return;

		case 0x49010:
		case 0x49014:
		case 0x49018:
		case 0x49020:
		case 0x52086:
			// Unemulated outputs — deliberately ignored.
			// nop
		return;
	}
}
// 68K byte-wide write handler.  No byte-wide registers are emulated; any
// write that falls through to here is logged to aid debugging.
static void __fastcall pkscramble_write_byte(UINT32 address, UINT8 data)
{
	bprintf (0, _T("WB: %5.5x, %2.2x\n"), address, data);
}
// 68K word-wide read handler.
static UINT16 __fastcall pkscramble_read_word(UINT32 address)
{
	switch (address & 0x7ffff)
	{
		case 0x49000:
		case 0x49001:
			// Dip banks: B in the high byte, A in the low byte.
			return (DrvDips[1] * 256) + DrvDips[0];

		case 0x49004:
		case 0x49005:
			// Player inputs; bit 5 is forced high (0x20 — purpose unclear,
			// TODO confirm) and bit 6 comes from the "Coin Test" dip.
			return (DrvInputs[0] & ~0x60) + 0x20 + (DrvDips[2] & 0x40);

		case 0x4900c:
		case 0x4900d:
		case 0x4900e:
		case 0x4900f:
			// YM2203 status/data; (address / 2) & 1 selects the port.
			return BurnYM2203Read(0, (address / 2) & 1);
	}

	return 0;
}
// 68K byte-wide read handler; serves the odd/even halves of the same
// registers handled word-wide above.
static UINT8 __fastcall pkscramble_read_byte(UINT32 address)
{
	switch (address & 0x7ffff)
	{
		case 0x49000:
			return DrvDips[1];	// dip bank B (high byte of the word register)

		case 0x49001:
			return DrvDips[0];	// dip bank A (low byte)

		case 0x49004:
			return 0;		// unused high byte of the input word

		case 0x49005:
			// Inputs with forced bit 5 and the "Coin Test" dip in bit 6.
			return (DrvInputs[0] & ~0x60) + 0x20 + (DrvDips[2] & 0x40);

		case 0x4900c:
		case 0x4900d:
		case 0x4900e:
		case 0x4900f:
			return BurnYM2203Read(0, (address / 2) & 1);
	}

	return 0;
}
// Tilemap callbacks: each tile uses two consecutive words of its RAM —
// word 0 is the tile code, word 1 carries a 7-bit colour index.

static tilemap_callback( bg )
{
	UINT16 *ram   = (UINT16*)DrvBgRAM;

	UINT16 code   = ram[offs * 2 + 0];
	UINT16 color  = ram[offs * 2 + 1] & 0x7f;

	TILE_SET_INFO(0, code, color, 0);
}

static tilemap_callback( mg )
{
	UINT16 *ram   = (UINT16*)DrvMgRAM;

	UINT16 code   = ram[offs * 2 + 0];
	UINT16 color  = ram[offs * 2 + 1] & 0x7f;

	TILE_SET_INFO(0, code, color, 0);
}

static tilemap_callback( fg )
{
	UINT16 *ram   = (UINT16*)DrvFgRAM;

	UINT16 code   = ram[offs * 2 + 0];
	UINT16 color  = ram[offs * 2 + 1] & 0x7f;

	TILE_SET_INFO(0, code, color, 0);
}
// YM2203 timer-IRQ callback: forwarded to 68K IRQ level 2, gated by bit 4
// of the irq_enable register.
static void DrvIRQHandler(INT32, INT32 nStatus)
{
	if (irq_enable & 0x10)
	{
		SekSetIRQLine(2, (nStatus) ? CPU_IRQSTATUS_ACK : CPU_IRQSTATUS_NONE);
	}
}
// Machine reset: wipe volatile RAM (DrvNVRAM lives outside the
// AllRam..RamEnd window and therefore survives), reset the 68K and the
// YM2203, and clear the interrupt latches.
static INT32 DrvDoReset()
{
	memset (AllRam, 0, RamEnd - AllRam);

	SekOpen(0);
	SekReset();
	BurnYM2203Reset();
	SekClose();

	irq_enable = 0;
	irq_line_active = 0;

	return 0;
}
// Carve the single AllMem allocation into the individual ROM/RAM regions.
// Called twice from DrvInit: first with AllMem == NULL to measure the total
// size, then again to assign real pointers.  DrvNVRAM is placed before the
// AllRam marker on purpose so DrvDoReset does not clear it.
static INT32 MemIndex()
{
	UINT8 *Next; Next = AllMem;

	Drv68KROM	= Next; Next += 0x020000;

	DrvGfxROM	= Next; Next += 0x080000;	// 0x40000 bytes loaded, doubled by DrvGfxExpand

	DrvPalette	= (UINT32*)Next; Next += 0x0800 * sizeof(UINT32);

	DrvNVRAM	= Next; Next += 0x000100;

	AllRam		= Next;

	Drv68KRAM	= Next; Next += 0x003000;
	DrvFgRAM	= Next; Next += 0x001000;
	DrvMgRAM	= Next; Next += 0x001000;
	DrvBgRAM	= Next; Next += 0x002000;
	DrvPalRAM	= Next; Next += 0x001000;

	RamEnd		= Next;

	MemEnd		= Next;

	return 0;
}
// Expand the packed 4bpp graphics ROM in place: each source byte holds two
// 4-bit pixels, split into one byte per pixel.  The loop runs backwards so
// a packed source byte is always read before its destination slots are
// overwritten (dest index i*2 >= source index i).
//
// The loaded data is 0x40000 bytes (two 0x20000 ROMs interleaved), so valid
// source indices are 0..0x3ffff.  The original loop started at i = 0x40000,
// which wrote DrvGfxROM[0x80000] and DrvGfxROM[0x80001] — one and two bytes
// past the 0x80000-byte region reserved in MemIndex(), clobbering the start
// of the adjacent DrvPalette block.  Start at the last valid byte instead.
static void DrvGfxExpand()
{
	for (INT32 i = 0x3ffff; i >= 0; i--)
	{
		DrvGfxROM[i * 2 + 1] = DrvGfxROM[i] >> 4;
		DrvGfxROM[i * 2 + 0] = DrvGfxROM[i] & 0xf;
	}
}
// One-time driver initialisation: allocate and partition memory, load and
// unpack the ROMs, map the 68K address space, and set up the YM2203 (3 MHz,
// timer-synced to the 8 MHz 68K) plus the three 8x8 tilemap layers.
static INT32 DrvInit()
{
	AllMem = NULL;
	MemIndex();				// pass 1: compute total size
	INT32 nLen = MemEnd - (UINT8 *)0;
	if ((AllMem = (UINT8 *)BurnMalloc(nLen)) == NULL) return 1;
	memset(AllMem, 0, nLen);
	MemIndex();				// pass 2: assign real pointers

	{
		// Program ROMs are byte-interleaved (even/odd), as are the gfx ROMs.
		if (BurnLoadRom(Drv68KROM + 0x000001,  0, 2)) return 1;
		if (BurnLoadRom(Drv68KROM + 0x000000,  1, 2)) return 1;

		if (BurnLoadRom(DrvGfxROM + 0x000001,  2, 2)) return 1;
		if (BurnLoadRom(DrvGfxROM + 0x000000,  3, 2)) return 1;

		DrvGfxExpand();			// unpack 4bpp nibbles to one byte per pixel
	}

	SekInit(0, 0x68000);
	SekOpen(0);
	SekMapMemory(Drv68KROM,		0x000000, 0x01ffff, MAP_ROM);
	SekMapMemory(DrvNVRAM,		0x040000, 0x0403ff, MAP_RAM); // 0-ff
	SekMapMemory(Drv68KRAM,		0x041000, 0x043fff, MAP_RAM);
	SekMapMemory(DrvFgRAM,		0x044000, 0x044fff, MAP_RAM);
	SekMapMemory(DrvMgRAM,		0x045000, 0x045fff, MAP_RAM);
	SekMapMemory(DrvBgRAM,		0x046000, 0x047fff, MAP_RAM);
	SekMapMemory(DrvPalRAM,		0x048000, 0x048fff, MAP_RAM);
	SekSetWriteWordHandler(0,	pkscramble_write_word);
	SekSetWriteByteHandler(0,	pkscramble_write_byte);
	SekSetReadWordHandler(0,	pkscramble_read_word);
	SekSetReadByteHandler(0,	pkscramble_read_byte);
	SekClose();

	// YM2203 mixed at 50%, its AY channels at 25% each.
	BurnYM2203Init(1, 3000000, &DrvIRQHandler, 0);
	BurnTimerAttachSek(8000000);
	BurnYM2203SetRoute(0, BURN_SND_YM2203_YM2203_ROUTE,   0.50, BURN_SND_ROUTE_BOTH);
	BurnYM2203SetRoute(0, BURN_SND_YM2203_AY8910_ROUTE_1, 0.25, BURN_SND_ROUTE_BOTH);
	BurnYM2203SetRoute(0, BURN_SND_YM2203_AY8910_ROUTE_2, 0.25, BURN_SND_ROUTE_BOTH);
	BurnYM2203SetRoute(0, BURN_SND_YM2203_AY8910_ROUTE_3, 0.25, BURN_SND_ROUTE_BOTH);

	GenericTilesInit();
	GenericTilemapInit(0, TILEMAP_SCAN_ROWS, bg_map_callback, 8, 8, 32, 32);
	GenericTilemapInit(1, TILEMAP_SCAN_ROWS, mg_map_callback, 8, 8, 32, 32);
	GenericTilemapInit(2, TILEMAP_SCAN_ROWS, fg_map_callback, 8, 8, 32, 32);
	GenericTilemapSetGfx(0, DrvGfxROM, 4, 8, 8, 0x080000, 0, 0x7f);
	// mg and fg layers draw colour 15 as transparent; bg is opaque.
	GenericTilemapSetTransparent(1, 15);
	GenericTilemapSetTransparent(2, 15);

	DrvDoReset();

	return 0;
}
// Tear down everything created in DrvInit and release the memory block.
static INT32 DrvExit()
{
	GenericTilesExit();

	SekExit();
	BurnYM2203Exit();

	BurnFree(AllMem);

	return 0;
}
// Rebuild the host palette from palette RAM.  Entries are xRRRRRGGGGGBBBBB
// (5 bits per gun); each component is expanded to 8 bits by replicating the
// top bits into the bottom ((v << 3) | (v >> 2)).
static void DrvPaletteUpdate()
{
	UINT16 *p = (UINT16*)DrvPalRAM;

	for (INT32 i = 0; i < 0x1000/2; i++)
	{
		UINT8 r = (p[i] >> 10) & 0x1f;
		UINT8 g = (p[i] >>  5) & 0x1f;
		UINT8 b = (p[i] >>  0) & 0x1f;

		r = (r << 3) | (r >> 2);
		g = (g << 3) | (g >> 2);
		b = (b << 3) | (b >> 2);

		DrvPalette[i] = BurnHighCol(r,g,b,0);
	}
}
// Render one frame: refresh the palette, then draw back / middle / front
// tilemaps in order (transparency for layers 1 and 2 was configured in
// DrvInit) and copy the result to the host surface.
static INT32 DrvDraw()
{
	DrvPaletteUpdate();

	GenericTilemapDraw(0, pTransDraw, 0);
	GenericTilemapDraw(1, pTransDraw, 0);
	GenericTilemapDraw(2, pTransDraw, 0);

	BurnTransferCopy(DrvPalette);

	return 0;
}
// Emulate one frame in 256 scanline slices.  The scanline interrupt (68K
// IRQ 1) is asserted at line 192 when bit 13 of irq_enable is set and
// released one line later (or sooner, via a write to 0x49008).  68K cycles
// are advanced through the YM2203 timer so sound stays in sync.
static INT32 DrvFrame()
{
	if (DrvReset) {
		DrvDoReset();
	}

	SekNewFrame();

	{
		// Pack the 8 digital inputs into one word, one bit per button.
		DrvInputs[0] = 0;
		for (INT32 i = 0; i < 8; i++) {
			DrvInputs[0] ^= (DrvJoy1[i] & 1) << i;
		}
	}

	INT32 nInterleave = 256;
	INT32 nCyclesTotal[1] = { 8000000 / 60 };

	SekOpen(0);

	for (INT32 i = 0; i < nInterleave; i++) {
		BurnTimerUpdate((nCyclesTotal[0] * (i + 1)) / nInterleave);

		if (i == 192) {
			if (irq_enable & 0x2000) {
				SekSetIRQLine(1, CPU_IRQSTATUS_ACK);
				irq_line_active = 1;
			}
		}
		else if (i == 193)
		{
			if (irq_line_active) {
				SekSetIRQLine(1, CPU_IRQSTATUS_NONE);
				irq_line_active = 0;
			}
		}
	}

	BurnTimerEndFrame(nCyclesTotal[0]);

	if (pBurnSoundOut) {
		BurnYM2203Update(pBurnSoundOut, nBurnSoundLen);
	}

	SekClose();

	if (pBurnDraw) {
		DrvDraw();
	}

	return 0;
}
// Savestate handler.  Volatile state (work RAM, CPU, sound, IRQ latches)
// is scanned for ACB_VOLATILE; the battery-backed NVRAM block is reported
// separately for ACB_NVRAM.
static INT32 DrvScan(INT32 nAction, INT32 *pnMin)
{
	struct BurnArea ba;

	if (pnMin) {
		*pnMin = 0x029707;	// minimum supported state version
	}

	if (nAction & ACB_VOLATILE)
	{
		memset(&ba, 0, sizeof(ba));
		ba.Data	  = AllRam;
		ba.nLen	  = RamEnd - AllRam;
		ba.szName = "All Ram";
		BurnAcb(&ba);

		SekScan(nAction);
		BurnYM2203Scan(nAction, pnMin);

		SCAN_VAR(irq_line_active);
		SCAN_VAR(irq_enable);
	}

	if (nAction & ACB_NVRAM)
	{
		memset(&ba, 0, sizeof(ba));
		ba.Data	  = DrvNVRAM;
		ba.nLen	  = 0x100;
		ba.szName = "NV Ram";
		BurnAcb(&ba);
	}

	return 0;
}
// PK Scramble
static struct BurnRomInfo pkscramRomDesc[] = {
{ "pk1.6e", 0x10000, 0x80e972e5, 1 | BRF_PRG | BRF_ESS }, // 0 68k Code
{ "pk2.6j", 0x10000, 0x752c86d1, 1 | BRF_PRG | BRF_ESS }, // 1
{ "pk3.1c", 0x20000, 0x0b18f2bc, 2 | BRF_GRA }, // 2 Graphics Tiles
{ "pk4.1e", 0x20000, 0xa232d993, 2 | BRF_GRA }, // 3
};
STD_ROM_PICK(pkscram)
STD_ROM_FN(pkscram)
struct BurnDriver BurnDrvPkscram = {
"pkscram", NULL, NULL, NULL, "1993",
"PK Scramble\0", NULL, "Cosmo Electronics Corporation", "Miscellaneous",
NULL, NULL, NULL, NULL,
BDF_GAME_WORKING, 2, HARDWARE_MISC_POST90S, GBF_SPORTSMISC, 0,
NULL, pkscramRomInfo, pkscramRomName, NULL, NULL, NULL, NULL, PkscrambleInputInfo, PkscrambleDIPInfo,
DrvInit, DrvExit, DrvFrame, DrvDraw, DrvScan, &DrvRecalc, 0x800,
256, 192, 4, 3
};
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>wiki bullet examples</title>
<script src="../../build/deps.js"></script>
<script src="../../deps/seajs/sea.js"></script>
<style type="text/css">
#horizonal {
width: 400px;
margin: 5px;
float: left;
border: solid 1px;
}
.horizonal {
float: left;
width: 400px;
border: solid 1px;
margin: 2px;
}
#vertical {
height: 300px;
margin: 5px;
float: left;
border: solid 1px;
}
.vertical {
float: left;
height: 300px;
#border: solid 1px;
margin: 2px;
}
</style>
</head>
<body>
<div id="horizonal">
</div>
<div id="vertical">
</div>
<script>
//http://planetozh.com/blog/2008/04/javascript-basename-and-dirname/
var dir = window.location.href.replace(/\\/g,'/').replace(/\/[^\/]*$/, '');
seajs.config({
alias: {
'DataV': dir + '/../../lib/datav.js',
'Axis': dir + '/../../lib/charts/axis.js',
'Bullet': dir + '/../../lib/charts/bullet.js'
}
});
seajs.use(["Bullet", "DataV"], function (Bullet, DataV) {
var data0 = {
title: "Revenue",
subtitle: "US.$(1,000s)",
ranges: [0, 150, 225, 300],
measures: [270],
markers: [249],
rangeTitles: ["bad", "satisfactory", "good"],
measureTitles: ["value: 270"],
markerTitles: ["mean : 249"]
};
var data1 = {
title: "Profit",
subtitle: "%",
ranges: [0, 20, 25, 30],
measures: [22.5],
markers: [27],
rangeTitles: ["bad", "satisfactory", "good"],
measureTitles: ["value: 0.225"],
markerTitles: ["mean : 0.27"]
};
var data2 = {
title: "Avg Order Size",
subtitle: "US.$",
ranges: [0, 350, 500, 600],
measures: [330],
markers: [550],
rangeTitles: ["bad", "satisfactory", "good"],
measureTitles: ["value: 330"],
markerTitles: ["mean : 550"]
};
var data3 = {
title: "New Customers",
subtitle: "Count",
ranges: [0, 1400, 2000, 2500],
measures: [1700],
markers: [2080],
rangeTitles: ["bad", "satisfactory", "good"],
measureTitles: ["value: 1700"],
markerTitles: ["mean : 2080"]
};
var data4 = {
title: "Cust Satisfaction",
subtitle: "Top Rating of 5",
ranges: [0, 3.5, 4.3, 5],
measures: [4.7],
markers: [4.5],
rangeTitles: ["bad", "satisfactory", "good"],
measureTitles: ["value: 4.7"],
markerTitles: ["mean : 4.5"]
};
var datas = [data0, data1, data2, data3, data4];
// Render one horizontal bullet chart per dataset.
datas.forEach(function (d) {
    var container = $("<div></div>").appendTo($("#horizonal")).addClass("horizonal");
    var options = {
        width: 400, height: 60,
        margin: [12, 20, 20, 130],
        color: ["#000", "#ddd"]
    };
    var bullet = new Bullet(container[0], options);
    bullet.setSource(d);
    // Bug fix: the datasets carry a "title" field, not "name", so the
    // original `d.name === "Profit"` check never matched and the percent
    // tick formatter for the Profit chart was dead code.
    if (d.title === "Profit") {
        bullet.axis.tickFormat(function (d) { return d + "%"; });
    }
    bullet.render();
});
data2.title = "Avg Order";
data3.title = "New Cust";
data4.title = "Cust Sat";
// Render one vertical bullet chart per dataset (titles were shortened above
// to fit the narrower layout; "Profit" is unchanged and still matches).
datas.forEach(function (d) {
    var container = $("<div></div>").appendTo($("#vertical")).addClass("vertical");
    var options = {
        width: 90, height: 300,
        margin: [40, 25, 10, 35],
        color: ["#000", "#ddd"],
        orient: "vertical"
    };
    var bullet = new Bullet(container[0], options);
    bullet.setSource(d);
    // Bug fix: datasets use "title", not "name" — the original check
    // `d.name === "Profit"` could never be true.
    if (d.title === "Profit") {
        bullet.axis.tickFormat(function (d) { return d + "%"; });
    }
    bullet.render();
});
});
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<definitions>
<definition>
<key>heading_title</key>
<value><![CDATA[Your Order Has Been Processed!]]></value>
</definition>
<definition>
<key>text_message_account</key>
<value>
<![CDATA[<p>Your order #%s has been created!</p>
<p>You can view your order details by going to the <a href="%s">invoice page</a>.</p>
<p>Please direct any questions you have to the <a href="%s">store owner</a>.</p>
<p>Thank you for shopping with us!</p>]]></value>
</definition>
<definition>
<key>text_message_guest</key>
<value>
<![CDATA[<p>Your order has been successfully processed!</p>
<p>You can view your order details by going to the <a href="%s">invoice page</a>.</p>
<p>Please direct any questions you have to the <a href="%s">store owner</a>.</p>
<p>Thank you for shopping with us!</p>]]></value>
</definition>
<definition>
<key>text_basket</key>
<value><![CDATA[Basket]]></value>
</definition>
<definition>
<key>text_shipping</key>
<value><![CDATA[Shipping]]></value>
</definition>
<definition>
<key>text_payment</key>
<value><![CDATA[Payment]]></value>
</definition>
<definition>
<key>text_guest</key>
<value><![CDATA[Guest Checkout]]></value>
</definition>
<definition>
<key>text_confirm</key>
<value><![CDATA[Confirm]]></value>
</definition>
<definition>
<key>text_success</key>
<value><![CDATA[Success]]></value>
</definition>
<definition>
<key>text_message_failed_order</key>
<value><![CDATA[Oops! We have experienced an issue with your transaction. Your order will be verified manually by an administrator.]]></value>
</definition>
<definition>
<key>text_title_failed_order_to_admin</key>
<value><![CDATA[Failed order #%s]]></value>
</definition>
<definition>
<key>text_message_failed_order_to_admin</key>
<value><![CDATA[Failed or missing response from payment verification. Manual verification is required!]]></value>
</definition>
</definitions>
| {
"pile_set_name": "Github"
} |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* @author Mario Pastorelli ([email protected])
*/
package pureconfig.example
import pureconfig.ConfigConvert
import pureconfig.ConfigConvert.viaStringTry
package object conf {
  // pureconfig has no built-in ConfigConvert for the Email type, so derive
  // one here: parse config strings with Email.fromString (which may fail —
  // hence viaStringTry) and render back out with toString.
  implicit val emailConvert: ConfigConvert[Email] = viaStringTry[Email](Email.fromString, _.toString)
}
| {
"pile_set_name": "Github"
} |
/*
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
*/
#include "qczmq.h"
///
// Copy-construct to return the proper wrapped c types
QZhttpServer::QZhttpServer (zhttp_server_t *self, QObject *qObjParent) : QObject (qObjParent)
{
this->self = self;
}
///
// Create a new http server
QZhttpServer::QZhttpServer (QZhttpServerOptions *options, QObject *qObjParent) : QObject (qObjParent)
{
this->self = zhttp_server_new (options->self);
}
///
// Destroy an http server
QZhttpServer::~QZhttpServer ()
{
zhttp_server_destroy (&self);
}
///
// Return the port the server is listening on.
int QZhttpServer::port ()
{
int rv = zhttp_server_port (self);
return rv;
}
///
// Self test of this class.
void QZhttpServer::test (bool verbose)
{
zhttp_server_test (verbose);
}
/*
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
*/
| {
"pile_set_name": "Github"
} |
package com.freetymekiyan.algorithms.other;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class ValidParentheseStringTest {

	/**
	 * Example pairs of {input, expected output}.  Judging from the cases,
	 * getValidString presumably removes the minimum number of unmatched
	 * parentheses so the remainder is balanced, keeping non-parenthesis
	 * characters intact — TODO confirm against the implementation.
	 */
	@DataProvider(name = "examples")
	public Object[][] getExamples() {
		return new Object[][]{
			new Object[]{"", ""},
			new Object[]{"(()das10())", "(()das10())"},
			new Object[]{"()()())", "()()()"},
			new Object[]{")(", ""},
			new Object[]{"((((((", ""},
			new Object[]{"))))))", ""},
		};
	}

	/** Run every example through a fresh instance and compare outputs. */
	@Test(dataProvider = "examples")
	public void testGetValidString(String s, String expected) {
		ValidParentheseString v = new ValidParentheseString();
		Assert.assertEquals(v.getValidString(s), expected);
	}
}
"pile_set_name": "Github"
} |
FLEXSLIDER CHANGELOG
2013.02.15 - Version 2.0
* Added changelog.txt
2014.05.7 - Version 2.2.2
* Added flexslider.less
2015.02.11 - Version 2.3.0
* Fix for pauseInvisible attribute for Chrome and the Page Visibility API
2015.02.27 - Version 2.4.0
* Update for improved standards. Adds classes to li nav elements. Reset for li elements in stylesheet.
2015.05.19 - Version 2.5.0
* CSS fix for pausePlay play icon.
* Firefox touchstart event fix.
* Compatibility change for jQuery to 1.7.0+
* Adds customDirectionNav param for custom navigation controls
2015.11.16 - Version 2.6.0
* Adds composer json file keywords
* Scope fix for focused keyword
* Fixes bower demo folder exclusion
* z-index fix for disabled nav arrow
* play/pause accessibility fix
* itemMargin fix for slider items margins
* Fixes accessibility for in focus elements and pagination controls
* Firefox fix for text selection on slider carousel
* Adds data-thumb-alt image alt attribute
2016.05.12 - Version 2.6.1
* smoothHeight now uses innerHeight() instead of height() to account for padding in calculation.
* Defining var altText to prevent error.
* bower.json add fonts folder on main field.
* Changed `true` to `false` in order to make sure whether or not to allow a slider comprised of a single slide.
2016.08.18 - Version 2.6.2
* Fixes overflow issue with varying height images.
* Fixes the visibility of the pagination and the navigation in the "fade" mode.
2016.09.06 - Version 2.6.3
* Rollback fade fixes, due to harsh fade reports.
2018.02.01 - Version 2.7.0
* Fixes resize method call for orientationchange.
* Adds RTL feature - param "rtl" added.
* Adds RTL demos.
2018.06.15 - Version 2.7.1
* Firefox RTL fixes.
* Adds "isFirefox" param.
2019.03.07 - Version 2.7.2
* Refactor jQuery HTML output for img attributes. | {
"pile_set_name": "Github"
} |
/*
* This file is part of wl1271
*
* Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#ifndef __TX_H__
#define __TX_H__
#define TX_HW_BLOCK_SPARE_DEFAULT 1
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
#define TX_HW_ATTR_SAVE_RETRIES BIT(0)
#define TX_HW_ATTR_HEADER_PAD BIT(1)
#define TX_HW_ATTR_SESSION_COUNTER (BIT(2) | BIT(3) | BIT(4))
#define TX_HW_ATTR_RATE_POLICY (BIT(5) | BIT(6) | BIT(7) | \
BIT(8) | BIT(9))
#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11))
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
#define TX_HW_ATTR_OFST_SESSION_COUNTER 2
#define TX_HW_ATTR_OFST_RATE_POLICY 5
#define TX_HW_ATTR_OFST_LAST_WORD_PAD 10
#define TX_HW_ATTR_OFST_TX_CMPLT_REQ 12
#define TX_HW_RESULT_QUEUE_LEN 16
#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
#define WL1271_TX_ALIGN_TO 4
#define WL1271_EXTRA_SPACE_TKIP 4
#define WL1271_EXTRA_SPACE_AES 8
#define WL1271_EXTRA_SPACE_MAX 8
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
struct wl127x_tx_mem {
/*
* Number of extra memory blocks to allocate for this packet
* in addition to the number of blocks derived from the packet
* length.
*/
u8 extra_blocks;
/*
* Total number of memory blocks allocated by the host for
* this packet. Must be equal or greater than the actual
* blocks number allocated by HW.
*/
u8 total_mem_blocks;
} __packed;
struct wl128x_tx_mem {
/*
* Total number of memory blocks allocated by the host for
* this packet.
*/
u8 total_mem_blocks;
/*
* Number of extra bytes, at the end of the frame. the host
* uses this padding to complete each frame to integer number
* of SDIO blocks.
*/
u8 extra_bytes;
} __packed;
/*
* On wl128x based devices, when TX packets are aggregated, each packet
* size must be aligned to the SDIO block size. The maximum block size
* is bounded by the type of the padded bytes field that is sent to the
* FW. Currently the type is u8, so the maximum block size is 256 bytes.
*/
#define WL12XX_BUS_BLOCK_SIZE min(512u, \
(1u << (8 * sizeof(((struct wl128x_tx_mem *) 0)->extra_bytes))))
struct wl1271_tx_hw_descr {
/* Length of packet in words, including descriptor+header+data */
__le16 length;
union {
struct wl127x_tx_mem wl127x_mem;
struct wl128x_tx_mem wl128x_mem;
} __packed;
/* Device time (in us) when the packet arrived to the driver */
__le32 start_time;
/*
* Max delay in TUs until transmission. The last device time the
* packet can be transmitted is: start_time + (1024 * life_time)
*/
__le16 life_time;
/* Bitwise fields - see TX_ATTR... definitions above. */
__le16 tx_attr;
/* Packet identifier used also in the Tx-Result. */
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
/* host link ID (HLID) */
u8 hlid;
u8 reserved;
} __packed;
enum wl1271_tx_hw_res_status {
TX_SUCCESS = 0,
TX_HW_ERROR = 1,
TX_DISABLED = 2,
TX_RETRY_EXCEEDED = 3,
TX_TIMEOUT = 4,
TX_KEY_NOT_FOUND = 5,
TX_PEER_NOT_FOUND = 6,
TX_SESSION_MISMATCH = 7,
TX_LINK_NOT_VALID = 8,
};
struct wl1271_tx_hw_res_descr {
/* Packet Identifier - same value used in the Tx descriptor.*/
u8 id;
/* The status of the transmission, indicating success or one of
several possible reasons for failure. */
u8 status;
/* Total air access duration including all retrys and overheads.*/
__le16 medium_usage;
/* The time passed from host xfer to Tx-complete.*/
__le32 fw_handling_time;
/* Total media delay
(from 1st EDCA AIFS counter until TX Complete). */
__le32 medium_delay;
/* LS-byte of last TKIP seq-num (saved per AC for recovery). */
u8 tx_security_sequence_number_lsb;
/* Retry count - number of transmissions without successful ACK.*/
u8 ack_failures;
/* The rate that succeeded getting ACK
(Valid only if status=SUCCESS). */
u8 rate_class_index;
/* for 4-byte alignment. */
u8 spare;
} __packed;
struct wl1271_tx_hw_res_if {
__le32 tx_result_fw_counter;
__le32 tx_result_host_counter;
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
} __packed;
/*
 * Map a mac80211 queue index (0..3) to the firmware access-category
 * constant.  Any out-of-range index falls back to best effort.
 */
static inline int wl1271_tx_get_queue(int queue)
{
	if (queue == 0)
		return CONF_TX_AC_VO;
	if (queue == 1)
		return CONF_TX_AC_VI;
	if (queue == 3)
		return CONF_TX_AC_BK;

	/* queue 2 and anything unexpected */
	return CONF_TX_AC_BE;
}
/*
 * Inverse of wl1271_tx_get_queue(): map a firmware access category back
 * to the mac80211 queue index.  Unrecognised values map to best effort (2).
 */
static inline int wl1271_tx_get_mac80211_queue(int queue)
{
	switch (queue) {
	case CONF_TX_AC_VO:
		return 0;
	case CONF_TX_AC_VI:
		return 1;
	case CONF_TX_AC_BK:
		return 3;
	case CONF_TX_AC_BE:
	default:
		return 2;
	}
}
/* Sum the per-access-category queued-frame counters into a single total. */
static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
{
	int i, count = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		count += wl->tx_queue_count[i];

	return count;
}
/* Tx path entry points. */
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);

/* Rate-set conversion helpers. */
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);

/* Host link id (hlid) selection for outgoing frames. */
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			 struct sk_buff *skb);
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb);

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);

/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl);

#endif
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2014 Oleksij Rempel <[email protected]>
* Add Alphascale ASM9260 support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/stmp_device.h>
#include <asm/exception.h>
#include "alphascale_asm9260-icoll.h"
/*
 * This device provides 4 offsets for each register:
 * 0x0 - plain read/write mode
 * 0x4 - set mode, OR logic.
 * 0x8 - clr mode, XOR logic.
 * 0xc - toggle mode.
 */
#define SET_REG 4
#define CLR_REG 8
#define HW_ICOLL_VECTOR 0x0000
#define HW_ICOLL_LEVELACK 0x0010
#define HW_ICOLL_CTRL 0x0020
#define HW_ICOLL_STAT_OFFSET 0x0070
#define HW_ICOLL_INTERRUPT0 0x0120
#define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10)
#define BM_ICOLL_INTR_ENABLE BIT(2)
#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1
#define ICOLL_NUM_IRQS 128
/* Supported interrupt-collector hardware variants. */
enum icoll_type {
	ICOLL,		/* Freescale MXS */
	ASM9260_ICOLL,	/* Alphascale ASM9260 */
};

/* Pre-computed register addresses for the probed controller. */
struct icoll_priv {
	void __iomem *vector;
	void __iomem *levelack;
	void __iomem *ctrl;
	void __iomem *stat;
	void __iomem *intr;
	void __iomem *clear;	/* NULL on MXS, which has no clear bank */
	enum icoll_type type;
};

static struct icoll_priv icoll_priv;
static struct irq_domain *icoll_domain;
/* calculate bit offset depending on the number of interrupts per register */
static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
{
	/*
	 * Four irqs share one register; irq N occupies byte (N % 4),
	 * so the bit must be shifted left by 8 * (hwirq % 4).
	 */
	u32 shift = (d->hwirq & 3) << 3;

	return bit << shift;
}
/* calculate mem offset depending on the number of interrupts per register */
static void __iomem *icoll_intr_reg(struct irq_data *d)
{
	/* One 0x10-spaced register per group of four interrupts. */
	unsigned long group = d->hwirq >> 2;

	return icoll_priv.intr + group * 0x10;
}
/* Acknowledge the interrupt currently being serviced. */
static void icoll_ack_irq(struct irq_data *d)
{
	/*
	 * The Interrupt Collector is able to prioritize irqs.
	 * Currently only level 0 is used. So acking can use
	 * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
	 */
	__raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
			icoll_priv.levelack);
}

/* MXS: one enable register per irq; write the enable bit to the CLR offset. */
static void icoll_mask_irq(struct irq_data *d)
{
	__raw_writel(BM_ICOLL_INTR_ENABLE,
			icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
}

/* MXS: write the enable bit to the SET offset to unmask. */
static void icoll_unmask_irq(struct irq_data *d)
{
	__raw_writel(BM_ICOLL_INTR_ENABLE,
			icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
}

/* ASM9260: four irqs per register, so shift the bit into this irq's byte. */
static void asm9260_mask_irq(struct irq_data *d)
{
	__raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
			icoll_intr_reg(d) + CLR_REG);
}

/*
 * ASM9260: clear the irq's bit in the dedicated clear bank before
 * re-enabling it in the (shared, byte-packed) enable register.
 */
static void asm9260_unmask_irq(struct irq_data *d)
{
	__raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq),
		     icoll_priv.clear +
		     ASM9260_HW_ICOLL_CLEARn(d->hwirq));

	__raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
			icoll_intr_reg(d) + SET_REG);
}
/* irq_chip for the MXS variant (one enable register per irq). */
static struct irq_chip mxs_icoll_chip = {
	.irq_ack = icoll_ack_irq,
	.irq_mask = icoll_mask_irq,
	.irq_unmask = icoll_unmask_irq,
	.flags = IRQCHIP_MASK_ON_SUSPEND |
		 IRQCHIP_SKIP_SET_WAKE,
};

/* irq_chip for the ASM9260 variant (four irqs packed per register). */
static struct irq_chip asm9260_icoll_chip = {
	.irq_ack = icoll_ack_irq,
	.irq_mask = asm9260_mask_irq,
	.irq_unmask = asm9260_unmask_irq,
	.flags = IRQCHIP_MASK_ON_SUSPEND |
		 IRQCHIP_SKIP_SET_WAKE,
};

/*
 * Top-level interrupt entry: read the pending irq number from the
 * status register, write it back to the vector register (presumably
 * this latches the irq being serviced -- see the SoC reference manual),
 * then dispatch through the irq domain.
 */
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = __raw_readl(icoll_priv.stat);
	__raw_writel(irqnr, icoll_priv.vector);
	handle_domain_irq(icoll_domain, irqnr, regs);
}
/* Bind a freshly mapped virq to the irq_chip matching the hardware variant. */
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hw)
{
	struct irq_chip *chip = (icoll_priv.type == ICOLL) ?
			&mxs_icoll_chip : &asm9260_icoll_chip;

	irq_set_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}
static const struct irq_domain_ops icoll_irq_domain_ops = {
	.map = icoll_irq_domain_map,
	.xlate = irq_domain_xlate_onecell,
};

/* Register a linear irq domain covering 'num' hardware irqs; fatal on failure. */
static void __init icoll_add_domain(struct device_node *np,
			  int num)
{
	icoll_domain = irq_domain_add_linear(np, num,
					     &icoll_irq_domain_ops, NULL);

	if (!icoll_domain)
		panic("%s: unable to create irq domain", np->full_name);
}

/* Map the controller's register window described by the DT node; fatal on failure. */
static void __iomem * __init icoll_init_iobase(struct device_node *np)
{
	void __iomem *icoll_base;

	icoll_base = of_io_request_and_map(np, 0, np->name);
	if (IS_ERR(icoll_base))
		panic("%s: unable to map resource", np->full_name);
	return icoll_base;
}
/*
 * Probe the Freescale MXS interrupt collector: compute register
 * addresses, reset the block (which initializes every irq's priority
 * to level 0), and register the irq domain.
 */
static int __init icoll_of_init(struct device_node *np,
			  struct device_node *interrupt_parent)
{
	void __iomem *icoll_base;

	icoll_priv.type = ICOLL;

	icoll_base = icoll_init_iobase(np);
	icoll_priv.vector = icoll_base + HW_ICOLL_VECTOR;
	icoll_priv.levelack = icoll_base + HW_ICOLL_LEVELACK;
	icoll_priv.ctrl = icoll_base + HW_ICOLL_CTRL;
	icoll_priv.stat = icoll_base + HW_ICOLL_STAT_OFFSET;
	icoll_priv.intr = icoll_base + HW_ICOLL_INTERRUPT0;
	icoll_priv.clear = NULL;	/* MXS has no separate clear bank */

	/*
	 * Interrupt Collector reset, which initializes the priority
	 * for each irq to level 0.
	 */
	stmp_reset_block(icoll_priv.ctrl);

	icoll_add_domain(np, ICOLL_NUM_IRQS);

	return 0;
}
IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);

/*
 * Probe the Alphascale ASM9260 variant: different register layout,
 * four interrupts packed per enable register, and an explicit clear
 * register bank.
 */
static int __init asm9260_of_init(struct device_node *np,
			  struct device_node *interrupt_parent)
{
	void __iomem *icoll_base;
	int i;

	icoll_priv.type = ASM9260_ICOLL;

	icoll_base = icoll_init_iobase(np);
	icoll_priv.vector = icoll_base + ASM9260_HW_ICOLL_VECTOR;
	icoll_priv.levelack = icoll_base + ASM9260_HW_ICOLL_LEVELACK;
	icoll_priv.ctrl = icoll_base + ASM9260_HW_ICOLL_CTRL;
	icoll_priv.stat = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET;
	icoll_priv.intr = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0;
	icoll_priv.clear = icoll_base + ASM9260_HW_ICOLL_CLEAR0;

	writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE,
			icoll_priv.ctrl);
	/*
	 * ASM9260 don't provide reset bit. So, we need to set level 0
	 * manually.
	 */
	for (i = 0; i < 16 * 0x10; i += 0x10)
		writel(0, icoll_priv.intr + i);

	icoll_add_domain(np, ASM9260_NUM_IRQS);
	set_handle_irq(icoll_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
| {
"pile_set_name": "Github"
} |
<html class="reftest-wait">
<head>
<title>mo@movablelimits</title>
<meta charset="utf-8"/>
<!-- Dynamic reftest: once the harness fires MozReftestInvalidate, the
     movablelimits="false" attribute is removed from the <mo>, and the
     "reftest-wait" class is cleared to signal that the final rendering
     may be compared against the reference. -->
<script type="text/javascript">
  function doTest() {
    document.getElementById('a').removeAttribute('movablelimits');
    document.documentElement.removeAttribute("class");
  }
  window.addEventListener("MozReftestInvalidate",doTest, false);
</script>
</head>
<body>
  <math>
    <munder>
      <mo id="a" movablelimits="false">∑</mo>
      <mi>x</mi>
    </munder>
  </math>
</body>
</html>
"pile_set_name": "Github"
} |
/*
* Copyright 2018 pragmatic-scala.reactiveplatform.xyz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package chapter5
// Demonstrates using Java generic collections from Scala: list1 keeps
// the interface type via an explicit annotation, list2 relies on
// inference (so its static type is the concrete ArrayList).
object Generics extends App {
  // #snip
  import java._

  var list1: util.List[Int] = new util.ArrayList[Int]
  var list2 = new util.ArrayList[Int]
  // #snip
}
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x",
"filename" : "notification.png"
},
{
"idiom" : "universal",
"scale" : "2x",
"filename" : "[email protected]"
},
{
"idiom" : "universal",
"scale" : "3x",
"filename" : "[email protected]"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
// Copyright 2016 Documize Inc. <[email protected]>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <[email protected]>.
//
// https://documize.com
import { inject as service } from '@ember/service';
import Component from '@ember/component';
// Component that exposes icon/meta/logo attributes.  When rendering a
// logo it stores a current-timestamp `cacheBuster` value — presumably
// used by the template to defeat browser caching of the logo image
// (confirm against the template).
export default Component.extend({
	appMeta: service(),

	icon: null,   // icon identifier passed in by the caller
	meta: null,   // associated meta document
	logo: false,  // true when the component renders the logo variant

	didReceiveAttrs() {
		this._super(...arguments);

		if (this.get('logo')) {
			// "+ new Date()" coerces the date to epoch milliseconds.
			let cb = + new Date();
			this.set('cacheBuster', cb);
		}
	}
});
| {
"pile_set_name": "Github"
} |
require 'rails/generators'
module Pageflow
  module Generators
    # Rails generator that installs an editable copy of the default
    # Pageflow theme (stylesheets plus preview images) into the host
    # application under the given theme name.
    class ThemeGenerator < Rails::Generators::Base
      desc 'Creates a configurable theme based on the default theme.'

      # Theme name; used as the directory/file name for assets below.
      argument :name,
               required: false,
               default: 'custom',
               desc: 'The name of the new theme'

      source_root File.expand_path('../templates', __FILE__)

      # Copy stylesheet templates and preview images into the app tree.
      def copy_template
        directory('themes', File.join('app', 'assets', 'stylesheets', 'pageflow', 'themes'))
        empty_directory(File.join('app', 'assets', 'images', 'pageflow', 'themes', name))
        copy_file('preview.png', "app/assets/images/pageflow/themes/#{name}/preview.png")
        copy_file('preview_thumbnail.png',
                  "app/assets/images/pageflow/themes/#{name}/preview_thumbnail.png")
      end
    end
  end
end
"pile_set_name": "Github"
} |
//
// Copyright (c) 2017-2020 PSPDFKit GmbH. All rights reserved.
//
// The PSPDFKit Sample applications are licensed with a modified BSD license.
// Please see License for details. This notice may not be removed from
// this file.
//
#import "PDFXDestination.h"
NS_ASSUME_NONNULL_BEGIN
// Private class extension of PDFXDestination.  Currently empty; kept as
// an extension point for internal declarations.
@interface PDFXDestination ()
@end
NS_ASSUME_NONNULL_END
| {
"pile_set_name": "Github"
} |
#ifndef MAIN_H
#define MAIN_H

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <math.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/uio.h>

#include "ssl.h"
#include "aprintf.h"
#include "stats.h"
#include "units.h"
#include "zmalloc.h"

struct config;

/*
 * NOTE(review): these are declared static in a header, so they are only
 * meaningful in the single translation unit that includes this header
 * and defines them.
 */

/* benchmark thread body and socket lifecycle */
static void *thread_main(void *);
static int connect_socket(thread *, connection *);
static int reconnect_socket(thread *, connection *);

/* event-loop timer and socket callbacks */
static int record_rate(aeEventLoop *, long long, void *);
static void socket_connected(aeEventLoop *, int, void *, int);
static void socket_writeable(aeEventLoop *, int, void *, int);
static void socket_readable(aeEventLoop *, int, void *, int);

/* http_parser callbacks */
static int response_complete(http_parser *);
static int header_field(http_parser *, const char *, size_t);
static int header_value(http_parser *, const char *, size_t);
static int response_body(http_parser *, const char *, size_t);

static uint64_t time_us();

/* command-line and URL handling */
static int parse_args(struct config *, char **, struct http_parser_url *, char **, int, char **);
static char *copy_url_part(char *, struct http_parser_url *, enum http_parser_url_fields);

/* results reporting */
static void print_stats_header();
static void print_stats(char *, stats *, char *(*)(long double));
static void print_stats_latency(stats *);

#endif /* MAIN_H */
| {
"pile_set_name": "Github"
} |
const { assert, skip, test, module: describe, only } = require('qunit');
const { GPU } = require('../../../../../../src');
describe('feature: to-string single precision constants 2d Array');
// Verifies that a single-precision kernel using a 2D-array constant can
// be serialized with toString(), revived via new Function(), and then
// re-bound with a different constant of the same shape.
function testConstant(mode, context, canvas) {
  const gpu = new GPU({ mode });
  // 4x4 constant; the kernel sums each column.
  const a = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
  ];
  const originalKernel = gpu.createKernel(function() {
    let sum = 0;
    for (let y = 0; y < 4; y++) {
      sum += this.constants.a[y][this.thread.x];
    }
    return sum;
  }, {
    canvas,
    context,
    output: [4],
    precision: 'single',
    constants: {
      a
    }
  });
  // Column sums of `a`.
  const expected = new Float32Array([28,32,36,40]);
  const originalResult = originalKernel();
  assert.deepEqual(originalResult, expected);
  // Round-trip the kernel through source text and run the revived copy.
  const kernelString = originalKernel.toString();
  const Kernel = new Function('return ' + kernelString)();
  const newResult = Kernel({ context, constants: { a } })();
  assert.deepEqual(newResult, expected);
  // The revived kernel must also accept a different constant value.
  const b = [
    [1, 1, 1, 1],
    [1, 1, 1, 1],
    [1, 1, 1, 1],
    [1, 1, 1, 1],
  ];
  const expected2 = new Float32Array([4,4,4,4]);
  const newResult2 = Kernel({ context, constants: { a: b } })();
  assert.deepEqual(newResult2, expected2);
  gpu.destroy();
}

// Run once per backend supported by the current environment.
(GPU.isSinglePrecisionSupported && GPU.isWebGLSupported ? test : skip)('webgl', () => {
  const canvas = document.createElement('canvas');
  const context = canvas.getContext('webgl');
  testConstant('webgl', context, canvas);
});

(GPU.isSinglePrecisionSupported && GPU.isWebGL2Supported ? test : skip)('webgl2', () => {
  const canvas = document.createElement('canvas');
  const context = canvas.getContext('webgl2');
  testConstant('webgl2', context, canvas);
});

(GPU.isSinglePrecisionSupported && GPU.isHeadlessGLSupported ? test : skip)('headlessgl', () => {
  testConstant('headlessgl', require('gl')(1, 1), null);
});

test('cpu', () => {
  testConstant('cpu');
});
| {
"pile_set_name": "Github"
} |
; Load the wild encounter tables for the current map into WRAM
; (wGrassRate/wGrassMons and wWaterRate/wWaterMons).
LoadWildData::
	ld hl, WildDataPointers
	ld a, [wCurMap]
; get wild data for current map
	ld c, a
	ld b, 0
	add hl, bc
	add hl, bc ; pointer table entries are two bytes wide
	ld a, [hli]
	ld h, [hl]
	ld l, a ; hl now points to wild data for current map
	ld a, [hli]
	ld [wGrassRate], a
	and a
	jr z, .NoGrassData ; if no grass data, skip to surfing data
	push hl
	ld de, wGrassMons ; otherwise, load grass data
	ld bc, $14 ; $14 bytes of encounter slots
	call CopyData
	pop hl
	ld bc, $14 ; skip past the grass slots to the water data
	add hl, bc
.NoGrassData
	ld a, [hli]
	ld [wWaterRate], a
	and a
	ret z ; if no water data, we're done
	ld de, wWaterMons ; otherwise, load surfing data
	ld bc, $14
	jp CopyData ; tail call: CopyData's ret returns to our caller

INCLUDE "data/wild/grass_water.asm"
| {
"pile_set_name": "Github"
} |
#include "abr_algo.hh"
#include <cmath>
using namespace std;
/* Convert an SSIM value in [0, 1] to decibels, clamped to
 * [MIN_SSIM, MAX_SSIM]; an SSIM of exactly 1 maps to MAX_SSIM. */
double ssim_db(const double ssim)
{
  if (ssim == 1) {
    return MAX_SSIM;
  }

  const double db = -10 * log10(1 - ssim);
  return max(MIN_SSIM, min(MAX_SSIM, db));
}
| {
"pile_set_name": "Github"
} |
<?php
namespace FedEx\PickupService\SimpleType;
use FedEx\AbstractSimpleType;
/**
* CodAddTransportationChargeBasisType
*
* @author Jeremy Dunn <[email protected]>
* @package PHP FedEx API wrapper
* @subpackage Pickup Service
*/
class CodAddTransportationChargeBasisType extends AbstractSimpleType
{
    // Enumerated charge bases accepted by the FedEx pickup service for
    // a COD "add transportation charge" amount.
    const _COD_SURCHARGE = 'COD_SURCHARGE';
    const _NET_CHARGE = 'NET_CHARGE';
    const _NET_FREIGHT = 'NET_FREIGHT';
    const _TOTAL_CUSTOMER_CHARGE = 'TOTAL_CUSTOMER_CHARGE';
}
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright (c) 2009, 2017 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Eclipse Distribution License v1.0 which accompany this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Ian Craggs - initial implementation and documentation
* Ian Craggs, Allan Stockdill-Mander - SSL support
* Ian Craggs - multiple server connection support
* Ian Craggs - fix for bug 413429 - connectionLost not called
* Ian Craggs - fix for bug 415042 - using already freed structure
* Ian Craggs - fix for bug 419233 - mutexes not reporting errors
* Ian Craggs - fix for bug 420851
* Ian Craggs - fix for bug 432903 - queue persistence
* Ian Craggs - MQTT 3.1.1 support
* Rong Xiang, Ian Craggs - C++ compatibility
* Ian Craggs - fix for bug 442400: reconnecting after network cable unplugged
* Ian Craggs - fix for bug 444934 - incorrect free in freeCommand1
* Ian Craggs - fix for bug 445891 - assigning msgid is not thread safe
* Ian Craggs - fix for bug 465369 - longer latency than expected
* Ian Craggs - fix for bug 444103 - success/failure callbacks not invoked
* Ian Craggs - fix for bug 484363 - segfault in getReadySocket
* Ian Craggs - automatic reconnect and offline buffering (send while disconnected)
* Ian Craggs - fix for bug 472250
* Ian Craggs - fix for bug 486548
* Ian Craggs - SNI support
* Ian Craggs - auto reconnect timing fix #218
* Ian Craggs - fix for issue #190
*******************************************************************************/
/**
* @file
* \brief Asynchronous API implementation
*
*/
#define _GNU_SOURCE /* for pthread_mutexattr_settype */
#include <stdlib.h>
#if !defined(WIN32) && !defined(WIN64)
#include <sys/time.h>
#endif
#if !defined(NO_PERSISTENCE)
#include "MQTTPersistence.h"
#endif
#include "MQTTAsync.h"
#include "utf-8.h"
#include "MQTTProtocol.h"
#include "MQTTProtocolOut.h"
#include "Thread.h"
#include "SocketBuffer.h"
#include "StackTrace.h"
#include "Heap.h"
#define URI_TCP "tcp://"
#include "VersionInfo.h"
const char *client_timestamp_eye = "MQTTAsyncV3_Timestamp " BUILD_TIMESTAMP;
const char *client_version_eye = "MQTTAsyncV3_Version " CLIENT_VERSION;
/*
 * Process-wide initialization hook; currently only controls whether
 * the library performs OpenSSL initialization itself.
 */
void MQTTAsync_global_init(MQTTAsync_init_options* inits)
{
#if defined(OPENSSL)
	SSLSocket_handleOpensslInit(inits->do_openssl_init);
#endif
}
#if !defined(min)
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
/* Global state shared by every client created in this process. */
static ClientStates ClientState =
{
	CLIENT_VERSION, /* version */
	NULL /* client list */
};

ClientStates* bstate = &ClientState;

MQTTProtocol state;	/* MQTT protocol layer state */

/* Lifecycle states of the background send/receive threads. */
enum MQTTAsync_threadStates
{
	STOPPED, STARTING, RUNNING, STOPPING
};

enum MQTTAsync_threadStates sendThread_state = STOPPED;
enum MQTTAsync_threadStates receiveThread_state = STOPPED;
static thread_id_type sendThread_id = 0,
			receiveThread_id = 0;
#if defined(WIN32) || defined(WIN64)
static mutex_type mqttasync_mutex = NULL;
static mutex_type socket_mutex = NULL;
static mutex_type mqttcommand_mutex = NULL;
static sem_type send_sem = NULL;
extern mutex_type stack_mutex;
extern mutex_type heap_mutex;
extern mutex_type log_mutex;
/*
 * Windows DLL entry point.  On first process attach, create the global
 * mutexes and the send event used by the async machinery.
 *
 * Fix: each reason code now ends with its own break.  The original
 * switch fell through, so a DLL_PROCESS_ATTACH also logged the
 * thread-attach/detach and process-detach trace messages.
 */
BOOL APIENTRY DllMain(HANDLE hModule,
	DWORD ul_reason_for_call,
	LPVOID lpReserved)
{
	switch (ul_reason_for_call)
	{
		case DLL_PROCESS_ATTACH:
			Log(TRACE_MAX, -1, "DLL process attach");
			if (mqttasync_mutex == NULL)
			{
				mqttasync_mutex = CreateMutex(NULL, 0, NULL);
				mqttcommand_mutex = CreateMutex(NULL, 0, NULL);
				send_sem = CreateEvent(
					NULL, /* default security attributes */
					FALSE, /* manual-reset event? */
					FALSE, /* initial state is nonsignaled */
					NULL /* object name */
					);
				stack_mutex = CreateMutex(NULL, 0, NULL);
				heap_mutex = CreateMutex(NULL, 0, NULL);
				log_mutex = CreateMutex(NULL, 0, NULL);
				socket_mutex = CreateMutex(NULL, 0, NULL);
			}
			break;
		case DLL_THREAD_ATTACH:
			Log(TRACE_MAX, -1, "DLL thread attach");
			break;
		case DLL_THREAD_DETACH:
			Log(TRACE_MAX, -1, "DLL thread detach");
			break;
		case DLL_PROCESS_DETACH:
			Log(TRACE_MAX, -1, "DLL process detach");
			break;
	}
	return TRUE;
}
#else
static pthread_mutex_t mqttasync_mutex_store = PTHREAD_MUTEX_INITIALIZER;
static mutex_type mqttasync_mutex = &mqttasync_mutex_store;

static pthread_mutex_t socket_mutex_store = PTHREAD_MUTEX_INITIALIZER;
static mutex_type socket_mutex = &socket_mutex_store;

static pthread_mutex_t mqttcommand_mutex_store = PTHREAD_MUTEX_INITIALIZER;
static mutex_type mqttcommand_mutex = &mqttcommand_mutex_store;

static cond_type_struct send_cond_store = { PTHREAD_COND_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };
static cond_type send_cond = &send_cond_store;

/*
 * POSIX one-time initialization: re-create the global mutexes with
 * error-checking semantics and initialize the send condition variable.
 * Failures are reported on stdout but are not treated as fatal.
 */
void MQTTAsync_init(void)
{
	pthread_mutexattr_t attr;
	int rc;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	if ((rc = pthread_mutex_init(mqttasync_mutex, &attr)) != 0)
		printf("MQTTAsync: error %d initializing async_mutex\n", rc);
	if ((rc = pthread_mutex_init(mqttcommand_mutex, &attr)) != 0)
		printf("MQTTAsync: error %d initializing command_mutex\n", rc);
	if ((rc = pthread_mutex_init(socket_mutex, &attr)) != 0)
		printf("MQTTClient: error %d initializing socket_mutex\n", rc);
	if ((rc = pthread_cond_init(&send_cond->cond, NULL)) != 0)
		printf("MQTTAsync: error %d initializing send_cond cond\n", rc);
	if ((rc = pthread_mutex_init(&send_cond->mutex, &attr)) != 0)
		printf("MQTTAsync: error %d initializing send_cond mutex\n", rc);
}
#define WINAPI
#endif
static volatile int initialized = 0;	/* shared subsystems brought up? */
static List* handles = NULL;		/* all MQTTAsyncs handles in this process */
static int tostop = 0;			/* signals background threads to exit */
static List* commands = NULL;		/* global queue of pending commands */

/* Sample a "start clock" used for command timeouts; the concrete type
   and time source differ per platform. */
#if defined(WIN32) || defined(WIN64)
#define START_TIME_TYPE DWORD
START_TIME_TYPE MQTTAsync_start_clock(void)
{
	return GetTickCount();
}
#elif defined(AIX)
#define START_TIME_TYPE struct timespec
START_TIME_TYPE MQTTAsync_start_clock(void)
{
	static struct timespec start;
	clock_gettime(CLOCK_REALTIME, &start);
	return start;
}
#else
#define START_TIME_TYPE struct timeval
START_TIME_TYPE MQTTAsync_start_clock(void)
{
	static struct timeval start;
	gettimeofday(&start, NULL);
	return start;
}
#endif

/* Milliseconds elapsed since the given start-clock sample. */
#if defined(WIN32) || defined(WIN64)
long MQTTAsync_elapsed(DWORD milliseconds)
{
	return GetTickCount() - milliseconds;
}
#elif defined(AIX)
#define assert(a)
long MQTTAsync_elapsed(struct timespec start)
{
	struct timespec now, res;

	clock_gettime(CLOCK_REALTIME, &now);
	ntimersub(now, start, res);
	return (res.tv_sec)*1000L + (res.tv_nsec)/1000000L;
}
#else
long MQTTAsync_elapsed(struct timeval start)
{
	struct timeval now, res;

	gettimeofday(&now, NULL);
	timersub(&now, &start, &res);
	return (res.tv_sec)*1000 + (res.tv_usec)/1000;
}
#endif
/* A received message queued for delivery to the application. */
typedef struct
{
	MQTTAsync_message* msg;
	char* topicName;
	int topicLen;
	unsigned int seqno; /* only used on restore */
} qEntry;
/*
 * One queued operation (connect, subscribe, publish, ...) together with
 * its completion callbacks.  The union member in 'details' that is
 * valid depends on 'type'.
 */
typedef struct
{
	int type;
	MQTTAsync_onSuccess* onSuccess;
	MQTTAsync_onFailure* onFailure;
	MQTTAsync_token token;
	void* context;
	START_TIME_TYPE start_time;	/* when the command was issued, for timeouts */
	union
	{
		struct
		{
			int count;
			char** topics;
			int* qoss;
		} sub;
		struct
		{
			int count;
			char** topics;
		} unsub;
		struct
		{
			char* destinationName;
			int payloadlen;
			void* payload;
			int qos;
			int retained;
		} pub;
		struct
		{
			int internal;
			int timeout;
		} dis;
		struct
		{
			int currentURI;
			int MQTTVersion; /**< current MQTT version being used to connect */
		} conn;
	} details;
} MQTTAsync_command;
/* Per-client state behind an MQTTAsync handle. */
typedef struct MQTTAsync_struct
{
	char* serverURI;
	int ssl;	/* nonzero when the URI requested TLS */
	Clients* c;

	/* "Global", to the client, callback definitions */
	MQTTAsync_connectionLost* cl;
	MQTTAsync_messageArrived* ma;
	MQTTAsync_deliveryComplete* dc;
	void* context; /* the context to be associated with the main callbacks*/

	MQTTAsync_connected* connected;
	void* connected_context; /* the context to be associated with the connected callback*/

	/* Each time connect is called, we store the options that were used.  These are reused in
	   any call to reconnect, or an automatic reconnect attempt */
	MQTTAsync_command connect;		/* Connect operation properties */
	MQTTAsync_command disconnect;		/* Disconnect operation properties */
	MQTTAsync_command* pending_write;	/* Is there a socket write pending? */

	List* responses;	/* presumably commands awaiting server acks -- confirm against usage */
	unsigned int command_seqno;	/* incremented per persisted command; used as persistence key */
	MQTTPacket* pack;

	/* added for offline buffering */
	MQTTAsync_createOptions* createOptions;
	int shouldBeConnected;

	/* added for automatic reconnect */
	int automaticReconnect;
	int minRetryInterval;
	int maxRetryInterval;
	int serverURIcount;
	char** serverURIs;
	int connectTimeout;

	int currentInterval;	/* current delay between reconnect attempts */
	START_TIME_TYPE lastConnectionFailedTime;
	int retrying;
	int reconnectNow;
} MQTTAsyncs;

/* A command plus the client it belongs to, as held on the global queue. */
typedef struct
{
	MQTTAsync_command command;
	MQTTAsyncs* client;
	unsigned int seqno; /* only used on restore */
} MQTTAsync_queuedCommand;
static int clientSockCompare(void* a, void* b);
static void MQTTAsync_lock_mutex(mutex_type amutex);
static void MQTTAsync_unlock_mutex(mutex_type amutex);
static int MQTTAsync_checkConn(MQTTAsync_command* command, MQTTAsyncs* client);
static void MQTTAsync_terminate(void);
#if !defined(NO_PERSISTENCE)
static int MQTTAsync_unpersistCommand(MQTTAsync_queuedCommand* qcmd);
static int MQTTAsync_persistCommand(MQTTAsync_queuedCommand* qcmd);
static MQTTAsync_queuedCommand* MQTTAsync_restoreCommand(char* buffer, int buflen);
/*static void MQTTAsync_insertInOrder(List* list, void* content, int size);*/
static int MQTTAsync_restoreCommands(MQTTAsyncs* client);
#endif
static int MQTTAsync_addCommand(MQTTAsync_queuedCommand* command, int command_size);
static void MQTTAsync_startConnectRetry(MQTTAsyncs* m);
static void MQTTAsync_checkDisconnect(MQTTAsync handle, MQTTAsync_command* command);
static void MQTTProtocol_checkPendingWrites(void);
static void MQTTAsync_freeServerURIs(MQTTAsyncs* m);
static void MQTTAsync_freeCommand1(MQTTAsync_queuedCommand *command);
static void MQTTAsync_freeCommand(MQTTAsync_queuedCommand *command);
static void MQTTAsync_writeComplete(int socket);
static int MQTTAsync_processCommand(void);
static void MQTTAsync_checkTimeouts(void);
static thread_return_type WINAPI MQTTAsync_sendThread(void* n);
static void MQTTAsync_emptyMessageQueue(Clients* client);
static void MQTTAsync_removeResponsesAndCommands(MQTTAsyncs* m);
static int MQTTAsync_completeConnection(MQTTAsyncs* m, MQTTPacket* pack);
static thread_return_type WINAPI MQTTAsync_receiveThread(void* n);
static void MQTTAsync_stop(void);
static void MQTTAsync_closeOnly(Clients* client);
static void MQTTAsync_closeSession(Clients* client);
static int clientStructCompare(void* a, void* b);
static int MQTTAsync_cleanSession(Clients* client);
static int MQTTAsync_deliverMessage(MQTTAsyncs* m, char* topicName, size_t topicLen, MQTTAsync_message* mm);
static int MQTTAsync_disconnect1(MQTTAsync handle, const MQTTAsync_disconnectOptions* options, int internal);
static int MQTTAsync_disconnect_internal(MQTTAsync handle, int timeout);
static int cmdMessageIDCompare(void* a, void* b);
static int MQTTAsync_assignMsgId(MQTTAsyncs* m);
static int MQTTAsync_countBufferedMessages(MQTTAsyncs* m);
static void MQTTAsync_retry(void);
static int MQTTAsync_connecting(MQTTAsyncs* m);
static MQTTPacket* MQTTAsync_cycle(int* sock, unsigned long timeout, int* rc);
/*static int pubCompare(void* a, void* b);*/
/* Block the calling thread for the given number of milliseconds. */
void MQTTAsync_sleep(long milliseconds)
{
	FUNC_ENTRY;
#if defined(WIN32) || defined(WIN64)
	Sleep(milliseconds);
#else
	usleep(milliseconds*1000);
#endif
	FUNC_EXIT;
}
/**
 * List callback function for comparing clients by socket.
 * @param a pointer to an MQTTAsyncs client
 * @param b pointer to the socket number being searched for
 * @return boolean indicating whether the client uses that socket
 */
static int clientSockCompare(void* a, void* b)
{
	MQTTAsyncs* m = (MQTTAsyncs*)a;
	return m->c->net.socket == *(int*)b;
}

/* Lock 'amutex', logging (but otherwise ignoring) any failure. */
static void MQTTAsync_lock_mutex(mutex_type amutex)
{
	int rc = Thread_lock_mutex(amutex);
	if (rc != 0)
		Log(LOG_ERROR, 0, "Error %s locking mutex", strerror(rc));
}

/* Unlock 'amutex', logging (but otherwise ignoring) any failure. */
static void MQTTAsync_unlock_mutex(mutex_type amutex)
{
	int rc = Thread_unlock_mutex(amutex);
	if (rc != 0)
		Log(LOG_ERROR, 0, "Error %s unlocking mutex", strerror(rc));
}
/*
  Check whether there are any more connect options.  If not then we are
  finished with connect attempts.  More attempts remain when another
  serverURI is still untried, or when MQTT version 4 (3.1.1) just failed
  and a fallback to the older protocol has not yet been attempted.
*/
static int MQTTAsync_checkConn(MQTTAsync_command* command, MQTTAsyncs* client)
{
	int rc;

	FUNC_ENTRY;
	rc = command->details.conn.currentURI + 1 < client->serverURIcount ||
		(command->details.conn.MQTTVersion == 4 && client->c->MQTTVersion == MQTTVERSION_DEFAULT);
	FUNC_EXIT_RC(rc);
	return rc;
}
/*
 * Create a new client instance: validate the arguments, lazily bring up
 * the process-wide subsystems on first use, allocate the handle and its
 * internal Clients record, and (unless persistence is disabled) restore
 * any commands/messages persisted by a previous run.
 * Returns MQTTASYNC_SUCCESS (0) or an MQTTASYNC_* error code.
 */
int MQTTAsync_createWithOptions(MQTTAsync* handle, const char* serverURI, const char* clientId,
		int persistence_type, void* persistence_context, MQTTAsync_createOptions* options)
{
	int rc = 0;
	MQTTAsyncs *m = NULL;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);

	if (serverURI == NULL || clientId == NULL)
	{
		rc = MQTTASYNC_NULL_PARAMETER;
		goto exit;
	}

	if (!UTF8_validateString(clientId))
	{
		rc = MQTTASYNC_BAD_UTF8_STRING;
		goto exit;
	}

	/* The options struct carries an eyecatcher + version for ABI checking. */
	if (options && (strncmp(options->struct_id, "MQCO", 4) != 0 || options->struct_version != 0))
	{
		rc = MQTTASYNC_BAD_STRUCTURE;
		goto exit;
	}

	if (!initialized)
	{
		/* First client in the process: initialize shared subsystems. */
#if defined(HEAP_H)
		Heap_initialize();
#endif
		Log_initialize((Log_nameValue*)MQTTAsync_getVersionInfo());
		bstate->clients = ListInitialize();
		Socket_outInitialize();
		Socket_setWriteCompleteCallback(MQTTAsync_writeComplete);
		handles = ListInitialize();
		commands = ListInitialize();
#if defined(OPENSSL)
		SSLSocket_initialize();
#endif
		initialized = 1;
	}
	m = malloc(sizeof(MQTTAsyncs));
	*handle = m;
	memset(m, '\0', sizeof(MQTTAsyncs));
	/* Strip the URI scheme prefix; remember whether TLS was requested. */
	if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
		serverURI += strlen(URI_TCP);
#if defined(OPENSSL)
	else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
	{
		serverURI += strlen(URI_SSL);
		m->ssl = 1;
	}
#endif
	m->serverURI = MQTTStrdup(serverURI);
	m->responses = ListInitialize();
	ListAppend(handles, m, sizeof(MQTTAsyncs));

	m->c = malloc(sizeof(Clients));
	memset(m->c, '\0', sizeof(Clients));
	m->c->context = m;
	m->c->outboundMsgs = ListInitialize();
	m->c->inboundMsgs = ListInitialize();
	m->c->messageQueue = ListInitialize();
	m->c->clientID = MQTTStrdup(clientId);
	m->shouldBeConnected = 0;
	if (options)
	{
		m->createOptions = malloc(sizeof(MQTTAsync_createOptions));
		memcpy(m->createOptions, options, sizeof(MQTTAsync_createOptions));
	}

#if !defined(NO_PERSISTENCE)
	rc = MQTTPersistence_create(&(m->c->persistence), persistence_type, persistence_context);
	if (rc == 0)
	{
		rc = MQTTPersistence_initialize(m->c, m->serverURI);
		if (rc == 0)
		{
			/* Replay commands and messages that survived a restart. */
			MQTTAsync_restoreCommands(m);
			MQTTPersistence_restoreMessageQueue(m->c);
		}
	}
#endif
	ListAppend(bstate->clients, m->c, sizeof(Clients) + 3*sizeof(List));

exit:
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/*
 * Create a client with default create options; thin wrapper over
 * MQTTAsync_createWithOptions().
 */
int MQTTAsync_create(MQTTAsync* handle, const char* serverURI, const char* clientId,
		int persistence_type, void* persistence_context)
{
	return MQTTAsync_createWithOptions(handle, serverURI, clientId, persistence_type,
		persistence_context, NULL);
}
/*
 * Tear down the process-wide state created on first client creation:
 * stop the background threads, free the global lists plus any commands
 * still queued, and shut down the socket/SSL/heap/log subsystems.
 */
static void MQTTAsync_terminate(void)
{
	FUNC_ENTRY;
	MQTTAsync_stop();
	if (initialized)
	{
		ListElement* elem = NULL;
		ListFree(bstate->clients);
		ListFree(handles);
		while (ListNextElement(commands, &elem))
			MQTTAsync_freeCommand1((MQTTAsync_queuedCommand*)(elem->content));
		ListFree(commands);
		handles = NULL;
		Socket_outTerminate();
#if defined(OPENSSL)
		SSLSocket_terminate();
#endif
#if defined(HEAP_H)
		Heap_terminate();
#endif
		Log_terminate();
		initialized = 0;
	}
	FUNC_EXIT;
}
#if !defined(NO_PERSISTENCE)
/*
 * Remove a queued command's record from the persistence store, keyed by
 * the command's sequence number.  Returns the store's error code (0 on
 * success); failures are logged.
 */
static int MQTTAsync_unpersistCommand(MQTTAsync_queuedCommand* qcmd)
{
	int rc = 0;
	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];

	FUNC_ENTRY;
	sprintf(key, "%s%u", PERSISTENCE_COMMAND_KEY, qcmd->seqno);
	if ((rc = qcmd->client->c->persistence->premove(qcmd->client->c->phandle, key)) != 0)
		Log(LOG_ERROR, 0, "Error %d removing command from persistence", rc);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Serialize a queued SUBSCRIBE, UNSUBSCRIBE or PUBLISH command into the
 * persistence store so it can survive a process restart (restored later by
 * MQTTAsync_restoreCommand).  The command is written as an ordered array of
 * buffers: type, token, then the type-specific fields.  The buffer order here
 * must stay in lock-step with the read order in MQTTAsync_restoreCommand.
 *
 * @param qcmd the queued command to persist
 * @return 0 on success, otherwise the persistence error code (also logged)
 */
static int MQTTAsync_persistCommand(MQTTAsync_queuedCommand* qcmd)
{
	int rc = 0;
	MQTTAsyncs* aclient = qcmd->client;
	MQTTAsync_command* command = &qcmd->command;
	int* lens = NULL;          /* per-buffer lengths, parallel to bufs */
	void** bufs = NULL;        /* buffer pointers handed to pput */
	int bufindex = 0, i, nbufs = 0;
	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];

	FUNC_ENTRY;
	switch (command->type)
	{
	case SUBSCRIBE:
		/* 3 fixed buffers (type, token, count) + topic string and qos per topic */
		nbufs = 3 + (command->details.sub.count * 2);
		lens = (int*)malloc(nbufs * sizeof(int));
		bufs = malloc(nbufs * sizeof(char *));
		bufs[bufindex] = &command->type;
		lens[bufindex++] = sizeof(command->type);
		bufs[bufindex] = &command->token;
		lens[bufindex++] = sizeof(command->token);
		bufs[bufindex] = &command->details.sub.count;
		lens[bufindex++] = sizeof(command->details.sub.count);
		for (i = 0; i < command->details.sub.count; ++i)
		{
			/* topic strings are stored including their NUL terminator */
			bufs[bufindex] = command->details.sub.topics[i];
			lens[bufindex++] = (int)strlen(command->details.sub.topics[i]) + 1;
			bufs[bufindex] = &command->details.sub.qoss[i];
			lens[bufindex++] = sizeof(command->details.sub.qoss[i]);
		}
		/* key = prefix + new sequence number (seqno is bumped per persisted command) */
		sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
		break;

	case UNSUBSCRIBE:
		/* 3 fixed buffers + one topic string per topic (no qos for unsubscribe) */
		nbufs = 3 + command->details.unsub.count;
		lens = (int*)malloc(nbufs * sizeof(int));
		bufs = malloc(nbufs * sizeof(char *));
		bufs[bufindex] = &command->type;
		lens[bufindex++] = sizeof(command->type);
		bufs[bufindex] = &command->token;
		lens[bufindex++] = sizeof(command->token);
		bufs[bufindex] = &command->details.unsub.count;
		lens[bufindex++] = sizeof(command->details.unsub.count);
		for (i = 0; i < command->details.unsub.count; ++i)
		{
			bufs[bufindex] = command->details.unsub.topics[i];
			lens[bufindex++] = (int)strlen(command->details.unsub.topics[i]) + 1;
		}
		sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
		break;

	case PUBLISH:
		/* fixed layout: type, token, destinationName, payloadlen, payload, qos, retained */
		nbufs = 7;
		lens = (int*)malloc(nbufs * sizeof(int));
		bufs = malloc(nbufs * sizeof(char *));
		bufs[bufindex] = &command->type;
		lens[bufindex++] = sizeof(command->type);
		bufs[bufindex] = &command->token;
		lens[bufindex++] = sizeof(command->token);
		bufs[bufindex] = command->details.pub.destinationName;
		lens[bufindex++] = (int)strlen(command->details.pub.destinationName) + 1;
		bufs[bufindex] = &command->details.pub.payloadlen;
		lens[bufindex++] = sizeof(command->details.pub.payloadlen);
		bufs[bufindex] = command->details.pub.payload;
		lens[bufindex++] = command->details.pub.payloadlen;
		bufs[bufindex] = &command->details.pub.qos;
		lens[bufindex++] = sizeof(command->details.pub.qos);
		bufs[bufindex] = &command->details.pub.retained;
		lens[bufindex++] = sizeof(command->details.pub.retained);
		sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
		break;
	}
	/* any other command type leaves nbufs == 0, so nothing is written and
	   key is never read */
	if (nbufs > 0)
	{
		if ((rc = aclient->c->persistence->pput(aclient->c->phandle, key, nbufs, (char**)bufs, lens)) != 0)
			Log(LOG_ERROR, 0, "Error persisting command, rc %d", rc);
		/* remember the sequence number so the record can be removed later */
		qcmd->seqno = aclient->command_seqno;
	}
	if (lens)
		free(lens);
	if (bufs)
		free(bufs);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Reconstruct a queued command from a buffer previously written by
 * MQTTAsync_persistCommand.  The read order must match the write order there.
 *
 * BUG FIX: the topics and qoss arrays were never allocated before being
 * written through - the queued command is zeroed with memset, so the
 * element assignments dereferenced NULL pointers.  The arrays are heap
 * allocations (MQTTAsync_freeCommand1 frees them), so they are now
 * malloc'd from the restored count before the element loop.  The
 * UNSUBSCRIBE case also read the count via details.sub instead of
 * details.unsub; it now uses the unsub member consistently.
 *
 * @param buffer the persisted serialized command
 * @param buflen the length of buffer (currently unused; the format is
 *               self-describing) - presumably intended for validation
 * @return a newly allocated queued command, or NULL for an unknown type
 */
static MQTTAsync_queuedCommand* MQTTAsync_restoreCommand(char* buffer, int buflen)
{
	MQTTAsync_command* command = NULL;
	MQTTAsync_queuedCommand* qcommand = NULL;
	char* ptr = buffer;
	int i;
	size_t data_size;

	FUNC_ENTRY;
	qcommand = malloc(sizeof(MQTTAsync_queuedCommand));
	memset(qcommand, '\0', sizeof(MQTTAsync_queuedCommand));
	command = &qcommand->command;

	/* common prefix: command type then token */
	command->type = *(int*)ptr;
	ptr += sizeof(int);
	command->token = *(MQTTAsync_token*)ptr;
	ptr += sizeof(MQTTAsync_token);

	switch (command->type)
	{
	case SUBSCRIBE:
		command->details.sub.count = *(int*)ptr;
		ptr += sizeof(int);
		/* allocate the arrays the loop below fills in (freed in
		   MQTTAsync_freeCommand1) */
		command->details.sub.topics = malloc(command->details.sub.count * sizeof(char*));
		command->details.sub.qoss = malloc(command->details.sub.count * sizeof(int));
		for (i = 0; i < command->details.sub.count; ++i)
		{
			data_size = strlen(ptr) + 1;	/* topics were stored with their NUL */
			command->details.sub.topics[i] = malloc(data_size);
			strcpy(command->details.sub.topics[i], ptr);
			ptr += data_size;
			command->details.sub.qoss[i] = *(int*)ptr;
			ptr += sizeof(int);
		}
		break;

	case UNSUBSCRIBE:
		command->details.unsub.count = *(int*)ptr;
		ptr += sizeof(int);
		command->details.unsub.topics = malloc(command->details.unsub.count * sizeof(char*));
		for (i = 0; i < command->details.unsub.count; ++i)
		{
			data_size = strlen(ptr) + 1;
			command->details.unsub.topics[i] = malloc(data_size);
			strcpy(command->details.unsub.topics[i], ptr);
			ptr += data_size;
		}
		break;

	case PUBLISH:
		data_size = strlen(ptr) + 1;
		command->details.pub.destinationName = malloc(data_size);
		strcpy(command->details.pub.destinationName, ptr);
		ptr += data_size;
		command->details.pub.payloadlen = *(int*)ptr;
		ptr += sizeof(int);
		data_size = command->details.pub.payloadlen;
		command->details.pub.payload = malloc(data_size);
		memcpy(command->details.pub.payload, ptr, data_size);
		ptr += data_size;
		command->details.pub.qos = *(int*)ptr;
		ptr += sizeof(int);
		command->details.pub.retained = *(int*)ptr;
		ptr += sizeof(int);
		break;

	default:
		/* unknown command type: discard and signal failure with NULL */
		free(qcommand);
		qcommand = NULL;
	}
	FUNC_EXIT;
	return qcommand;
}
/*
static void MQTTAsync_insertInOrder(List* list, void* content, int size)
{
ListElement* index = NULL;
ListElement* current = NULL;
FUNC_ENTRY;
while (ListNextElement(list, &current) != NULL && index == NULL)
{
if (((MQTTAsync_queuedCommand*)content)->seqno < ((MQTTAsync_queuedCommand*)current->content)->seqno)
index = current;
}
ListInsert(list, content, size, index);
FUNC_EXIT;
}*/
/**
 * Restore all persisted commands for a client from its persistence store,
 * inserting them into the global command queue in sequence-number order.
 * Keys that do not start with the command-key prefix are skipped.
 *
 * BUG FIX: the pget buffer was only freed when MQTTAsync_restoreCommand
 * succeeded; it leaked for records that failed to restore.  The free now
 * happens unconditionally after the restore attempt.  The magic "+2"
 * offset used to parse the sequence number is replaced by
 * strlen(PERSISTENCE_COMMAND_KEY), matching the prefix comparison above it.
 *
 * @param client the client whose commands should be restored
 * @return 0 on success, otherwise a persistence error code
 */
static int MQTTAsync_restoreCommands(MQTTAsyncs* client)
{
	int rc = 0;
	char **msgkeys;
	int nkeys;
	int i = 0;
	Clients* c = client->c;
	int commands_restored = 0;

	FUNC_ENTRY;
	if (c->persistence && (rc = c->persistence->pkeys(c->phandle, &msgkeys, &nkeys)) == 0)
	{
		while (rc == 0 && i < nkeys)
		{
			char *buffer = NULL;
			int buflen;

			if (strncmp(msgkeys[i], PERSISTENCE_COMMAND_KEY, strlen(PERSISTENCE_COMMAND_KEY)) != 0)
			{
				;	/* not a persisted command record - ignore */
			}
			else if ((rc = c->persistence->pget(c->phandle, msgkeys[i], &buffer, &buflen)) == 0)
			{
				MQTTAsync_queuedCommand* cmd = MQTTAsync_restoreCommand(buffer, buflen);

				if (cmd)
				{
					cmd->client = client;
					/* the seqno is the numeric suffix after the key prefix */
					cmd->seqno = atoi(msgkeys[i] + strlen(PERSISTENCE_COMMAND_KEY));
					MQTTPersistence_insertInOrder(commands, cmd, sizeof(MQTTAsync_queuedCommand));
					/* keep the client's counter ahead of every restored seqno */
					client->command_seqno = max(client->command_seqno, cmd->seqno);
					commands_restored++;
				}
				free(buffer);	/* was leaked when cmd == NULL */
			}
			if (msgkeys[i])
				free(msgkeys[i]);
			i++;
		}
		if (msgkeys != NULL)
			free(msgkeys);
	}
	Log(TRACE_MINIMUM, -1, "%d commands restored for client %s", commands_restored, c->clientID);
	FUNC_EXIT_RC(rc);
	return rc;
}
#endif
/**
 * Add a command to the global command queue for processing by the send
 * thread.  CONNECT and internal DISCONNECT commands go to the head of the
 * queue (duplicates for the same client are dropped); everything else is
 * appended and, if the client has persistence, persisted.  Wakes the send
 * thread afterwards.
 *
 * @param command the command to queue
 * @param command_size the size of the command structure for the list copy
 * @return the result of signalling the send thread (0 on the Windows path
 *         unless the condition/semaphore signal fails)
 */
static int MQTTAsync_addCommand(MQTTAsync_queuedCommand* command, int command_size)
{
	int rc = 0;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttcommand_mutex);
	/* Don't set start time if the connect command is already in process #218 */
	if ((command->command.type != CONNECT) || (command->client->c->connect_state == 0))
		command->command.start_time = MQTTAsync_start_clock();
	if (command->command.type == CONNECT ||
		(command->command.type == DISCONNECT && command->command.details.dis.internal))
	{
		/* connect/internal-disconnect jump the queue; but if the identical
		   command for this client is already at the head, it is a duplicate */
		MQTTAsync_queuedCommand* head = NULL;
		if (commands->first)
			head = (MQTTAsync_queuedCommand*)(commands->first->content);
		if (head != NULL && head->client == command->client && head->command.type == command->command.type)
			MQTTAsync_freeCommand(command); /* ignore duplicate connect or disconnect command */
		else
			ListInsert(commands, command, command_size, commands->first); /* add to the head of the list */
	}
	else
	{
		ListAppend(commands, command, command_size);
#if !defined(NO_PERSISTENCE)
		/* persist so the command survives a restart (head commands are not persisted) */
		if (command->client->c->persistence)
			MQTTAsync_persistCommand(command);
#endif
	}
	MQTTAsync_unlock_mutex(mqttcommand_mutex);
	/* wake the send thread so it picks up the new command promptly */
#if !defined(WIN32) && !defined(WIN64)
	rc = Thread_signal_cond(send_cond);
	if (rc != 0)
		Log(LOG_ERROR, 0, "Error %d from signal cond", rc);
#else
	if (!Thread_check_sem(send_sem))
		Thread_post_sem(send_sem);
#endif
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Start (or continue) the automatic reconnect back-off for a client.
 * No-op unless automatic reconnect is enabled and the client is supposed
 * to be connected.  The first failure starts at the minimum interval;
 * subsequent failures double the interval up to the configured maximum.
 *
 * @param m the async client to (re)schedule reconnection for
 */
static void MQTTAsync_startConnectRetry(MQTTAsyncs* m)
{
	if (!m->automaticReconnect || !m->shouldBeConnected)
		return;

	m->lastConnectionFailedTime = MQTTAsync_start_clock();
	if (m->retrying)
		m->currentInterval = min(m->currentInterval * 2, m->maxRetryInterval);
	else
	{
		m->currentInterval = m->minRetryInterval;
		m->retrying = 1;
	}
}
/**
 * Request a reconnect for a client.  With automatic reconnect enabled this
 * just flags an immediate retry for the background machinery (only valid
 * while the client should be connected); otherwise it re-queues the saved
 * connect command at the head of the command queue.
 *
 * @param handle the client to reconnect
 * @return MQTTASYNC_SUCCESS if a reconnect was scheduled, MQTTASYNC_FAILURE
 *         if automatic reconnect is on but the client was not expected to
 *         be connected
 */
int MQTTAsync_reconnect(MQTTAsync handle)
{
	int rc = MQTTASYNC_FAILURE;
	MQTTAsyncs* m = handle;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (m->automaticReconnect)
	{
		if (m->shouldBeConnected)
		{
			/* let the timeout checker fire a reconnect on its next pass */
			m->reconnectNow = 1;
			if (m->retrying == 0)
			{
				m->currentInterval = m->minRetryInterval;
				m->retrying = 1;
			}
			rc = MQTTASYNC_SUCCESS;
		}
	}
	else
	{
		/* to reconnect, put the connect command to the head of the command queue */
		MQTTAsync_queuedCommand* conn = malloc(sizeof(MQTTAsync_queuedCommand));
		memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
		conn->client = m;
		conn->command = m->connect;	/* replay the saved connect command */
		/* make sure that the version attempts are restarted */
		if (m->c->MQTTVersion == MQTTVERSION_DEFAULT)
			conn->command.details.conn.MQTTVersion = 0;
		MQTTAsync_addCommand(conn, sizeof(m->connect));
		rc = MQTTASYNC_SUCCESS;
	}
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Complete a pending disconnect once all in-flight messages have drained or
 * the disconnect timeout has elapsed.  Closes the session and fires either
 * the connectionLost callback (for internal disconnects, followed by
 * scheduling an automatic reconnect) or the disconnect onSuccess callback.
 *
 * @param handle the client being disconnected
 * @param command the disconnect command carrying timeout and callbacks
 */
static void MQTTAsync_checkDisconnect(MQTTAsync handle, MQTTAsync_command* command)
{
	MQTTAsyncs* m = handle;

	FUNC_ENTRY;
	/* wait for all inflight message flows to finish, up to timeout */;
	if (m->c->outboundMsgs->count == 0 || MQTTAsync_elapsed(command->start_time) >= command->details.dis.timeout)
	{
		/* capture connected state before closeSession clears it */
		int was_connected = m->c->connected;
		MQTTAsync_closeSession(m->c);
		if (command->details.dis.internal)
		{
			if (m->cl && was_connected)
			{
				Log(TRACE_MIN, -1, "Calling connectionLost for client %s", m->c->clientID);
				(*(m->cl))(m->context, NULL);
			}
			MQTTAsync_startConnectRetry(m);
		}
		else if (command->onSuccess)
		{
			Log(TRACE_MIN, -1, "Calling disconnect complete for client %s", m->c->clientID);
			(*(command->onSuccess))(command->context, NULL);
		}
	}
	FUNC_EXIT;
}
/**
* See if any pending writes have been completed, and cleanup if so.
* Cleaning up means removing any publication data that was stored because the write did
* not originally complete.
*/
/**
 * See if any pending writes have been completed, and cleanup if so.
 * Cleaning up means removing any publication data that was stored because
 * the write did not originally complete.
 */
static void MQTTProtocol_checkPendingWrites(void)
{
	FUNC_ENTRY;
	if (state.pending_writes.count > 0)
	{
		ListElement* le = state.pending_writes.first;
		while (le)
		{
			if (Socket_noPendingWrites(((pending_write*)(le->content))->socket))
			{
				MQTTProtocol_removePublication(((pending_write*)(le->content))->p);
				/* set current before removal so ListRemove's internal advance
				   leaves it pointing at the next element to visit */
				state.pending_writes.current = le;
				ListRemove(&(state.pending_writes), le->content); /* does NextElement itself */
				le = state.pending_writes.current;
			}
			else
				ListNextElement(&(state.pending_writes), &le);
		}
	}
	FUNC_EXIT;
}
/**
 * Free the array of alternate server URIs held by a client, including each
 * URI string.
 *
 * FIX: the loop previously dereferenced m->serverURIs before the NULL test,
 * so a non-zero serverURIcount with a NULL array would have crashed; the
 * whole free sequence is now guarded by the NULL check.
 *
 * @param m the client whose serverURIs array should be released
 */
static void MQTTAsync_freeServerURIs(MQTTAsyncs* m)
{
	int i;

	if (m->serverURIs)
	{
		for (i = 0; i < m->serverURIcount; ++i)
			free(m->serverURIs[i]);
		free(m->serverURIs);
	}
}
/**
 * Free the heap allocations held inside a queued command (topic strings,
 * qos arrays, publish payload/destination) without freeing the command
 * structure itself.  Only SUBSCRIBE, UNSUBSCRIBE and PUBLISH commands own
 * internal allocations; other types need no cleanup.
 *
 * @param command the queued command whose internals should be released
 */
static void MQTTAsync_freeCommand1(MQTTAsync_queuedCommand *command)
{
	int i;

	switch (command->command.type)
	{
	case SUBSCRIBE:
		for (i = 0; i < command->command.details.sub.count; i++)
			free(command->command.details.sub.topics[i]);
		free(command->command.details.sub.topics);
		free(command->command.details.sub.qoss);
		break;
	case UNSUBSCRIBE:
		for (i = 0; i < command->command.details.unsub.count; i++)
			free(command->command.details.unsub.topics[i]);
		free(command->command.details.unsub.topics);
		break;
	case PUBLISH:
		/* qos 1 and 2 topics are freed in the protocol code when the flows are completed */
		if (command->command.details.pub.destinationName)
			free(command->command.details.pub.destinationName);
		free(command->command.details.pub.payload);
		break;
	default:
		break;
	}
}
/**
 * Free a queued command: first its internal allocations (via
 * MQTTAsync_freeCommand1), then the command structure itself.
 *
 * @param command the queued command to free entirely
 */
static void MQTTAsync_freeCommand(MQTTAsync_queuedCommand *command)
{
	MQTTAsync_freeCommand1(command);
	free(command);
}
/**
 * Called when a previously-partial socket write has finished.  Finds the
 * client owning the socket, refreshes its last-sent time and, if the client
 * had a publish flagged as pending, fires that publish's onSuccess callback
 * and removes the matching response entry.
 *
 * FIX: previously, if no response entry matched the pending write, `com`
 * still pointed at the last (non-matching) element examined and was
 * detached and freed anyway, corrupting the responses list.  The detach and
 * free (and the success callback) now only run when the search actually
 * found the matching response (cur_response != NULL).
 *
 * @param socket the socket whose write completed
 */
static void MQTTAsync_writeComplete(int socket)
{
	ListElement* found = NULL;

	FUNC_ENTRY;
	/* a partial write is now complete for a socket - this will be on a publish */
	MQTTProtocol_checkPendingWrites();

	/* find the client using this socket */
	if ((found = ListFindItem(handles, &socket, clientSockCompare)) != NULL)
	{
		MQTTAsyncs* m = (MQTTAsyncs*)(found->content);

		time(&(m->c->net.lastSent));
		/* see if there is a pending write flagged */
		if (m->pending_write)
		{
			ListElement* cur_response = NULL;
			MQTTAsync_command* command = m->pending_write;
			MQTTAsync_queuedCommand* com = NULL;

			/* locate the response entry that corresponds to the pending write */
			while (ListNextElement(m->responses, &cur_response))
			{
				com = (MQTTAsync_queuedCommand*)(cur_response->content);
				if (com->client->pending_write == m->pending_write)
					break;
			}
			if (cur_response)	/* only act if we really found the response */
			{
				if (command->onSuccess)
				{
					MQTTAsync_successData data;

					data.token = command->token;
					data.alt.pub.destinationName = command->details.pub.destinationName;
					data.alt.pub.message.payload = command->details.pub.payload;
					data.alt.pub.message.payloadlen = command->details.pub.payloadlen;
					data.alt.pub.message.qos = command->details.pub.qos;
					data.alt.pub.message.retained = command->details.pub.retained;
					Log(TRACE_MIN, -1, "Calling publish success for client %s", m->c->clientID);
					(*(command->onSuccess))(command->context, &data);
				}
				ListDetach(m->responses, com);
				MQTTAsync_freeCommand(com);
			}
			m->pending_write = NULL;
		}
	}
	FUNC_EXIT;
}
/**
 * Pick and execute at most one eligible command from the global command
 * queue.  Eligibility: commands for a client must run in order, so once a
 * client's first command is skipped, all of its later commands are skipped
 * on this pass (tracked via ignored_clients); a client must also have no
 * pending socket write and not be mid-connect, and must have message ids
 * available for PUBLISH/SUBSCRIBE/UNSUBSCRIBE.  After execution the command
 * is freed, re-queued (connect retries), or parked on the client's
 * responses list to await the broker's reply.
 *
 * @return 1 if a command was processed, 0 if not (note: rc is overwritten
 *         with (command != NULL) at exit, so the intermediate protocol rc
 *         values are not returned to the caller)
 */
static int MQTTAsync_processCommand(void)
{
	int rc = 0;
	MQTTAsync_queuedCommand* command = NULL;
	ListElement* cur_command = NULL;
	List* ignored_clients = NULL;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	MQTTAsync_lock_mutex(mqttcommand_mutex);

	/* only the first command in the list must be processed for any particular client, so if we skip
	 a command for a client, we must skip all following commands for that client. Use a list of
	 ignored clients to keep track
	 */
	ignored_clients = ListInitialize();

	/* don't try a command until there isn't a pending write for that client, and we are not connecting */
	while (ListNextElement(commands, &cur_command))
	{
		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(cur_command->content);

		if (ListFind(ignored_clients, cmd->client))
			continue;

		if (cmd->command.type == CONNECT || cmd->command.type == DISCONNECT || (cmd->client->c->connected &&
			cmd->client->c->connect_state == 0 && Socket_noPendingWrites(cmd->client->c->net.socket)))
		{
			if ((cmd->command.type == PUBLISH || cmd->command.type == SUBSCRIBE || cmd->command.type == UNSUBSCRIBE) &&
				cmd->client->c->outboundMsgs->count >= MAX_MSG_ID - 1)
			{
				; /* no more message ids available */
			}
			else
			{
				command = cmd;
				break;
			}
		}
		/* this client's head command was skipped - ignore the rest of its commands */
		ListAppend(ignored_clients, cmd->client, sizeof(cmd->client));
	}
	ListFreeNoContent(ignored_clients);

	if (command)
	{
		/* take the command off the queue (and out of persistence) before executing it */
		ListDetach(commands, command);
#if !defined(NO_PERSISTENCE)
		if (command->client->c->persistence)
			MQTTAsync_unpersistCommand(command);
#endif
	}
	MQTTAsync_unlock_mutex(mqttcommand_mutex);

	if (!command)
		goto exit; /* nothing to do */

	if (command->command.type == CONNECT)
	{
		if (command->client->c->connect_state != 0 || command->client->c->connected)
			rc = 0;	/* already connecting or connected - nothing to start */
		else
		{
			char* serverURI = command->client->serverURI;

			/* when alternate URIs are configured, use the one selected by currentURI,
			   stripping the scheme prefix (and noting ssl where applicable) */
			if (command->client->serverURIcount > 0)
			{
				serverURI = command->client->serverURIs[command->command.details.conn.currentURI];
				if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
					serverURI += strlen(URI_TCP);
#if defined(OPENSSL)
				else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
				{
					serverURI += strlen(URI_SSL);
					command->client->ssl = 1;
				}
#endif
			}
			/* with the default version setting, try 3.1.1 first, then fall back to 3.1 */
			if (command->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
			{
				if (command->command.details.conn.MQTTVersion == 0)
					command->command.details.conn.MQTTVersion = MQTTVERSION_3_1_1;
				else if (command->command.details.conn.MQTTVersion == MQTTVERSION_3_1_1)
					command->command.details.conn.MQTTVersion = MQTTVERSION_3_1;
			}
			else
				command->command.details.conn.MQTTVersion = command->client->c->MQTTVersion;

			Log(TRACE_MIN, -1, "Connecting to serverURI %s with MQTT version %d", serverURI, command->command.details.conn.MQTTVersion);
#if defined(OPENSSL)
			rc = MQTTProtocol_connect(serverURI, command->client->c, command->client->ssl, command->command.details.conn.MQTTVersion);
#else
			rc = MQTTProtocol_connect(serverURI, command->client->c, command->command.details.conn.MQTTVersion);
#endif
			/* connect_state still 0 means the connect did not even start */
			if (command->client->c->connect_state == 0)
				rc = SOCKET_ERROR;

			/* if the TCP connect is pending, then we must call select to determine when the connect has completed,
			which is indicated by the socket being ready *either* for reading *or* writing.  The next couple of lines
			make sure we check for writeability as well as readability, otherwise we wait around longer than we need to
			in Socket_getReadySocket() */
			if (rc == EINPROGRESS)
				Socket_addPendingWrite(command->client->c->net.socket);
		}
	}
	else if (command->command.type == SUBSCRIBE)
	{
		/* repackage the topic/qos arrays as lists for the protocol layer */
		List* topics = ListInitialize();
		List* qoss = ListInitialize();
		int i;

		for (i = 0; i < command->command.details.sub.count; i++)
		{
			ListAppend(topics, command->command.details.sub.topics[i], strlen(command->command.details.sub.topics[i]));
			ListAppend(qoss, &command->command.details.sub.qoss[i], sizeof(int));
		}
		rc = MQTTProtocol_subscribe(command->client->c, topics, qoss, command->command.token);
		ListFreeNoContent(topics);
		ListFreeNoContent(qoss);
	}
	else if (command->command.type == UNSUBSCRIBE)
	{
		List* topics = ListInitialize();
		int i;

		for (i = 0; i < command->command.details.unsub.count; i++)
			ListAppend(topics, command->command.details.unsub.topics[i], strlen(command->command.details.unsub.topics[i]));
		rc = MQTTProtocol_unsubscribe(command->client->c, topics, command->command.token);
		ListFreeNoContent(topics);
	}
	else if (command->command.type == PUBLISH)
	{
		Messages* msg = NULL;
		Publish* p = NULL;

		p = malloc(sizeof(Publish));
		p->payload = command->command.details.pub.payload;
		p->payloadlen = command->command.details.pub.payloadlen;
		p->topic = command->command.details.pub.destinationName;
		p->msgId = command->command.token;
		rc = MQTTProtocol_startPublish(command->client->c, p, command->command.details.pub.qos, command->command.details.pub.retained, &msg);

		if (command->command.details.pub.qos == 0)
		{
			if (rc == TCPSOCKET_COMPLETE)
			{
				/* qos 0 publish fully written - success can be reported immediately */
				if (command->command.onSuccess)
				{
					MQTTAsync_successData data;

					data.token = command->command.token;
					data.alt.pub.destinationName = command->command.details.pub.destinationName;
					data.alt.pub.message.payload = command->command.details.pub.payload;
					data.alt.pub.message.payloadlen = command->command.details.pub.payloadlen;
					data.alt.pub.message.qos = command->command.details.pub.qos;
					data.alt.pub.message.retained = command->command.details.pub.retained;
					Log(TRACE_MIN, -1, "Calling publish success for client %s", command->client->c->clientID);
					(*(command->command.onSuccess))(command->command.context, &data);
				}
			}
			else
			{
				/* partial write: flag it so MQTTAsync_writeComplete can finish the callback */
				command->command.details.pub.destinationName = NULL; /* this will be freed by the protocol code */
				command->client->pending_write = &command->command;
			}
		}
		else
			command->command.details.pub.destinationName = NULL; /* this will be freed by the protocol code */
		free(p); /* should this be done if the write isn't complete? */
	}
	else if (command->command.type == DISCONNECT)
	{
		if (command->client->c->connect_state != 0 || command->client->c->connected != 0)
		{
			/* connect_state -2 marks a disconnect in progress for checkDisconnect */
			command->client->c->connect_state = -2;
			MQTTAsync_checkDisconnect(command->client, &command->command);
		}
	}

	/* post-execution disposal of the command */
	if (command->command.type == CONNECT && rc != SOCKET_ERROR && rc != MQTTASYNC_PERSISTENCE_ERROR)
	{
		/* save the connect command for later automatic reconnects */
		command->client->connect = command->command;
		MQTTAsync_freeCommand(command);
	}
	else if (command->command.type == DISCONNECT)
	{
		command->client->disconnect = command->command;
		MQTTAsync_freeCommand(command);
	}
	else if (command->command.type == PUBLISH && command->command.details.pub.qos == 0)
	{
		if (rc == TCPSOCKET_INTERRUPTED)
			ListAppend(command->client->responses, command, sizeof(command));
		else
			MQTTAsync_freeCommand(command);
	}
	else if (rc == SOCKET_ERROR || rc == MQTTASYNC_PERSISTENCE_ERROR)
	{
		if (command->command.type == CONNECT)
		{
			MQTTAsync_disconnectOptions opts = MQTTAsync_disconnectOptions_initializer;
			MQTTAsync_disconnect(command->client, &opts); /* not "internal" because we don't want to call connection lost */
			command->client->shouldBeConnected = 1; /* as above call is not "internal" we need to reset this */
		}
		else
			MQTTAsync_disconnect_internal(command->client, 0);

		if (command->command.type == CONNECT && MQTTAsync_checkConn(&command->command, command->client))
		{
			/* advance to the next MQTT version / server URI and retry the connect */
			Log(TRACE_MIN, -1, "Connect failed, more to try");
			if (command->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
			{
				if (command->command.details.conn.MQTTVersion == MQTTVERSION_3_1)
				{
					command->command.details.conn.currentURI++;
					command->command.details.conn.MQTTVersion = MQTTVERSION_DEFAULT;
				}
			}
			else
				command->command.details.conn.currentURI++;
			/* put the connect command back to the head of the command queue, using the next serverURI */
			rc = MQTTAsync_addCommand(command, sizeof(command->command.details.conn));
		}
		else
		{
			if (command->command.onFailure)
			{
				Log(TRACE_MIN, -1, "Calling command failure for client %s", command->client->c->clientID);
				(*(command->command.onFailure))(command->command.context, NULL);
			}
			MQTTAsync_freeCommand(command);  /* free up the command if necessary */
		}
	}
	else /* put the command into a waiting for response queue for each client, indexed by msgid */
		ListAppend(command->client->responses, command, sizeof(command));

exit:
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	rc = (command != NULL);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * After a connect failure: either try the next candidate (another MQTT
 * version or the next server URI) by re-queuing the saved connect command,
 * or - when there is nothing left to try - close the session, report the
 * failure via the connect onFailure callback, and start the automatic
 * reconnect back-off.  The version/URI advance logic here mirrors the one
 * in MQTTAsync_processCommand.
 *
 * @param m the client whose connect attempt failed
 * @param rc the failure code to report to the onFailure callback
 * @param message the failure message to report to the onFailure callback
 */
static void nextOrClose(MQTTAsyncs* m, int rc, char* message)
{
	if (MQTTAsync_checkConn(&m->connect, m))
	{
		MQTTAsync_queuedCommand* conn;

		MQTTAsync_closeOnly(m->c);
		/* put the connect command back to the head of the command queue, using the next serverURI */
		conn = malloc(sizeof(MQTTAsync_queuedCommand));
		memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
		conn->client = m;
		conn->command = m->connect;
		Log(TRACE_MIN, -1, "Connect failed, more to try");
		/* with the default version setting, fall back from 3.1 to the next URI;
		   otherwise just advance the URI */
		if (conn->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
		{
			if (conn->command.details.conn.MQTTVersion == MQTTVERSION_3_1)
			{
				conn->command.details.conn.currentURI++;
				conn->command.details.conn.MQTTVersion = MQTTVERSION_DEFAULT;
			}
		}
		else
			conn->command.details.conn.currentURI++;
		MQTTAsync_addCommand(conn, sizeof(m->connect));
	}
	else
	{
		MQTTAsync_closeSession(m->c);
		if (m->connect.onFailure)
		{
			MQTTAsync_failureData data;

			data.token = 0;
			data.code = rc;
			data.message = message;
			Log(TRACE_MIN, -1, "Calling connect failure for client %s", m->c->clientID);
			(*(m->connect.onFailure))(m->connect.context, &data);
		}
		MQTTAsync_startConnectRetry(m);
	}
}
/**
 * Periodic housekeeping run from the send thread: for every client, finish
 * pending disconnects, fail connects that exceeded the connect timeout,
 * drop timed-out responses (response timeouts are currently disabled - see
 * the `if (1 ...)` below), and fire automatic reconnects whose back-off
 * interval has elapsed.  Rate-limited to at most once every 3 seconds.
 *
 * FIX: the ListNextElement call contained mojibake ("¤t" - an
 * HTML-entity-mangled "&current") which does not compile; restored the
 * intended `&current` argument.
 */
static void MQTTAsync_checkTimeouts(void)
{
	ListElement* current = NULL;
	static time_t last = 0L;	/* time of the previous full scan */
	time_t now;

	FUNC_ENTRY;
	time(&(now));
	if (difftime(now, last) < 3)
		goto exit;

	MQTTAsync_lock_mutex(mqttasync_mutex);
	last = now;
	while (ListNextElement(handles, &current))		/* for each client */
	{
		ListElement* cur_response = NULL;
		int i = 0,
			timed_out_count = 0;
		MQTTAsyncs* m = (MQTTAsyncs*)(current->content);

		/* check disconnect timeout */
		if (m->c->connect_state == -2)
			MQTTAsync_checkDisconnect(m, &m->disconnect);

		/* check connect timeout */
		if (m->c->connect_state != 0 && MQTTAsync_elapsed(m->connect.start_time) > (m->connectTimeout * 1000))
		{
			nextOrClose(m, MQTTASYNC_FAILURE, "TCP connect timeout");
			continue;
		}

		timed_out_count = 0;
		/* check response timeouts */
		while (ListNextElement(m->responses, &cur_response))
		{
			MQTTAsync_queuedCommand* com = (MQTTAsync_queuedCommand*)(cur_response->content);

			if (1 /*MQTTAsync_elapsed(com->command.start_time) < 120000*/)
				break; /* command has not timed out */
			else
			{
				if (com->command.onFailure)
				{
					Log(TRACE_MIN, -1, "Calling %s failure for client %s",
						MQTTPacket_name(com->command.type), m->c->clientID);
					(*(com->command.onFailure))(com->command.context, NULL);
				}
				timed_out_count++;
			}
		}
		for (i = 0; i < timed_out_count; ++i)
			ListRemoveHead(m->responses);	/* remove the first response in the list */

		if (m->automaticReconnect && m->retrying)
		{
			if (m->reconnectNow || MQTTAsync_elapsed(m->lastConnectionFailedTime) > (m->currentInterval * 1000))
			{
				/* to reconnect put the connect command to the head of the command queue */
				MQTTAsync_queuedCommand* conn = malloc(sizeof(MQTTAsync_queuedCommand));
				memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
				conn->client = m;
				conn->command = m->connect;
				/* make sure that the version attempts are restarted */
				if (m->c->MQTTVersion == MQTTVERSION_DEFAULT)
					conn->command.details.conn.MQTTVersion = 0;
				Log(TRACE_MIN, -1, "Automatically attempting to reconnect");
				MQTTAsync_addCommand(conn, sizeof(m->connect));
				m->reconnectNow = 0;
			}
		}
	}
	MQTTAsync_unlock_mutex(mqttasync_mutex);
exit:
	FUNC_EXIT;
}
/**
 * The send thread main loop.  Drains the command queue via
 * MQTTAsync_processCommand until no command can be processed, then waits
 * (condition variable on POSIX, semaphore on Windows, ~1s timeout) for new
 * work or the timeout, and runs the periodic timeout checks.  Exits when
 * the global tostop flag is set, publishing its state via
 * sendThread_state/sendThread_id.
 *
 * @param n unused thread argument
 * @return 0 always
 */
static thread_return_type WINAPI MQTTAsync_sendThread(void* n)
{
	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	sendThread_state = RUNNING;
	sendThread_id = Thread_getid();
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	while (!tostop)
	{
		int rc;

		while (commands->count > 0)
		{
			if (MQTTAsync_processCommand() == 0)
				break;  /* no commands were processed, so go into a wait */
		}
		/* sleep until signalled by MQTTAsync_addCommand or the timeout elapses;
		   ETIMEDOUT is the normal idle wake-up, not an error */
#if !defined(WIN32) && !defined(WIN64)
		if ((rc = Thread_wait_cond(send_cond, 1)) != 0 && rc != ETIMEDOUT)
			Log(LOG_ERROR, -1, "Error %d waiting for condition variable", rc);
#else
		if ((rc = Thread_wait_sem(send_sem, 1000)) != 0 && rc != ETIMEDOUT)
			Log(LOG_ERROR, -1, "Error %d waiting for semaphore", rc);
#endif
		MQTTAsync_checkTimeouts();
	}
	sendThread_state = STOPPING;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	sendThread_state = STOPPED;
	sendThread_id = 0;
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT;
	return 0;
}
/**
 * Discard all undelivered incoming messages queued for a client, freeing
 * each entry's topic string, payload and message structure before emptying
 * the list (ListEmpty frees the qEntry structures themselves).
 *
 * FIX: the ListNextElement call contained mojibake ("¤t" - an
 * HTML-entity-mangled "&current") which does not compile; restored the
 * intended `&current` argument.
 *
 * @param client the client whose message queue should be emptied
 */
static void MQTTAsync_emptyMessageQueue(Clients* client)
{
	FUNC_ENTRY;
	/* empty message queue */
	if (client->messageQueue->count > 0)
	{
		ListElement* current = NULL;

		while (ListNextElement(client->messageQueue, &current))
		{
			qEntry* qe = (qEntry*)(current->content);

			free(qe->topicName);
			free(qe->msg->payload);
			free(qe->msg);
		}
		ListEmpty(client->messageQueue);
	}
	FUNC_EXIT;
}
/**
 * Discard everything outstanding for a client: all entries on its responses
 * list and all of its commands still in the global command queue.  Each
 * discarded item's onFailure callback (if any) is invoked with
 * MQTTASYNC_OPERATION_INCOMPLETE.  Used during client destruction.
 *
 * @param m the client whose responses and queued commands are removed
 */
static void MQTTAsync_removeResponsesAndCommands(MQTTAsyncs* m)
{
	int count = 0;
	ListElement* current = NULL;
	ListElement *next = NULL;

	FUNC_ENTRY;
	if (m->responses)
	{
		ListElement* cur_response = NULL;

		while (ListNextElement(m->responses, &cur_response))
		{
			MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(cur_response->content);

			if (command->command.onFailure)
			{
				MQTTAsync_failureData data;

				data.token = command->command.token;
				data.code = MQTTASYNC_OPERATION_INCOMPLETE; /* interrupted return code */
				data.message = NULL;
				Log(TRACE_MIN, -1, "Calling %s failure for client %s",
					MQTTPacket_name(command->command.type), m->c->clientID);
				(*(command->command.onFailure))(command->command.context, &data);
			}
			/* free only the internals here; ListEmpty below frees the structures */
			MQTTAsync_freeCommand1(command);
			count++;
		}
	}
	ListEmpty(m->responses);
	Log(TRACE_MINIMUM, -1, "%d responses removed for client %s", count, m->c->clientID);

	/* remove commands in the command queue relating to this client */
	count = 0;
	/* walk with a one-element look-ahead (next) so the current element can be
	   detached from the list without losing the iteration position */
	current = ListNextElement(commands, &next);
	ListNextElement(commands, &next);
	while (current)
	{
		MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);

		if (command->client == m)
		{
			ListDetach(commands, command);
			if (command->command.onFailure)
			{
				MQTTAsync_failureData data;

				data.token = command->command.token;
				data.code = MQTTASYNC_OPERATION_INCOMPLETE; /* interrupted return code */
				data.message = NULL;
				Log(TRACE_MIN, -1, "Calling %s failure for client %s",
					MQTTPacket_name(command->command.type), m->c->clientID);
				(*(command->command.onFailure))(command->command.context, &data);
			}
			MQTTAsync_freeCommand(command);
			count++;
		}
		current = next;
		ListNextElement(commands, &next);
	}
	Log(TRACE_MINIMUM, -1, "%d commands removed for client %s", count, m->c->clientID);
	FUNC_EXIT;
}
/**
 * Destroy a client handle: discard outstanding responses/commands, close
 * persistence, empty the message queue, free the underlying client and the
 * handle's own allocations, and NULL the caller's handle.  If this was the
 * last client, the whole library is terminated.
 *
 * @param handle pointer to the client handle; set to NULL on return
 */
void MQTTAsync_destroy(MQTTAsync* handle)
{
	MQTTAsyncs* m = *handle;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (m == NULL)
		goto exit;

	MQTTAsync_removeResponsesAndCommands(m);
	ListFree(m->responses);
	if (m->c)
	{
		/* copy the identifying info before freeing the client, for the trace below */
		int saved_socket = m->c->net.socket;
		char* saved_clientid = MQTTStrdup(m->c->clientID);
#if !defined(NO_PERSISTENCE)
		MQTTPersistence_close(m->c);
#endif
		MQTTAsync_emptyMessageQueue(m->c);
		MQTTProtocol_freeClient(m->c);
		if (!ListRemove(bstate->clients, m->c))
			Log(LOG_ERROR, 0, NULL);
		else
			Log(TRACE_MIN, 1, NULL, saved_clientid, saved_socket);
		free(saved_clientid);
	}
	if (m->serverURI)
		free(m->serverURI);
	if (m->createOptions)
		free(m->createOptions);
	MQTTAsync_freeServerURIs(m);
	/* removing m from handles also frees the handle structure itself */
	if (!ListRemove(handles, m))
		Log(LOG_ERROR, -1, "free error");
	*handle = NULL;
	if (bstate->clients->count == 0)
		MQTTAsync_terminate();

exit:
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT;
}
/**
 * Free an MQTT message structure and its payload, then NULL the caller's
 * pointer so it cannot be reused after the free.
 *
 * @param message pointer to the message pointer to free; *message must be
 *        a valid message, and is set to NULL on return
 */
void MQTTAsync_freeMessage(MQTTAsync_message** message)
{
	MQTTAsync_message* msg = *message;

	FUNC_ENTRY;
	free(msg->payload);
	free(msg);
	*message = NULL;
	FUNC_EXIT;
}
/**
 * Free a block of memory that was allocated by this library and handed to
 * the application (e.g. a topic name), using the library's own allocator.
 *
 * @param memory the pointer to free
 */
void MQTTAsync_free(void* memory)
{
	FUNC_ENTRY;
	free(memory);
	FUNC_EXIT;
}
/**
 * Handle a received CONNACK for a client that is waiting for one
 * (connect_state == 3).  On a successful return code, marks the client
 * connected, optionally cleans the session, and schedules retries for any
 * outbound messages left over from a previous session.  The CONNACK packet
 * is consumed (freed) either way, and the send thread is woken.
 *
 * FIX: the loop variable over outboundMsgs was named `m` (Messages*),
 * shadowing the `m` (MQTTAsyncs*) parameter used immediately after the
 * loop; renamed it to `msg` to remove the shadowing hazard.  Behavior is
 * unchanged.
 *
 * @param m the client whose connection may be completing
 * @param pack the received packet, expected to be a CONNACK
 * @return the CONNACK return code (MQTTASYNC_SUCCESS on success), possibly
 *         replaced by a cleanSession error or MQTTASYNC_DISCONNECTED;
 *         MQTTASYNC_FAILURE if the client was not awaiting a CONNACK
 */
static int MQTTAsync_completeConnection(MQTTAsyncs* m, MQTTPacket* pack)
{
	int rc = MQTTASYNC_FAILURE;

	FUNC_ENTRY;
	if (m->c->connect_state == 3) /* MQTT connect sent - wait for CONNACK */
	{
		Connack* connack = (Connack*)pack;

		Log(LOG_PROTOCOL, 1, NULL, m->c->net.socket, m->c->clientID, connack->rc);
		if ((rc = connack->rc) == MQTTASYNC_SUCCESS)
		{
			m->retrying = 0;
			m->c->connected = 1;
			m->c->good = 1;
			m->c->connect_state = 0;
			if (m->c->cleansession)
				rc = MQTTAsync_cleanSession(m->c);
			if (m->c->outboundMsgs->count > 0)
			{
				ListElement* outcurrent = NULL;

				/* reset last-touch so all leftover in-flight messages retry now */
				while (ListNextElement(m->c->outboundMsgs, &outcurrent))
				{
					Messages* msg = (Messages*)(outcurrent->content);
					msg->lastTouch = 0;
				}
				MQTTProtocol_retry((time_t)0, 1, 1);
				if (m->c->connected != 1)
					rc = MQTTASYNC_DISCONNECTED;
			}
		}
		free(connack);
		m->pack = NULL;
		/* wake the send thread - queued commands may now be runnable */
#if !defined(WIN32) && !defined(WIN64)
		Thread_signal_cond(send_cond);
#else
		if (!Thread_check_sem(send_sem))
			Thread_post_sem(send_sem);
#endif
	}
	FUNC_EXIT_RC(rc);
	return rc;
}
/* This is the thread function that handles the calling of callback functions if set */
static thread_return_type WINAPI MQTTAsync_receiveThread(void* n)
{
long timeout = 10L; /* first time in we have a small timeout. Gets things started more quickly */
FUNC_ENTRY;
MQTTAsync_lock_mutex(mqttasync_mutex);
receiveThread_state = RUNNING;
receiveThread_id = Thread_getid();
while (!tostop)
{
int rc = SOCKET_ERROR;
int sock = -1;
MQTTAsyncs* m = NULL;
MQTTPacket* pack = NULL;
MQTTAsync_unlock_mutex(mqttasync_mutex);
pack = MQTTAsync_cycle(&sock, timeout, &rc);
MQTTAsync_lock_mutex(mqttasync_mutex);
if (tostop)
break;
timeout = 1000L;
if (sock == 0)
continue;
/* find client corresponding to socket */
if (ListFindItem(handles, &sock, clientSockCompare) == NULL)
{
Log(TRACE_MINIMUM, -1, "Could not find client corresponding to socket %d", sock);
/* Socket_close(sock); - removing socket in this case is not necessary (Bug 442400) */
continue;
}
m = (MQTTAsyncs*)(handles->current->content);
if (m == NULL)
{
Log(LOG_ERROR, -1, "Client structure was NULL for socket %d - removing socket", sock);
Socket_close(sock);
continue;
}
if (rc == SOCKET_ERROR)
{
Log(TRACE_MINIMUM, -1, "Error from MQTTAsync_cycle() - removing socket %d", sock);
if (m->c->connected == 1)
{
MQTTAsync_unlock_mutex(mqttasync_mutex);
MQTTAsync_disconnect_internal(m, 0);
MQTTAsync_lock_mutex(mqttasync_mutex);
}
else if (m->c->connect_state != 0)
nextOrClose(m, rc, "socket error");
else /* calling disconnect_internal won't have any effect if we're already disconnected */
MQTTAsync_closeOnly(m->c);
}
else
{
if (m->c->messageQueue->count > 0)
{
qEntry* qe = (qEntry*)(m->c->messageQueue->first->content);
int topicLen = qe->topicLen;
if (strlen(qe->topicName) == topicLen)
topicLen = 0;
if (m->ma)
rc = MQTTAsync_deliverMessage(m, qe->topicName, topicLen, qe->msg);
else
rc = 1;
if (rc)
{
ListRemove(m->c->messageQueue, qe);
#if !defined(NO_PERSISTENCE)
if (m->c->persistence)
MQTTPersistence_unpersistQueueEntry(m->c, (MQTTPersistence_qEntry*)qe);
#endif
}
else
Log(TRACE_MIN, -1, "False returned from messageArrived for client %s, message remains on queue",
m->c->clientID);
}
if (pack)
{
if (pack->header.bits.type == CONNACK)
{
int sessionPresent = ((Connack*)pack)->flags.bits.sessionPresent;
int rc = MQTTAsync_completeConnection(m, pack);
if (rc == MQTTASYNC_SUCCESS)
{
int onSuccess = 0;
if (m->serverURIcount > 0)
Log(TRACE_MIN, -1, "Connect succeeded to %s",
m->serverURIs[m->connect.details.conn.currentURI]);
onSuccess = (m->connect.onSuccess != NULL); /* save setting of onSuccess callback */
if (m->connect.onSuccess)
{
MQTTAsync_successData data;
memset(&data, '\0', sizeof(data));
Log(TRACE_MIN, -1, "Calling connect success for client %s", m->c->clientID);
if (m->serverURIcount > 0)
data.alt.connect.serverURI = m->serverURIs[m->connect.details.conn.currentURI];
else
data.alt.connect.serverURI = m->serverURI;
data.alt.connect.MQTTVersion = m->connect.details.conn.MQTTVersion;
data.alt.connect.sessionPresent = sessionPresent;
(*(m->connect.onSuccess))(m->connect.context, &data);
m->connect.onSuccess = NULL; /* don't accidentally call it again */
}
if (m->connected)
{
char* reason = (onSuccess) ? "connect onSuccess called" : "automatic reconnect";
Log(TRACE_MIN, -1, "Calling connected for client %s", m->c->clientID);
(*(m->connected))(m->connected_context, reason);
}
}
else
nextOrClose(m, rc, "CONNACK return code");
}
else if (pack->header.bits.type == SUBACK)
{
ListElement* current = NULL;
/* use the msgid to find the callback to be called */
while (ListNextElement(m->responses, ¤t))
{
MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
if (command->command.token == ((Suback*)pack)->msgId)
{
Suback* sub = (Suback*)pack;
if (!ListDetach(m->responses, command)) /* remove the response from the list */
Log(LOG_ERROR, -1, "Subscribe command not removed from command list");
/* Call the failure callback if there is one subscribe in the MQTT packet and
* the return code is 0x80 (failure). If the MQTT packet contains >1 subscription
* request, then we call onSuccess with the list of returned QoSs, which inelegantly,
* could include some failures, or worse, the whole list could have failed.
*/
if (sub->qoss->count == 1 && *(int*)(sub->qoss->first->content) == MQTT_BAD_SUBSCRIBE)
{
if (command->command.onFailure)
{
MQTTAsync_failureData data;
data.token = command->command.token;
data.code = *(int*)(sub->qoss->first->content);
Log(TRACE_MIN, -1, "Calling subscribe failure for client %s", m->c->clientID);
(*(command->command.onFailure))(command->command.context, &data);
}
}
else if (command->command.onSuccess)
{
MQTTAsync_successData data;
int* array = NULL;
if (sub->qoss->count == 1)
data.alt.qos = *(int*)(sub->qoss->first->content);
else if (sub->qoss->count > 1)
{
ListElement* cur_qos = NULL;
int* element = array = data.alt.qosList = malloc(sub->qoss->count * sizeof(int));
while (ListNextElement(sub->qoss, &cur_qos))
*element++ = *(int*)(cur_qos->content);
}
data.token = command->command.token;
Log(TRACE_MIN, -1, "Calling subscribe success for client %s", m->c->clientID);
(*(command->command.onSuccess))(command->command.context, &data);
if (array)
free(array);
}
MQTTAsync_freeCommand(command);
break;
}
}
rc = MQTTProtocol_handleSubacks(pack, m->c->net.socket);
}
else if (pack->header.bits.type == UNSUBACK)
{
ListElement* current = NULL;
int handleCalled = 0;
/* use the msgid to find the callback to be called */
while (ListNextElement(m->responses, ¤t))
{
MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
if (command->command.token == ((Unsuback*)pack)->msgId)
{
if (!ListDetach(m->responses, command)) /* remove the response from the list */
Log(LOG_ERROR, -1, "Unsubscribe command not removed from command list");
if (command->command.onSuccess)
{
rc = MQTTProtocol_handleUnsubacks(pack, m->c->net.socket);
handleCalled = 1;
Log(TRACE_MIN, -1, "Calling unsubscribe success for client %s", m->c->clientID);
(*(command->command.onSuccess))(command->command.context, NULL);
}
MQTTAsync_freeCommand(command);
break;
}
}
if (!handleCalled)
rc = MQTTProtocol_handleUnsubacks(pack, m->c->net.socket);
}
}
}
}
receiveThread_state = STOPPED;
receiveThread_id = 0;
MQTTAsync_unlock_mutex(mqttasync_mutex);
#if !defined(WIN32) && !defined(WIN64)
if (sendThread_state != STOPPED)
Thread_signal_cond(send_cond);
#else
if (sendThread_state != STOPPED && !Thread_check_sem(send_sem))
Thread_post_sem(send_sem);
#endif
FUNC_EXIT;
return 0;
}
/**
 * Stop the background send and receive threads, but only if no client handle
 * is still connected or part-way through connecting.
 * Appears to be called with mqttasync_mutex already held (it unlocks and
 * relocks that mutex while waiting) - confirm at call sites.
 */
static void MQTTAsync_stop(void)
{
	int rc = 0;	/* only used for trace output via FUNC_EXIT_RC; the function is void */

	FUNC_ENTRY;
	if (sendThread_state != STOPPED || receiveThread_state != STOPPED)
	{
		int conn_count = 0;
		ListElement* current = NULL;

		if (handles != NULL)
		{
			/* find out how many handles are still connected */
			while (ListNextElement(handles, &current))
			{
				if (((MQTTAsyncs*)(current->content))->c->connect_state > 0 ||
						((MQTTAsyncs*)(current->content))->c->connected)
					++conn_count;
			}
		}
		Log(TRACE_MIN, -1, "Conn_count is %d", conn_count);
		/* stop the background thread, if we are the last one to be using it */
		if (conn_count == 0)
		{
			int count = 0;

			tostop = 1;
			/* Poll up to 100 times (~10s) for both threads to observe tostop;
			 * drop the mutex while sleeping so the threads can make progress. */
			while ((sendThread_state != STOPPED || receiveThread_state != STOPPED) && ++count < 100)
			{
				MQTTAsync_unlock_mutex(mqttasync_mutex);
				Log(TRACE_MIN, -1, "sleeping");
				MQTTAsync_sleep(100L);
				MQTTAsync_lock_mutex(mqttasync_mutex);
			}
			rc = 1;
			tostop = 0;	/* reset so the threads can be restarted by a later connect */
		}
	}
	FUNC_EXIT_RC(rc);
}
/**
 * Set the global callbacks (connection lost, message arrived, delivery
 * complete) for a client.  A messageArrived callback is mandatory; the call
 * fails if the handle is NULL or a connect is currently in progress.
 * @param handle the client
 * @param context application context passed back to each callback
 * @param cl connection-lost callback, may be NULL
 * @param ma message-arrived callback, required
 * @param dc delivery-complete callback, may be NULL
 * @return MQTTASYNC_SUCCESS or MQTTASYNC_FAILURE
 */
int MQTTAsync_setCallbacks(MQTTAsync handle, void* context,
		MQTTAsync_connectionLost* cl,
		MQTTAsync_messageArrived* ma,
		MQTTAsync_deliveryComplete* dc)
{
	MQTTAsyncs* client = handle;
	int rc;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (client != NULL && ma != NULL && client->c->connect_state == 0)
	{
		client->context = context;
		client->cl = cl;
		client->ma = ma;
		client->dc = dc;
		rc = MQTTASYNC_SUCCESS;
	}
	else
		rc = MQTTASYNC_FAILURE;
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Set the callback invoked when a connection (including an automatic
 * reconnect) completes.  Fails if the handle is NULL or a connect is
 * currently in progress.
 * @param handle the client
 * @param context application context passed back to the callback
 * @param connected the callback, may be NULL to clear it
 * @return MQTTASYNC_SUCCESS or MQTTASYNC_FAILURE
 */
int MQTTAsync_setConnected(MQTTAsync handle, void* context, MQTTAsync_connected* connected)
{
	MQTTAsyncs* client = handle;
	int rc;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (client != NULL && client->c->connect_state == 0)
	{
		client->connected_context = context;
		client->connected = connected;
		rc = MQTTASYNC_SUCCESS;
	}
	else
		rc = MQTTASYNC_FAILURE;
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Close a client's network connection without touching its session state
 * (queued messages, in-flight messages, persistence are left alone).
 * Sends a DISCONNECT packet first when the MQTT session is still up, then
 * shuts the socket (and any TLS layer) under the socket mutex.
 * @param client the client whose transport should be closed
 */
static void MQTTAsync_closeOnly(Clients* client)
{
	FUNC_ENTRY;
	client->good = 0;
	client->ping_outstanding = 0;
	if (client->net.socket > 0)
	{
		/* be polite: tell the server we are going, if the session is connected */
		if (client->connected)
			MQTTPacket_send_disconnect(&client->net, client->clientID);
		Thread_lock_mutex(socket_mutex);
#if defined(OPENSSL)
		SSLSocket_close(&client->net);
#endif
		Socket_close(client->net.socket);
		client->net.socket = 0;	/* mark transport as gone */
#if defined(OPENSSL)
		client->net.ssl = NULL;
#endif
		Thread_unlock_mutex(socket_mutex);
	}
	client->connected = 0;
	client->connect_state = 0;
	FUNC_EXIT;
}
/**
 * Close a client's network connection and, when the client was created with
 * cleansession set, discard its session state as well.
 * @param client the client to close
 */
static void MQTTAsync_closeSession(Clients* client)
{
	FUNC_ENTRY;
	MQTTAsync_closeOnly(client);	/* always drop the transport */
	if (client->cleansession != 0)
		MQTTAsync_cleanSession(client);	/* additionally discard session state */
	FUNC_EXIT;
}
/**
* List callback function for comparing clients by client structure
* @param a Async structure
* @param b Client structure
* @return boolean indicating whether a and b are equal
*/
static int clientStructCompare(void* a, void* b)
{
MQTTAsyncs* m = (MQTTAsyncs*)a;
return m->c == (Clients*)b;
}
/**
 * Discard all session state for a client: persisted data, in-flight message
 * lists in both directions, the received-message queue, the message id
 * counter, and any queued commands/responses for the owning handle.
 * @param client the client whose session should be cleaned
 * @return the return code from clearing persistence (0 when persistence is
 *         compiled out)
 */
static int MQTTAsync_cleanSession(Clients* client)
{
	int rc = 0;
	ListElement* found = NULL;

	FUNC_ENTRY;
#if !defined(NO_PERSISTENCE)
	rc = MQTTPersistence_clear(client);
#endif
	MQTTProtocol_emptyMessageList(client->inboundMsgs);
	MQTTProtocol_emptyMessageList(client->outboundMsgs);
	MQTTAsync_emptyMessageQueue(client);
	client->msgID = 0;

	found = ListFindItem(handles, client, clientStructCompare);
	if (found == NULL)
		Log(LOG_ERROR, -1, "cleanSession: did not find client structure in handles list");
	else
		MQTTAsync_removeResponsesAndCommands((MQTTAsyncs*)(found->content));
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Hand a received message to the client's messageArrived callback.
 * @param m the client handle (m->ma must be set)
 * @param topicName the topic the message arrived on
 * @param topicLen length passed to the callback (0 when topicName is NUL-terminated)
 * @param mm the message to deliver
 * @return the callback's result: 0 means the application refused the message
 *         (it stays queued and is retried later); 1 means it was accepted,
 *         in which case the message data may already have been freed.
 */
static int MQTTAsync_deliverMessage(MQTTAsyncs* m, char* topicName, size_t topicLen, MQTTAsync_message* mm)
{
	int delivered;

	Log(TRACE_MIN, -1, "Calling messageArrived for client %s, queue depth %d",
		m->c->clientID, m->c->messageQueue->count);
	delivered = (*(m->ma))(m->context, topicName, (int)topicLen, mm);
	return delivered;
}
/**
 * Called by the protocol layer when a PUBLISH packet arrives.
 * Builds an MQTTAsync_message from the wire packet and either delivers it
 * immediately via the owning handle's messageArrived callback or queues it
 * (persisting the queue entry when persistence is enabled).
 * Takes ownership of publish->topic (cleared here so the caller will not
 * free it) and, for QoS 2, of publish->payload.
 * @param publish the incoming publish packet
 * @param client the client the packet arrived for
 */
void Protocol_processPublication(Publish* publish, Clients* client)
{
	MQTTAsync_message* mm = NULL;
	int rc = 0;

	FUNC_ENTRY;
	/* NOTE(review): malloc results are not checked in this function - confirm this matches project policy */
	mm = malloc(sizeof(MQTTAsync_message));

	/* If the message is QoS 2, then we have already stored the incoming payload
	 * in an allocated buffer, so we don't need to copy again.
	 */
	if (publish->header.bits.qos == 2)
		mm->payload = publish->payload;
	else
	{
		mm->payload = malloc(publish->payloadlen);
		memcpy(mm->payload, publish->payload, publish->payloadlen);
	}
	mm->payloadlen = publish->payloadlen;
	mm->qos = publish->header.bits.qos;
	mm->retained = publish->header.bits.retain;
	if (publish->header.bits.qos == 2)
		mm->dup = 0; /* ensure that a QoS2 message is not passed to the application with dup = 1 */
	else
		mm->dup = publish->header.bits.dup;
	mm->msgid = publish->msgId;

	/* Deliver immediately only when nothing is already queued (preserves ordering)
	 * and the client is connected */
	if (client->messageQueue->count == 0 && client->connected)
	{
		ListElement* found = NULL;

		if ((found = ListFindItem(handles, client, clientStructCompare)) == NULL)
			Log(LOG_ERROR, -1, "processPublication: did not find client structure in handles list");
		else
		{
			MQTTAsyncs* m = (MQTTAsyncs*)(found->content);

			if (m->ma)
				rc = MQTTAsync_deliverMessage(m, publish->topic, publish->topiclen, mm);
		}
	}

	if (rc == 0) /* if message was not delivered, queue it up */
	{
		qEntry* qe = malloc(sizeof(qEntry));

		qe->msg = mm;
		qe->topicName = publish->topic;	/* ownership of the topic string moves to the queue entry */
		qe->topicLen = publish->topiclen;
		/* NOTE(review): sizeof(qe) and sizeof(mm) are pointer sizes, not struct sizes - confirm this accounting is intended */
		ListAppend(client->messageQueue, qe, sizeof(qe) + sizeof(mm) + mm->payloadlen + strlen(qe->topicName)+1);
#if !defined(NO_PERSISTENCE)
		if (client->persistence)
			MQTTPersistence_persistQueueEntry(client, (MQTTPersistence_qEntry*)qe);
#endif
	}
	publish->topic = NULL;	/* prevent the caller from freeing the topic we now own */
	FUNC_EXIT;
}
/* Seconds between retry-processing passes.  Only ever lowered (towards the
 * smallest proposal seen) as clients connect with short keepalive periods. */
static int retryLoopInterval = 5;

/**
 * Propose a retry loop interval of one tenth of the keepalive period,
 * clamped to the range [1, 5] seconds; adopt it only if it is smaller
 * than the current interval.
 * @param keepalive the client's keepalive interval in seconds
 */
static void setRetryLoopInterval(int keepalive)
{
	int candidate = keepalive / 10;

	if (candidate < 1)
		candidate = 1;
	if (candidate > 5)
		candidate = 5;
	if (candidate < retryLoopInterval)
		retryLoopInterval = candidate;
}
/**
 * Validate connect options, store them on the client, start the background
 * send/receive threads if needed, and queue an asynchronous CONNECT command.
 * The connect result is reported via options->onSuccess/onFailure.
 * @param handle the client to connect
 * @param options connect options (struct versions 0..5 accepted)
 * @return MQTTASYNC_SUCCESS if the command was queued, otherwise an error code
 */
int MQTTAsync_connect(MQTTAsync handle, const MQTTAsync_connectOptions* options)
{
	MQTTAsyncs* m = handle;
	int rc = MQTTASYNC_SUCCESS;
	MQTTAsync_queuedCommand* conn;

	FUNC_ENTRY;
	if (options == NULL)
	{
		rc = MQTTASYNC_NULL_PARAMETER;
		goto exit;
	}

	if (strncmp(options->struct_id, "MQTC", 4) != 0 || options->struct_version < 0 || options->struct_version > 5)
	{
		rc = MQTTASYNC_BAD_STRUCTURE;
		goto exit;
	}
	if (options->will) /* check validity of will options structure */
	{
		if (strncmp(options->will->struct_id, "MQTW", 4) != 0 || (options->will->struct_version != 0 && options->will->struct_version != 1))
		{
			rc = MQTTASYNC_BAD_STRUCTURE;
			goto exit;
		}
		if (options->will->qos < 0 || options->will->qos > 2)
		{
			rc = MQTTASYNC_BAD_QOS;
			goto exit;
		}
	}
	if (options->struct_version != 0 && options->ssl) /* check validity of SSL options structure */
	{
		if (strncmp(options->ssl->struct_id, "MQTS", 4) != 0 || options->ssl->struct_version < 0 || options->ssl->struct_version > 1)
		{
			rc = MQTTASYNC_BAD_STRUCTURE;
			goto exit;
		}
	}
	if ((options->username && !UTF8_validateString(options->username)) ||
			(options->password && !UTF8_validateString(options->password)))
	{
		rc = MQTTASYNC_BAD_UTF8_STRING;
		goto exit;
	}

	/* NOTE(review): 'm' is dereferenced from here on without a NULL check - confirm callers always pass a valid handle */
	m->connect.onSuccess = options->onSuccess;
	m->connect.onFailure = options->onFailure;
	m->connect.context = options->context;
	m->connectTimeout = options->connectTimeout;

	tostop = 0;
	/* NOTE(review): the thread-state tests below run before the mutex is taken -
	 * presumably a benign startup race, but confirm */
	if (sendThread_state != STARTING && sendThread_state != RUNNING)
	{
		MQTTAsync_lock_mutex(mqttasync_mutex);
		sendThread_state = STARTING;
		Thread_start(MQTTAsync_sendThread, NULL);
		MQTTAsync_unlock_mutex(mqttasync_mutex);
	}
	if (receiveThread_state != STARTING && receiveThread_state != RUNNING)
	{
		MQTTAsync_lock_mutex(mqttasync_mutex);
		receiveThread_state = STARTING;
		Thread_start(MQTTAsync_receiveThread, handle);
		MQTTAsync_unlock_mutex(mqttasync_mutex);
	}

	/* copy simple connection parameters onto the client structure */
	m->c->keepAliveInterval = options->keepAliveInterval;
	setRetryLoopInterval(options->keepAliveInterval);
	m->c->cleansession = options->cleansession;
	m->c->maxInflightMessages = options->maxInflight;
	if (options->struct_version >= 3)
		m->c->MQTTVersion = options->MQTTVersion;
	else
		m->c->MQTTVersion = 0;
	if (options->struct_version >= 4)
	{
		m->automaticReconnect = options->automaticReconnect;
		m->minRetryInterval = options->minRetryInterval;
		m->maxRetryInterval = options->maxRetryInterval;
	}

	/* free any will message left over from a previous connect */
	if (m->c->will)
	{
		free(m->c->will->payload);
		free(m->c->will->topic);
		free(m->c->will);
		m->c->will = NULL;
	}

	if (options->will && (options->will->struct_version == 0 || options->will->struct_version == 1))
	{
		const void* source = NULL;

		m->c->will = malloc(sizeof(willMessages));
		/* struct_version 1 adds a binary payload alternative to the string message */
		if (options->will->message || (options->will->struct_version == 1 && options->will->payload.data))
		{
			if (options->will->struct_version == 1 && options->will->payload.data)
			{
				m->c->will->payloadlen = options->will->payload.len;
				source = options->will->payload.data;
			}
			else
			{
				m->c->will->payloadlen = strlen(options->will->message);
				source = (void*)options->will->message;
			}
			m->c->will->payload = malloc(m->c->will->payloadlen);
			memcpy(m->c->will->payload, source, m->c->will->payloadlen);
		}
		else
		{
			m->c->will->payload = NULL;
			m->c->will->payloadlen = 0;
		}
		m->c->will->qos = options->will->qos;
		m->c->will->retained = options->will->retained;
		m->c->will->topic = MQTTStrdup(options->will->topicName);
	}

#if defined(OPENSSL)
	/* replace any SSL options from a previous connect */
	if (m->c->sslopts)
	{
		if (m->c->sslopts->trustStore)
			free((void*)m->c->sslopts->trustStore);
		if (m->c->sslopts->keyStore)
			free((void*)m->c->sslopts->keyStore);
		if (m->c->sslopts->privateKey)
			free((void*)m->c->sslopts->privateKey);
		if (m->c->sslopts->privateKeyPassword)
			free((void*)m->c->sslopts->privateKeyPassword);
		if (m->c->sslopts->enabledCipherSuites)
			free((void*)m->c->sslopts->enabledCipherSuites);
		free((void*)m->c->sslopts);
		m->c->sslopts = NULL;
	}

	if (options->struct_version != 0 && options->ssl)
	{
		m->c->sslopts = malloc(sizeof(MQTTClient_SSLOptions));
		memset(m->c->sslopts, '\0', sizeof(MQTTClient_SSLOptions));
		m->c->sslopts->struct_version = options->ssl->struct_version;
		if (options->ssl->trustStore)
			m->c->sslopts->trustStore = MQTTStrdup(options->ssl->trustStore);
		if (options->ssl->keyStore)
			m->c->sslopts->keyStore = MQTTStrdup(options->ssl->keyStore);
		if (options->ssl->privateKey)
			m->c->sslopts->privateKey = MQTTStrdup(options->ssl->privateKey);
		if (options->ssl->privateKeyPassword)
			m->c->sslopts->privateKeyPassword = MQTTStrdup(options->ssl->privateKeyPassword);
		if (options->ssl->enabledCipherSuites)
			m->c->sslopts->enabledCipherSuites = MQTTStrdup(options->ssl->enabledCipherSuites);
		m->c->sslopts->enableServerCertAuth = options->ssl->enableServerCertAuth;
		if (m->c->sslopts->struct_version >= 1)
			m->c->sslopts->sslVersion = options->ssl->sslVersion;
	}
#else
	if (options->struct_version != 0 && options->ssl)
	{
		rc = MQTTASYNC_SSL_NOT_SUPPORTED;
		goto exit;
	}
#endif

	/* NOTE(review): username/password are stored by pointer, not copied -
	 * the caller's buffers must outlive the connect; confirm documented contract */
	m->c->username = options->username;
	m->c->password = options->password;
	if (options->password)
		m->c->passwordlen = strlen(options->password);
	else if (options->struct_version >= 5 && options->binarypwd.data)
	{
		m->c->password = options->binarypwd.data;
		m->c->passwordlen = options->binarypwd.len;
	}
	m->c->retryInterval = options->retryInterval;
	m->shouldBeConnected = 1;

	m->connectTimeout = options->connectTimeout;

	/* replace any server URI list from a previous connect */
	MQTTAsync_freeServerURIs(m);
	if (options->struct_version >= 2 && options->serverURIcount > 0)
	{
		int i;

		m->serverURIcount = options->serverURIcount;
		m->serverURIs = malloc(options->serverURIcount * sizeof(char*));
		for (i = 0; i < options->serverURIcount; ++i)
			m->serverURIs[i] = MQTTStrdup(options->serverURIs[i]);
	}

	/* Add connect request to operation queue */
	conn = malloc(sizeof(MQTTAsync_queuedCommand));
	memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
	conn->client = m;
	if (options)	/* NOTE(review): always true here (options was NULL-checked at entry) - dead test */
	{
		conn->command.onSuccess = options->onSuccess;
		conn->command.onFailure = options->onFailure;
		conn->command.context = options->context;
	}
	conn->command.type = CONNECT;
	conn->command.details.conn.currentURI = 0;
	rc = MQTTAsync_addCommand(conn, sizeof(conn));

exit:
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Queue a DISCONNECT command for a client.
 * @param handle the client to disconnect
 * @param options optional disconnect options (callbacks and timeout)
 * @param internal 1 when requested internally (e.g. on socket error), so the
 *        client is still considered "should be connected" for auto-reconnect
 * @return MQTTASYNC_SUCCESS if queued, MQTTASYNC_FAILURE for a bad handle,
 *         MQTTASYNC_DISCONNECTED when already disconnected
 */
static int MQTTAsync_disconnect1(MQTTAsync handle, const MQTTAsync_disconnectOptions* options, int internal)
{
	MQTTAsyncs* m = handle;
	int rc = MQTTASYNC_SUCCESS;

	FUNC_ENTRY;
	if (m == NULL || m->c == NULL)
		rc = MQTTASYNC_FAILURE;
	else
	{
		if (!internal)
			m->shouldBeConnected = 0;	/* a user disconnect disables auto-reconnect */
		if (m->c->connected == 0)
			rc = MQTTASYNC_DISCONNECTED;
		else
		{
			/* Add disconnect request to operation queue */
			MQTTAsync_queuedCommand* dis = malloc(sizeof(MQTTAsync_queuedCommand));

			memset(dis, '\0', sizeof(MQTTAsync_queuedCommand));
			dis->client = m;
			if (options != NULL)
			{
				dis->command.onSuccess = options->onSuccess;
				dis->command.onFailure = options->onFailure;
				dis->command.context = options->context;
				dis->command.details.dis.timeout = options->timeout;
			}
			dis->command.type = DISCONNECT;
			dis->command.details.dis.internal = internal;
			rc = MQTTAsync_addCommand(dis, sizeof(dis));
		}
	}
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Internally-initiated disconnect (socket error etc.): queues a DISCONNECT
 * without clearing shouldBeConnected, so auto-reconnect can still kick in.
 * @param handle the client
 * @param timeout disconnect timeout in milliseconds
 * @return result of queueing the disconnect command
 */
static int MQTTAsync_disconnect_internal(MQTTAsync handle, int timeout)
{
	MQTTAsync_disconnectOptions options = MQTTAsync_disconnectOptions_initializer;

	options.timeout = timeout;
	return MQTTAsync_disconnect1(handle, &options, 1 /* internal */);
}
/**
 * Protocol-layer hook for closing a session: delegates to an internal
 * disconnect of the owning async handle.  The sendwill flag is not used by
 * this (async) implementation.
 * @param c the client whose session should close
 * @param sendwill unused here
 */
void MQTTProtocol_closeSession(Clients* c, int sendwill)
{
	MQTTAsync_disconnect_internal((MQTTAsync)(c->context), 0);
}
/**
 * Public disconnect: queues a DISCONNECT command and clears the
 * shouldBeConnected flag so automatic reconnect is disabled.
 * @param handle the client
 * @param options optional disconnect options
 * @return result of queueing the disconnect command
 */
int MQTTAsync_disconnect(MQTTAsync handle, const MQTTAsync_disconnectOptions* options)
{
	return MQTTAsync_disconnect1(handle, options, 0 /* not internal */);
}
/**
 * Report whether a client currently has an established MQTT connection.
 * @param handle the client (NULL tolerated)
 * @return nonzero when connected, 0 otherwise
 */
int MQTTAsync_isConnected(MQTTAsync handle)
{
	MQTTAsyncs* client = handle;
	int connected = 0;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (client != NULL && client->c != NULL)
		connected = client->c->connected;
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(connected);
	return connected;
}
/**
 * List callback: does the queued command's token match the given message id?
 * @param a a MQTTAsync_queuedCommand*
 * @param b pointer to the message id to compare against
 * @return boolean match
 */
static int cmdMessageIDCompare(void* a, void* b)
{
	MQTTAsync_queuedCommand* queued = (MQTTAsync_queuedCommand*)a;
	int msgid = *(int*)b;

	return queued->command.token == msgid;
}
/**
 * Assign a new message id for a client. Make sure it isn't already being used
 * (by a queued command or a pending response) and does not exceed the maximum.
 * Takes the async mutex unless called from one of the background threads,
 * which already hold it.
 * @param m a client structure
 * @return the next message id to use, or 0 if none available
 */
static int MQTTAsync_assignMsgId(MQTTAsyncs* m)
{
	int start_msgid = m->c->msgID;
	int msgid;
	thread_id_type thread_id = 0;
	int locked = 0;

	FUNC_ENTRY;
	/* We might be called in a callback. In which case, this mutex will be already locked. */
	thread_id = Thread_getid();
	if (thread_id != sendThread_id && thread_id != receiveThread_id)
	{
		MQTTAsync_lock_mutex(mqttasync_mutex);
		locked = 1;
	}

	/* start scanning from the id after the last one assigned, wrapping at MAX_MSG_ID */
	msgid = (start_msgid == MAX_MSG_ID) ? 1 : start_msgid + 1;
	for (;;)
	{
		if (ListFindItem(commands, &msgid, cmdMessageIDCompare) == NULL &&
				ListFindItem(m->responses, &msgid, cmdMessageIDCompare) == NULL)
			break;	/* this id is free */
		msgid = (msgid == MAX_MSG_ID) ? 1 : msgid + 1;
		if (msgid == start_msgid)
		{	/* we've tried them all - none free */
			msgid = 0;
			break;
		}
	}
	if (msgid != 0)
		m->c->msgID = msgid;	/* remember where to start next time */
	if (locked)
		MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(msgid);
	return msgid;
}
/**
 * Queue a SUBSCRIBE request for a list of topics.
 * Validates the topics (UTF-8) and requested QoS values, assigns a message id,
 * and appends a SUBSCRIBE command to the operation queue; the result is
 * reported via the response callbacks, and response->token is set so the
 * caller can track completion.
 * @param handle the client
 * @param count number of entries in topic/qos
 *        (NOTE(review): count < 1 is not rejected here - confirm callers guarantee it)
 * @param topic array of topic filter strings
 * @param qos array of requested QoS values, one per topic
 * @param response optional callbacks/token for this request
 * @return MQTTASYNC_SUCCESS if queued, otherwise an error code
 */
int MQTTAsync_subscribeMany(MQTTAsync handle, int count, char* const* topic, int* qos, MQTTAsync_responseOptions* response)
{
	MQTTAsyncs* m = handle;
	int i = 0;
	int rc = MQTTASYNC_FAILURE;
	MQTTAsync_queuedCommand* sub;
	int msgid = 0;

	FUNC_ENTRY;
	if (m == NULL || m->c == NULL)
	{
		rc = MQTTASYNC_FAILURE;
		goto exit;
	}
	if (m->c->connected == 0)
	{
		rc = MQTTASYNC_DISCONNECTED;
		goto exit;
	}
	/* validate every topic and QoS before queueing anything */
	for (i = 0; i < count; i++)
	{
		if (!UTF8_validateString(topic[i]))
		{
			rc = MQTTASYNC_BAD_UTF8_STRING;
			goto exit;
		}
		if (qos[i] < 0 || qos[i] > 2)
		{
			rc = MQTTASYNC_BAD_QOS;
			goto exit;
		}
	}
	if ((msgid = MQTTAsync_assignMsgId(m)) == 0)
	{
		rc = MQTTASYNC_NO_MORE_MSGIDS;
		goto exit;
	}

	/* Add subscribe request to operation queue */
	sub = malloc(sizeof(MQTTAsync_queuedCommand));
	memset(sub, '\0', sizeof(MQTTAsync_queuedCommand));
	sub->client = m;
	sub->command.token = msgid;
	if (response)
	{
		sub->command.onSuccess = response->onSuccess;
		sub->command.onFailure = response->onFailure;
		sub->command.context = response->context;
		response->token = sub->command.token;	/* let the caller track this request */
	}
	sub->command.type = SUBSCRIBE;
	sub->command.details.sub.count = count;
	sub->command.details.sub.topics = malloc(sizeof(char*) * count);
	sub->command.details.sub.qoss = malloc(sizeof(int) * count);
	/* deep-copy the topics so the caller's buffers need not outlive the call */
	for (i = 0; i < count; ++i)
	{
		sub->command.details.sub.topics[i] = MQTTStrdup(topic[i]);
		sub->command.details.sub.qoss[i] = qos[i];
	}
	rc = MQTTAsync_addCommand(sub, sizeof(sub));

exit:
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Queue a SUBSCRIBE request for a single topic: thin wrapper around
 * MQTTAsync_subscribeMany with a one-element topic/qos list.
 * @param handle the client
 * @param topic the topic filter to subscribe to
 * @param qos the requested QoS
 * @param response optional callbacks/token for this request
 * @return result of MQTTAsync_subscribeMany
 */
int MQTTAsync_subscribe(MQTTAsync handle, const char* topic, int qos, MQTTAsync_responseOptions* response)
{
	char *const topics[] = {(char*)topic};
	int rc;

	FUNC_ENTRY;
	rc = MQTTAsync_subscribeMany(handle, 1, topics, &qos, response);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Queue an UNSUBSCRIBE request for a list of topics.
 * Validates the topics (UTF-8), assigns a message id, and appends an
 * UNSUBSCRIBE command to the operation queue; the result is reported via the
 * response callbacks, and response->token is set for tracking.
 * @param handle the client
 * @param count number of entries in topic
 * @param topic array of topic filter strings
 * @param response optional callbacks/token for this request
 * @return MQTTASYNC_SUCCESS if queued, otherwise an error code
 */
int MQTTAsync_unsubscribeMany(MQTTAsync handle, int count, char* const* topic, MQTTAsync_responseOptions* response)
{
	MQTTAsyncs* m = handle;
	int i = 0;
	/* Fixed: was initialized to SOCKET_ERROR, inconsistent with
	 * MQTTAsync_subscribeMany.  The value is never returned unassigned, but
	 * an API error code is the correct default for an API function. */
	int rc = MQTTASYNC_FAILURE;
	MQTTAsync_queuedCommand* unsub;
	int msgid = 0;

	FUNC_ENTRY;
	if (m == NULL || m->c == NULL)
	{
		rc = MQTTASYNC_FAILURE;
		goto exit;
	}
	if (m->c->connected == 0)
	{
		rc = MQTTASYNC_DISCONNECTED;
		goto exit;
	}
	/* validate every topic before queueing anything */
	for (i = 0; i < count; i++)
	{
		if (!UTF8_validateString(topic[i]))
		{
			rc = MQTTASYNC_BAD_UTF8_STRING;
			goto exit;
		}
	}
	if ((msgid = MQTTAsync_assignMsgId(m)) == 0)
	{
		rc = MQTTASYNC_NO_MORE_MSGIDS;
		goto exit;
	}

	/* Add unsubscribe request to operation queue */
	unsub = malloc(sizeof(MQTTAsync_queuedCommand));
	memset(unsub, '\0', sizeof(MQTTAsync_queuedCommand));
	unsub->client = m;
	unsub->command.type = UNSUBSCRIBE;
	unsub->command.token = msgid;
	if (response)
	{
		unsub->command.onSuccess = response->onSuccess;
		unsub->command.onFailure = response->onFailure;
		unsub->command.context = response->context;
		response->token = unsub->command.token;	/* let the caller track this request */
	}
	unsub->command.details.unsub.count = count;
	unsub->command.details.unsub.topics = malloc(sizeof(char*) * count);
	/* deep-copy the topics so the caller's buffers need not outlive the call */
	for (i = 0; i < count; ++i)
		unsub->command.details.unsub.topics[i] = MQTTStrdup(topic[i]);
	rc = MQTTAsync_addCommand(unsub, sizeof(unsub));

exit:
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Queue an UNSUBSCRIBE request for a single topic: thin wrapper around
 * MQTTAsync_unsubscribeMany with a one-element topic list.
 * @param handle the client
 * @param topic the topic filter to unsubscribe from
 * @param response optional callbacks/token for this request
 * @return result of MQTTAsync_unsubscribeMany
 */
int MQTTAsync_unsubscribe(MQTTAsync handle, const char* topic, MQTTAsync_responseOptions* response)
{
	char *const topics[] = {(char*)topic};
	int rc;

	FUNC_ENTRY;
	rc = MQTTAsync_unsubscribeMany(handle, 1, topics, response);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Count the PUBLISH commands queued (buffered) for a particular client.
 * Used to enforce the maxBufferedMessages create option.
 * @param m the client handle
 * @return number of queued PUBLISH commands belonging to m
 */
static int MQTTAsync_countBufferedMessages(MQTTAsyncs* m)
{
	ListElement* current = NULL;
	int count = 0;

	while (ListNextElement(commands, &current))
	{
		MQTTAsync_queuedCommand* queued = (MQTTAsync_queuedCommand*)(current->content);

		if (queued->client == m && queued->command.type == PUBLISH)
			++count;
	}
	return count;
}
/**
 * Queue a PUBLISH of a raw payload.  When the client was created with
 * sendWhileDisconnected, publishes are accepted while offline as long as the
 * client "should be connected" and the buffered-message limit is not hit.
 * The destination name and payload are copied, so the caller's buffers need
 * not outlive the call.  A message id (token) is assigned only for QoS > 0.
 * @param handle the client
 * @param destinationName the topic to publish to
 * @param payloadlen length of payload in bytes
 * @param payload the message payload
 * @param qos requested QoS (0..2)
 * @param retained retained flag
 * @param response optional callbacks/token for this request
 * @return MQTTASYNC_SUCCESS if queued, otherwise an error code
 */
int MQTTAsync_send(MQTTAsync handle, const char* destinationName, int payloadlen, void* payload,
		int qos, int retained, MQTTAsync_responseOptions* response)
{
	int rc = MQTTASYNC_SUCCESS;
	MQTTAsyncs* m = handle;
	MQTTAsync_queuedCommand* pub;
	int msgid = 0;

	FUNC_ENTRY;
	/* validation chain: first failure wins; note the msgid assignment is a
	 * deliberate side effect inside the QoS > 0 test below */
	if (m == NULL || m->c == NULL)
		rc = MQTTASYNC_FAILURE;
	else if (m->c->connected == 0 && (m->createOptions == NULL ||
		m->createOptions->sendWhileDisconnected == 0 || m->shouldBeConnected == 0))
		rc = MQTTASYNC_DISCONNECTED;
	else if (!UTF8_validateString(destinationName))
		rc = MQTTASYNC_BAD_UTF8_STRING;
	else if (qos < 0 || qos > 2)
		rc = MQTTASYNC_BAD_QOS;
	else if (qos > 0 && (msgid = MQTTAsync_assignMsgId(m)) == 0)
		rc = MQTTASYNC_NO_MORE_MSGIDS;
	else if (m->createOptions && (MQTTAsync_countBufferedMessages(m) >= m->createOptions->maxBufferedMessages))
		rc = MQTTASYNC_MAX_BUFFERED_MESSAGES;

	if (rc != MQTTASYNC_SUCCESS)
		goto exit;

	/* Add publish request to operation queue */
	pub = malloc(sizeof(MQTTAsync_queuedCommand));
	memset(pub, '\0', sizeof(MQTTAsync_queuedCommand));
	pub->client = m;
	pub->command.type = PUBLISH;
	pub->command.token = msgid;	/* 0 for QoS 0 */
	if (response)
	{
		pub->command.onSuccess = response->onSuccess;
		pub->command.onFailure = response->onFailure;
		pub->command.context = response->context;
		response->token = pub->command.token;
	}
	pub->command.details.pub.destinationName = MQTTStrdup(destinationName);
	pub->command.details.pub.payloadlen = payloadlen;
	/* NOTE(review): payload/payloadlen are not validated (NULL payload with
	 * payloadlen > 0 would fault in memcpy) - confirm callers guarantee this */
	pub->command.details.pub.payload = malloc(payloadlen);
	memcpy(pub->command.details.pub.payload, payload, payloadlen);
	pub->command.details.pub.qos = qos;
	pub->command.details.pub.retained = retained;
	rc = MQTTAsync_addCommand(pub, sizeof(pub));

exit:
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Queue a PUBLISH of an MQTTAsync_message structure: validates the structure
 * and delegates to MQTTAsync_send.
 * @param handle the client
 * @param destinationName the topic to publish to
 * @param message the message (struct_id "MQTM", struct_version 0)
 * @param response optional callbacks/token for this request
 * @return MQTTASYNC_NULL_PARAMETER / MQTTASYNC_BAD_STRUCTURE on invalid
 *         input, otherwise the result of MQTTAsync_send
 */
int MQTTAsync_sendMessage(MQTTAsync handle, const char* destinationName, const MQTTAsync_message* message,
		MQTTAsync_responseOptions* response)
{
	int rc;

	FUNC_ENTRY;
	if (message == NULL)
		rc = MQTTASYNC_NULL_PARAMETER;
	else if (strncmp(message->struct_id, "MQTM", 4) != 0 || message->struct_version != 0)
		rc = MQTTASYNC_BAD_STRUCTURE;
	else
		rc = MQTTAsync_send(handle, destinationName, message->payloadlen, message->payload,
				message->qos, message->retained, response);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Periodic maintenance: run keepalive processing and message retries, but at
 * most once per retryLoopInterval seconds for the full (regardless) pass.
 * Between full passes, a lightweight retry pass still runs.
 */
static void MQTTAsync_retry(void)
{
	static time_t last = 0L;	/* time of the last full pass */
	time_t now;

	FUNC_ENTRY;
	time(&now);
	if (difftime(now, last) <= retryLoopInterval)
		MQTTProtocol_retry(now, 0, 0);	/* lightweight pass only */
	else
	{
		time(&last);
		MQTTProtocol_keepalive(now);
		MQTTProtocol_retry(now, 1, 0);	/* full pass */
	}
	FUNC_EXIT;
}
/**
 * Drive the non-blocking connect state machine for one client.
 * connect_state 1: TCP connect in progress - check completion, then either
 * start the TLS handshake or send the MQTT CONNECT packet.
 * connect_state 2: TLS handshake in progress - try to complete it, then send
 * the MQTT CONNECT packet (moving to state 3, waiting for CONNACK).
 * On unrecoverable failure, tries the next server URI or closes via
 * nextOrClose().
 * @param m the client being connected
 * @return 0/MQTTCLIENT_SUCCESS on progress, nonzero error otherwise
 */
static int MQTTAsync_connecting(MQTTAsyncs* m)
{
	int rc = -1;

	FUNC_ENTRY;
	if (m->c->connect_state == 1) /* TCP connect started - check for completion */
	{
		int error;
		socklen_t len = sizeof(error);

		/* SO_ERROR reports the result of the non-blocking connect */
		if ((rc = getsockopt(m->c->net.socket, SOL_SOCKET, SO_ERROR, (char*)&error, &len)) == 0)
			rc = error;
		if (rc != 0)
			goto exit;
		Socket_clearPendingWrite(m->c->net.socket);

#if defined(OPENSSL)
		if (m->ssl)
		{
			int port;
			char* hostname;
			int setSocketForSSLrc = 0;

			hostname = MQTTProtocol_addressPort(m->serverURI, &port);
			setSocketForSSLrc = SSLSocket_setSocketForSSL(&m->c->net, m->c->sslopts, hostname);
			if (hostname != m->serverURI)
				free(hostname);

			/* NOTE(review): this branch treats a nonzero setSocketForSSLrc as
			 * success (SSLSocket_setSocketForSSL presumably returns nonzero on
			 * success while MQTTASYNC_SUCCESS is 0) - confirm that convention */
			if (setSocketForSSLrc != MQTTASYNC_SUCCESS)
			{
				/* try to resume a stored TLS session, non-fatal if it fails */
				if (m->c->session != NULL)
					if ((rc = SSL_set_session(m->c->net.ssl, m->c->session)) != 1)
						Log(TRACE_MIN, -1, "Failed to set SSL session with stored data, non critical");
				rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket);
				if (rc == TCPSOCKET_INTERRUPTED)
				{
					rc = MQTTCLIENT_SUCCESS; /* the connect is still in progress */
					m->c->connect_state = 2;
				}
				else if (rc == SSL_FATAL)
				{
					rc = SOCKET_ERROR;
					goto exit;
				}
				else if (rc == 1)	/* TLS handshake completed synchronously */
				{
					rc = MQTTCLIENT_SUCCESS;
					m->c->connect_state = 3;
					if (MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion) == SOCKET_ERROR)
					{
						rc = SOCKET_ERROR;
						goto exit;
					}
					if (!m->c->cleansession && m->c->session == NULL)
						m->c->session = SSL_get1_session(m->c->net.ssl);
				}
			}
			else
			{
				rc = SOCKET_ERROR;
				goto exit;
			}
		}
		else
		{
#endif
			m->c->connect_state = 3; /* TCP/SSL connect completed, in which case send the MQTT connect packet */
			if ((rc = MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion)) == SOCKET_ERROR)
				goto exit;
#if defined(OPENSSL)
		}
#endif
	}
#if defined(OPENSSL)
	else if (m->c->connect_state == 2) /* SSL connect sent - wait for completion */
	{
		if ((rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket)) != 1)
			goto exit;
		if (!m->c->cleansession && m->c->session == NULL)
			m->c->session = SSL_get1_session(m->c->net.ssl);
		m->c->connect_state = 3; /* SSL connect completed, in which case send the MQTT connect packet */
		if ((rc = MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion)) == SOCKET_ERROR)
			goto exit;
	}
#endif

exit:
	/* NOTE(review): SSL_FATAL is referenced here outside any OPENSSL guard -
	 * confirm it is defined in non-OpenSSL builds */
	if ((rc != 0 && rc != TCPSOCKET_INTERRUPTED && m->c->connect_state != 2) || (rc == SSL_FATAL))
		nextOrClose(m, MQTTASYNC_FAILURE, "TCP/TLS connect failure");
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * One pass of the receive loop: wait (up to timeout ms) for a ready socket,
 * advance any in-progress connect for that socket's client, read a packet,
 * and handle the packet types that can be fully processed here (PUBLISH and
 * the QoS acknowledgements, including firing deliveryComplete and the
 * per-publish onSuccess callbacks).  Other packet types (e.g. CONNACK,
 * SUBACK, UNSUBACK) are returned to the caller for processing.
 * Also runs periodic retry/keepalive processing on every pass.
 * @param sock out: the socket that was ready (0 = none)
 * @param timeout maximum time to wait for a ready socket, in milliseconds
 * @param rc out: result of the packet handling for this pass
 * @return a packet needing further processing by the caller, or NULL
 */
static MQTTPacket* MQTTAsync_cycle(int* sock, unsigned long timeout, int* rc)
{
	struct timeval tp = {0L, 0L};
	static Ack ack;	/* NOTE(review): static - relies on a single receive thread; confirm */
	MQTTPacket* pack = NULL;

	FUNC_ENTRY;
	if (timeout > 0L)
	{
		tp.tv_sec = timeout / 1000;
		tp.tv_usec = (timeout % 1000) * 1000; /* this field is microseconds! */
	}

#if defined(OPENSSL)
	/* data already buffered inside a TLS connection takes priority over select() */
	if ((*sock = SSLSocket_getPendingRead()) == -1)
	{
#endif
		Thread_lock_mutex(socket_mutex);
		/* 0 from getReadySocket indicates no work to do, -1 == error, but can happen normally */
		*sock = Socket_getReadySocket(0, &tp);
		Thread_unlock_mutex(socket_mutex);
		if (!tostop && *sock == 0 && (tp.tv_sec > 0L || tp.tv_usec > 0L))
			MQTTAsync_sleep(100L);	/* avoid spinning when nothing is ready */
#if defined(OPENSSL)
	}
#endif
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (*sock > 0)
	{
		MQTTAsyncs* m = NULL;

		/* find the client that owns this socket; ListFindItem leaves it in handles->current */
		if (ListFindItem(handles, sock, clientSockCompare) != NULL)
			m = (MQTTAsync)(handles->current->content);
		if (m != NULL)
		{
			Log(TRACE_MINIMUM, -1, "m->c->connect_state = %d",m->c->connect_state);
			if (m->c->connect_state == 1 || m->c->connect_state == 2)
				*rc = MQTTAsync_connecting(m);	/* still connecting: advance the state machine */
			else
				pack = MQTTPacket_Factory(&m->c->net, rc);
			if (m->c->connect_state == 3 && *rc == SOCKET_ERROR)
			{
				Log(TRACE_MINIMUM, -1, "CONNECT sent but MQTTPacket_Factory has returned SOCKET_ERROR");
				nextOrClose(m, MQTTASYNC_FAILURE, "TCP connect completion failure");
			}
			else
			{
				Log(TRACE_MINIMUM, -1, "m->c->connect_state = %d",m->c->connect_state);
				Log(TRACE_MINIMUM, -1, "CONNECT sent, *rc is %d",*rc);
			}
		}
		if (pack)
		{
			int freed = 1;

			/* Note that these handle... functions free the packet structure that they are dealing with */
			if (pack->header.bits.type == PUBLISH)
				*rc = MQTTProtocol_handlePublishes(pack, *sock);
			else if (pack->header.bits.type == PUBACK || pack->header.bits.type == PUBCOMP)
			{
				int msgid;

				/* copy the ack before the handler frees the packet */
				ack = (pack->header.bits.type == PUBCOMP) ? *(Pubcomp*)pack : *(Puback*)pack;
				msgid = ack.msgId;
				*rc = (pack->header.bits.type == PUBCOMP) ?
						MQTTProtocol_handlePubcomps(pack, *sock) : MQTTProtocol_handlePubacks(pack, *sock);
				if (!m)
					Log(LOG_ERROR, -1, "PUBCOMP or PUBACK received for no client, msgid %d", msgid);
				if (m)
				{
					ListElement* current = NULL;

					if (m->dc)
					{
						Log(TRACE_MIN, -1, "Calling deliveryComplete for client %s, msgid %d", m->c->clientID, msgid);
						(*(m->dc))(m->context, msgid);
					}
					/* use the msgid to find the callback to be called */
					while (ListNextElement(m->responses, &current))
					{
						MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);

						if (command->command.token == msgid)
						{
							if (!ListDetach(m->responses, command)) /* then remove the response from the list */
								Log(LOG_ERROR, -1, "Publish command not removed from command list");
							if (command->command.onSuccess)
							{
								MQTTAsync_successData data;

								data.token = command->command.token;
								data.alt.pub.destinationName = command->command.details.pub.destinationName;
								data.alt.pub.message.payload = command->command.details.pub.payload;
								data.alt.pub.message.payloadlen = command->command.details.pub.payloadlen;
								data.alt.pub.message.qos = command->command.details.pub.qos;
								data.alt.pub.message.retained = command->command.details.pub.retained;
								Log(TRACE_MIN, -1, "Calling publish success for client %s", m->c->clientID);
								(*(command->command.onSuccess))(command->command.context, &data);
							}
							MQTTAsync_freeCommand(command);
							break;
						}
					}
				}
			}
			else if (pack->header.bits.type == PUBREC)
				*rc = MQTTProtocol_handlePubrecs(pack, *sock);
			else if (pack->header.bits.type == PUBREL)
				*rc = MQTTProtocol_handlePubrels(pack, *sock);
			else if (pack->header.bits.type == PINGRESP)
				*rc = MQTTProtocol_handlePingresps(pack, *sock);
			else
				freed = 0;	/* pack was not consumed - hand it back to the caller */
			if (freed)
				pack = NULL;
		}
	}
	MQTTAsync_retry();
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(*rc);
	return pack;
}
/*
static int pubCompare(void* a, void* b)
{
Messages* msg = (Messages*)a;
return msg->publish == (Publications*)b;
}*/
/**
 * Return the tokens of all requests not yet completed for a client: queued
 * commands plus in-flight outbound messages.
 * On success with pending tokens, *tokens points to a malloc'd array
 * terminated by -1, which the caller must free; otherwise *tokens is NULL.
 * Fixed: the inner loop variable 'm' (Messages*) shadowed the client handle
 * 'm' (MQTTAsyncs*) - renamed to 'msg' for clarity; also guards against a
 * failed allocation instead of dereferencing NULL.
 * @param handle the client
 * @param tokens address of the pointer to receive the token array
 * @return MQTTASYNC_SUCCESS, or MQTTASYNC_FAILURE for a bad handle or
 *         allocation failure
 */
int MQTTAsync_getPendingTokens(MQTTAsync handle, MQTTAsync_token **tokens)
{
	int rc = MQTTASYNC_SUCCESS;
	MQTTAsyncs* m = handle;
	ListElement* current = NULL;
	int count = 0;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	*tokens = NULL;

	if (m == NULL)
	{
		rc = MQTTASYNC_FAILURE;
		goto exit;
	}

	/* calculate the number of pending tokens - commands plus inflight */
	while (ListNextElement(commands, &current))
	{
		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);

		if (cmd->client == m)
			count++;
	}
	if (m->c)
		count += m->c->outboundMsgs->count;
	if (count == 0)
		goto exit; /* no tokens to return */

	*tokens = malloc(sizeof(MQTTAsync_token) * (count + 1)); /* add space for sentinel at end of list */
	if (*tokens == NULL)
	{
		rc = MQTTASYNC_FAILURE;	/* out of memory */
		goto exit;
	}

	/* First add the unprocessed commands to the pending tokens */
	current = NULL;
	count = 0;
	while (ListNextElement(commands, &current))
	{
		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);

		if (cmd->client == m)
			(*tokens)[count++] = cmd->command.token;
	}

	/* Now add the inflight messages */
	if (m->c && m->c->outboundMsgs->count > 0)
	{
		current = NULL;
		while (ListNextElement(m->c->outboundMsgs, &current))
		{
			Messages* msg = (Messages*)(current->content);

			(*tokens)[count++] = msg->msgid;
		}
	}
	(*tokens)[count] = -1; /* indicate end of list */

exit:
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Tests whether the request identified by a delivery token has completed.
 *
 * Fixes over the original: the corrupted "&curren;t" text is restored to
 * "&current", and the inner Messages variable no longer shadows the client
 * variable 'm'.
 *
 * @param handle the client handle
 * @param dt the delivery token to look for
 * @return MQTTASYNC_TRUE if the token is no longer pending,
 *         MQTTASYNC_SUCCESS (0) if it is still queued or in flight,
 *         MQTTASYNC_FAILURE if the handle is invalid
 */
int MQTTAsync_isComplete(MQTTAsync handle, MQTTAsync_token dt)
{
	int rc = MQTTASYNC_SUCCESS;
	MQTTAsyncs* m = handle;
	ListElement* current = NULL;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);

	if (m == NULL)
	{
		rc = MQTTASYNC_FAILURE;
		goto exit;
	}

	/* First check unprocessed commands */
	current = NULL;
	while (ListNextElement(commands, &current))
	{
		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);

		if (cmd->client == m && cmd->command.token == dt)
			goto exit; /* still queued: rc stays MQTTASYNC_SUCCESS (not complete) */
	}

	/* Now check the inflight messages */
	if (m->c && m->c->outboundMsgs->count > 0)
	{
		current = NULL;
		while (ListNextElement(m->c->outboundMsgs, &current))
		{
			Messages* msg = (Messages*)(current->content); /* renamed: was 'm', shadowing the client */

			if (msg->msgid == dt)
				goto exit; /* still in flight: not complete */
		}
	}

	rc = MQTTASYNC_TRUE; /* Can't find it, so it must be complete */

exit:
	MQTTAsync_unlock_mutex(mqttasync_mutex);
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Blocks until the request identified by a delivery token completes, or a
 * timeout elapses. Polls MQTTAsync_isComplete every 100ms.
 *
 * Bug fix: the original jumped to the exit label (which is *after* the
 * unlock call) from the two early-error branches while still holding
 * mqttasync_mutex, leaving the mutex locked forever on a bad handle or a
 * disconnected client. The mutex is now released on every path.
 *
 * @param handle the client handle
 * @param dt the delivery token to wait on
 * @param timeout maximum time to wait, in milliseconds
 * @return MQTTASYNC_SUCCESS if the request completed,
 *         MQTTASYNC_DISCONNECTED if the client is not connected,
 *         MQTTASYNC_FAILURE on a bad handle or timeout
 */
int MQTTAsync_waitForCompletion(MQTTAsync handle, MQTTAsync_token dt, unsigned long timeout)
{
	int rc = MQTTASYNC_FAILURE;
	START_TIME_TYPE start = MQTTAsync_start_clock();
	unsigned long elapsed = 0L;
	MQTTAsyncs* m = handle;

	FUNC_ENTRY;
	MQTTAsync_lock_mutex(mqttasync_mutex);
	if (m == NULL || m->c == NULL)
	{
		MQTTAsync_unlock_mutex(mqttasync_mutex); /* fix: mutex was leaked on this path */
		rc = MQTTASYNC_FAILURE;
		goto exit;
	}
	if (m->c->connected == 0)
	{
		MQTTAsync_unlock_mutex(mqttasync_mutex); /* fix: mutex was leaked on this path */
		rc = MQTTASYNC_DISCONNECTED;
		goto exit;
	}
	MQTTAsync_unlock_mutex(mqttasync_mutex);

	if (MQTTAsync_isComplete(handle, dt) == 1)
	{
		rc = MQTTASYNC_SUCCESS; /* well we couldn't find it */
		goto exit;
	}
	elapsed = MQTTAsync_elapsed(start);
	while (elapsed < timeout)
	{
		MQTTAsync_sleep(100);
		if (MQTTAsync_isComplete(handle, dt) == 1)
		{
			rc = MQTTASYNC_SUCCESS; /* well we couldn't find it */
			goto exit;
		}
		elapsed = MQTTAsync_elapsed(start);
	}

exit:
	FUNC_EXIT_RC(rc);
	return rc;
}
/**
 * Sets the level of trace information output by the library.
 * The public MQTTASYNC_TRACE_LEVELS enum is simply cast through to the
 * internal LOG_LEVELS enum, so the two are assumed value-compatible
 * (TODO confirm the enums stay in sync if either is extended).
 */
void MQTTAsync_setTraceLevel(enum MQTTASYNC_TRACE_LEVELS level)
{
	Log_setTraceLevel((enum LOG_LEVELS)level);
}
/**
 * Registers a callback to receive the library's trace output.
 * The public MQTTAsync_traceCallback pointer is cast to the internal
 * Log_traceCallback type, so the two signatures are assumed identical
 * (TODO confirm against the Log module's declaration).
 */
void MQTTAsync_setTraceCallback(MQTTAsync_traceCallback* callback)
{
	Log_setTraceCallback((Log_traceCallback*)callback);
}
/*
 * Returns name/value pairs describing this build of the library: product
 * name, version and build timestamp, plus OpenSSL build details when
 * compiled with OpenSSL. The array lives in static storage (every caller
 * receives the same buffer) and is terminated by a NULL/NULL entry.
 */
MQTTAsync_nameValue* MQTTAsync_getVersionInfo(void)
{
#define MAX_INFO_STRINGS 8
	static MQTTAsync_nameValue libinfo[MAX_INFO_STRINGS + 1];
	int entries = 0;

	libinfo[entries].name = "Product name";
	libinfo[entries++].value = "Paho Asynchronous MQTT C Client Library";

	libinfo[entries].name = "Version";
	libinfo[entries++].value = CLIENT_VERSION;

	libinfo[entries].name = "Build level";
	libinfo[entries++].value = BUILD_TIMESTAMP;
#if defined(OPENSSL)
	libinfo[entries].name = "OpenSSL version";
	libinfo[entries++].value = SSLeay_version(SSLEAY_VERSION);

	libinfo[entries].name = "OpenSSL flags";
	libinfo[entries++].value = SSLeay_version(SSLEAY_CFLAGS);

	libinfo[entries].name = "OpenSSL build timestamp";
	libinfo[entries++].value = SSLeay_version(SSLEAY_BUILT_ON);

	libinfo[entries].name = "OpenSSL platform";
	libinfo[entries++].value = SSLeay_version(SSLEAY_PLATFORM);

	libinfo[entries].name = "OpenSSL directory";
	libinfo[entries++].value = SSLeay_version(SSLEAY_DIR);
#endif
	/* NULL/NULL sentinel marks the end of the list */
	libinfo[entries].name = NULL;
	libinfo[entries].value = NULL;
	return libinfo;
}
| {
"pile_set_name": "Github"
} |
# Lo-Dash v2.4.1
A utility library delivering consistency, [customization](http://lodash.com/custom-builds), [performance](http://lodash.com/benchmarks), & [extras](http://lodash.com/#features).
## Download
Check out our [wiki](https://github.com/lodash/lodash/wiki/build-differences) for details on the differences between builds.
* Modern builds perfect for newer browsers/environments:<br>
[Development](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.js) &
[Production](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.min.js)
* Compatibility builds for older environment support too:<br>
[Development](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.compat.js) &
[Production](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.compat.min.js)
* Underscore builds to use as a drop-in replacement:<br>
[Development](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.underscore.js) &
[Production](https://raw.github.com/lodash/lodash/2.4.1/dist/lodash.underscore.min.js)
CDN copies are available on [cdnjs](http://cdnjs.com/libraries/lodash.js/) & [jsDelivr](http://www.jsdelivr.com/#!lodash). For smaller file sizes, create [custom builds](http://lodash.com/custom-builds) with only the features needed.
Love modules? We’ve got you covered with [lodash-amd](https://npmjs.org/package/lodash-amd), [lodash-es6](https://github.com/lodash/lodash-es6), [lodash-node](https://npmjs.org/package/lodash-node), & [npm packages](https://npmjs.org/browse/keyword/lodash-modularized) per method.
## Dive in
There’s plenty of **[documentation](http://lodash.com/docs)**, [unit tests](http://lodash.com/tests), & [benchmarks](http://lodash.com/benchmarks).<br>
Check out <a href="http://devdocs.io/lodash/">DevDocs</a> as a fast, organized, & searchable interface for our documentation.
The full changelog for this release is available on our [wiki](https://github.com/lodash/lodash/wiki/Changelog).<br>
A list of upcoming features is available on our [roadmap](https://github.com/lodash/lodash/wiki/Roadmap).
## Features *not* in Underscore
* AMD loader support ([curl](https://github.com/cujojs/curl), [dojo](http://dojotoolkit.org/), [requirejs](http://requirejs.org/), etc.)
* [_(…)](http://lodash.com/docs#_) supports intuitive chaining
* [_.at](http://lodash.com/docs#at) for cherry-picking collection values
* [_.bindKey](http://lodash.com/docs#bindKey) for binding [*“lazy”*](http://michaux.ca/articles/lazy-function-definition-pattern) defined methods
* [_.clone](http://lodash.com/docs#clone) supports shallow cloning of `Date` & `RegExp` objects
* [_.cloneDeep](http://lodash.com/docs#cloneDeep) for deep cloning arrays & objects
* [_.constant](http://lodash.com/docs#constant) & [_.property](http://lodash.com/docs#property) function generators for composing functions
* [_.contains](http://lodash.com/docs#contains) accepts a `fromIndex`
* [_.create](http://lodash.com/docs#create) for easier object inheritance
* [_.createCallback](http://lodash.com/docs#createCallback) for extending callbacks in methods & mixins
* [_.curry](http://lodash.com/docs#curry) for creating [curried](http://hughfdjackson.com/javascript/2013/07/06/why-curry-helps/) functions
* [_.debounce](http://lodash.com/docs#debounce) & [_.throttle](http://lodash.com/docs#throttle) accept additional `options` for more control
* [_.findIndex](http://lodash.com/docs#findIndex) & [_.findKey](http://lodash.com/docs#findKey) for finding indexes & keys
* [_.forEach](http://lodash.com/docs#forEach) is chainable & supports exiting early
* [_.forIn](http://lodash.com/docs#forIn) for iterating own & inherited properties
* [_.forOwn](http://lodash.com/docs#forOwn) for iterating own properties
* [_.isPlainObject](http://lodash.com/docs#isPlainObject) for checking if values are created by `Object`
* [_.mapValues](http://lodash.com/docs#mapValues) for [mapping](http://lodash.com/docs#map) values to an object
* [_.memoize](http://lodash.com/docs#memoize) exposes the `cache` of memoized functions
* [_.merge](http://lodash.com/docs#merge) for a deep [_.extend](http://lodash.com/docs#extend)
* [_.noop](http://lodash.com/docs#noop) for function placeholders
* [_.now](http://lodash.com/docs#now) as a cross-browser `Date.now` alternative
* [_.parseInt](http://lodash.com/docs#parseInt) for consistent behavior
* [_.pull](http://lodash.com/docs#pull) & [_.remove](http://lodash.com/docs#remove) for mutating arrays
* [_.random](http://lodash.com/docs#random) supports returning floating-point numbers
* [_.runInContext](http://lodash.com/docs#runInContext) for easier mocking
* [_.sortBy](http://lodash.com/docs#sortBy) supports sorting by multiple properties
* [_.support](http://lodash.com/docs#support) for flagging environment features
* [_.template](http://lodash.com/docs#template) supports [*“imports”*](http://lodash.com/docs#templateSettings_imports) options & [ES6 template delimiters](http://people.mozilla.org/~jorendorff/es6-draft.html#sec-literals-string-literals)
* [_.transform](http://lodash.com/docs#transform) as a powerful alternative to [_.reduce](http://lodash.com/docs#reduce) for transforming objects
* [_.where](http://lodash.com/docs#where) supports deep object comparisons
* [_.xor](http://lodash.com/docs#xor) as a companion to [_.difference](http://lodash.com/docs#difference), [_.intersection](http://lodash.com/docs#intersection), & [_.union](http://lodash.com/docs#union)
* [_.zip](http://lodash.com/docs#zip) is capable of unzipping values
* [_.omit](http://lodash.com/docs#omit), [_.pick](http://lodash.com/docs#pick), &
[more](http://lodash.com/docs "_.assign, _.clone, _.cloneDeep, _.first, _.initial, _.isEqual, _.last, _.merge, _.rest") accept callbacks
* [_.contains](http://lodash.com/docs#contains), [_.toArray](http://lodash.com/docs#toArray), &
[more](http://lodash.com/docs "_.at, _.countBy, _.every, _.filter, _.find, _.forEach, _.forEachRight, _.groupBy, _.invoke, _.map, _.max, _.min, _.pluck, _.reduce, _.reduceRight, _.reject, _.shuffle, _.size, _.some, _.sortBy, _.where") accept strings
* [_.filter](http://lodash.com/docs#filter), [_.map](http://lodash.com/docs#map), &
[more](http://lodash.com/docs "_.countBy, _.every, _.find, _.findKey, _.findLast, _.findLastIndex, _.findLastKey, _.first, _.groupBy, _.initial, _.last, _.max, _.min, _.reject, _.rest, _.some, _.sortBy, _.sortedIndex, _.uniq") support *“_.pluck”* & *“_.where”* shorthands
* [_.findLast](http://lodash.com/docs#findLast), [_.findLastIndex](http://lodash.com/docs#findLastIndex), &
[more](http://lodash.com/docs "_.findLastKey, _.forEachRight, _.forInRight, _.forOwnRight, _.partialRight") right-associative methods
## Resources
* Podcasts
- [JavaScript Jabber](http://javascriptjabber.com/079-jsj-lo-dash-with-john-david-dalton/)
* Posts
- [Say “Hello” to Lo-Dash](http://kitcambridge.be/blog/say-hello-to-lo-dash/)
- [Custom builds in Lo-Dash 2.0](http://kitcambridge.be/blog/custom-builds-in-lo-dash-2-dot-0/)
* Videos
- [Introduction](https://vimeo.com/44154599)
- [Origins](https://vimeo.com/44154600)
- [Optimizations & builds](https://vimeo.com/44154601)
- [Native method use](https://vimeo.com/48576012)
- [Testing](https://vimeo.com/45865290)
- [CascadiaJS ’12](http://www.youtube.com/watch?v=dpPy4f_SeEk)
A list of other community created podcasts, posts, & videos is available on our [wiki](https://github.com/lodash/lodash/wiki/Resources).
## Support
Tested in Chrome 5~31, Firefox 2~25, IE 6-11, Opera 9.25~17, Safari 3-7, Node.js 0.6.21~0.10.22, Narwhal 0.3.2, PhantomJS 1.9.2, RingoJS 0.9, & Rhino 1.7RC5.<br>
Automated browser test results [are available](https://saucelabs.com/u/lodash) as well as [Travis CI](https://travis-ci.org/) builds for [lodash](https://travis-ci.org/lodash/lodash/), [lodash-cli](https://travis-ci.org/lodash/lodash-cli/), [lodash-amd](https://travis-ci.org/lodash/lodash-amd/), [lodash-node](https://travis-ci.org/lodash/lodash-node/), & [grunt-lodash](https://travis-ci.org/lodash/grunt-lodash).
Special thanks to [Sauce Labs](https://saucelabs.com/) for providing automated browser testing.<br>
[](https://saucelabs.com/ "Sauce Labs: Selenium Testing & More")
## Installation & usage
In browsers:
```html
<script src="lodash.js"></script>
```
Using [`npm`](http://npmjs.org/):
```bash
npm i --save lodash
{sudo} npm i -g lodash
npm ln lodash
```
In [Node.js](http://nodejs.org/) & [Ringo](http://ringojs.org/):
```js
var _ = require('lodash');
// or as Underscore
var _ = require('lodash/dist/lodash.underscore');
```
**Notes:**
* Don’t assign values to [special variable](http://nodejs.org/api/repl.html#repl_repl_features) `_` when in the REPL
* If Lo-Dash is installed globally, run [`npm ln lodash`](http://blog.nodejs.org/2011/03/23/npm-1-0-global-vs-local-installation/) in your project’s root directory *before* requiring it
In [Rhino](http://www.mozilla.org/rhino/):
```js
load('lodash.js');
```
In an AMD loader:
```js
require({
'packages': [
{ 'name': 'lodash', 'location': 'path/to/lodash', 'main': 'lodash' }
]
},
['lodash'], function(_) {
console.log(_.VERSION);
});
```
## Author
| [](https://twitter.com/jdalton "Follow @jdalton on Twitter") |
|---|
| [John-David Dalton](http://allyoucanleet.com/) |
## Contributors
| [](https://twitter.com/blainebublitz "Follow @BlaineBublitz on Twitter") | [](https://twitter.com/kitcambridge "Follow @kitcambridge on Twitter") | [](https://twitter.com/mathias "Follow @mathias on Twitter") |
|---|---|---|
| [Blaine Bublitz](http://www.iceddev.com/) | [Kit Cambridge](http://kitcambridge.be/) | [Mathias Bynens](http://mathiasbynens.be/) |
[](https://bitdeli.com/free "Bitdeli Badge")
| {
"pile_set_name": "Github"
} |
package org.unimodules.adapters.react.views;
import android.util.Log;
import android.view.View;
import com.facebook.react.bridge.Dynamic;
import com.facebook.react.bridge.ReadableMap;
import com.facebook.react.bridge.ReadableMapKeySetIterator;
import com.facebook.react.common.MapBuilder;
import java.util.HashMap;
import java.util.Map;
import org.unimodules.adapters.react.ArgumentsHelper;
import org.unimodules.core.ModuleRegistry;
import org.unimodules.core.ViewManager;
import org.unimodules.core.interfaces.ModuleRegistryConsumer;
/**
 * Static helpers used when wrapping a unimodules {@link ViewManager} in a
 * React Native view manager adapter: naming, exported constants/events, and
 * proxying prop updates from JS onto the native view.
 */
public class ViewManagerAdapterUtils {
  /* package */ static String getViewManagerAdapterName(ViewManager viewManager) {
    // Prefix makes the adapter's React name unique per wrapped view manager.
    return "ViewManagerAdapter_" + viewManager.getName();
  }

  /* package */ static Map<String, Object> getConstants(ViewManager viewManager) {
    Map<String, Object> constantsMap = new HashMap<>();
    constantsMap.put("eventNames", viewManager.getExportedEventNames());
    return constantsMap;
  }

  /* package */ static Map<String, Object> getExportedCustomDirectEventTypeConstants(ViewManager viewManager) {
    MapBuilder.Builder<String, Object> eventsBuilder = MapBuilder.builder();
    // Somehow Java compiler thinks getExportedEventNames() returns list of Objects.
    // ¯\_(ツ)_/¯
    for (Object exportedEventName : viewManager.getExportedEventNames()) {
      if (exportedEventName instanceof String) {
        eventsBuilder.put((String) exportedEventName, MapBuilder.of("registrationName", exportedEventName));
      }
    }
    return eventsBuilder.build();
  }

  /* package */ static <V extends View> void setProxiedProperties(String viewManagerAdapterName, ViewManager<V> viewManager, V view, ReadableMap proxiedProperties) {
    ReadableMapKeySetIterator iterator = proxiedProperties.keySetIterator();
    while (iterator.hasNextKey()) {
      String key = iterator.nextKey();
      try {
        ViewManager.PropSetterInfo setterInfo = viewManager.getPropSetterInfos().get(key);
        if (setterInfo == null) {
          throw new IllegalArgumentException("No setter found for prop " + key + " in " + viewManagerAdapterName);
        }
        // Convert the dynamic JS value into the Java type the setter expects.
        Dynamic dynamicValue = proxiedProperties.getDynamic(key);
        Object nativeValue = ArgumentsHelper.getNativeArgumentForExpectedClass(dynamicValue, setterInfo.getExpectedValueClass());
        viewManager.updateProp(view, key, nativeValue);
      } catch (Exception e) {
        // One bad prop should not stop the remaining props from being applied.
        Log.e(viewManagerAdapterName, "Error when setting prop " + key + ". " + e.getMessage());
      }
    }
  }

  /* package */ static void setModuleRegistryOnViewManager(ViewManager viewManager, ModuleRegistry moduleRegistry) {
    if (viewManager instanceof ModuleRegistryConsumer) {
      ((ModuleRegistryConsumer) viewManager).setModuleRegistry(moduleRegistry);
    }
  }
}
| {
"pile_set_name": "Github"
} |
i didn't hate the big hit , even though it is a stupefyingly terrible film .
for the entirety of its running time , my eyes were attached to the screen , and i never once got bored .
i found the film interesting because of its unique awfulness : this is such a confused disaster of a film that it's entertaining to watch it in the same way that it's entertaining to witness a thirty car pileup on a freeway spaghetti bowl .
as a narrative , the big hit is pure garbage , never truly deciding its genre and constantly crossing all kinds of boundaries .
it reminded me a lot of grosse pointe blank , which is a similar film that fails in a lot of the same ways .
it's fine to mix genres , if the film makers know what they're doing .
unfortunately , writer ben ramsey and director kirk wong don't seem to know how to handle the material , and the result is an action film that wants to be a comedy .
the biggest problem with the amalgamation , in this case , is that the film is absurd and the comedy is out of place .
but it sure is a fascinating failure .
marky .
.
.
er , mark wahlberg stars as melvin surley , a hitman .
he's a good hitman , apparently , although his tactics seem a bit rambunctious ( he doesn't snipe or make clean kills--he just kicks the door down and shoots everything ) .
he works with a few other hitmen--cisco ( lou diamond phillips ) , crunch ( bokeem woodbine ) , and vinnie ( antonio sabbato , jr . ) .
they're a nice bunch of muscular guys , who stand around in the locker room after working out and compare masturbation to sex .
they all work for a man named paris ( avery brooks ) , who is rich , powerful , and in constant need of four sloppy hitmen .
it is very important that they never go beyond their boss and do work on their own ; this , of course , is where the plot comes in .
they decide to kidnap a young japanese girl named keiko ( china chow ) , who has a rich father .
when they do this , it turns out she is paris' .
.
.
goddaughter !
it's very bad to have paris against you .
the story is standard action film stuff .
it's nothing new , and nothing particularly offensive ( but certainly not the slightest bit compelling ) .
clearly the major selling point of the big hit is that it's a john woo-type of action film mixed in with some really hip comedy .
it's true that a lot of the action sequences resemble recent films that go for the same idea ( such as face/off and the replacement killers , which are both far superior ) .
there are stunts that are fun to watch : the opening sequence has melvin and two of his partners going in to kill some guy who has nothing to do with the story .
they use night vision goggles and really powerful handguns .
melvin is also very good at breakdancing , and uses this talent to avoid bullets and knives .
like i said , it's not boring ; most disasters aren't .
what makes it so bad is its genre-shifting madness .
it seems to start out as a quirky-but-realistic action comedy , as melvin is seen transporting bags of human remains .
then , as soon as they go to their first hit , it turns into a music video with bodies and bullets flying everywhere .
then , somewhere in between , it turns back into comedy .
keiko turns out to be a spunky little girl ; in one amusing scene , she is forced to read a letter out loud , indicating that she has been kidnapped , but the letter is littered with grammatical errors that skew the meaning of the words .
and scenes like this work alone , but wong applies this goofy tone to scenes that should be more serious , or not be in the film at all .
one of the most irritating moments has paris ordering cisco to come up to his office after he learns that keiko has been kidnapped ; when cisco gets there , paris and his men are standing around as if they knew cisco did it , but they let him out of there , telling him to find the perpetrator .
the scene is played for laughs , but it isn't funny .
and since it doesn't quite work as a comedy , it tries to fall back on the action , which also fails .
this is a film where people fly fifteen feet backwards when shot with a handgun .
cars land on tree branches and are supported by them .
characters betray each other without a second thought .
grenades are thrown in tight places .
people jump out of tall buildings and survive .
people outrun tumbling cars , and get out of the way of falling objects in small fractions of seconds .
most frustrating of all , though , is the film's definition of a hit man : these guys are anything but subtle , quiet , and skilled individuals .
they're more like socially depraved militia men ( melvin has an extensive collection of firearms in his garage , including missile launchers and hand-held machineguns ) .
the characters each have one trait that distinguishes them from the rest ; this obviously doesn't make for deep or interesting people to watch .
the acting is kind of fun , though--wahlberg is a good actor , and his innocuous presence in this film is charming in a silly sort of way .
phillips certainly has fun with his psychotic character , while christina applegate , who plays melvin's fiancée , is convincingly air-headed .
the big hit is an action film that unknowingly spoofs itself in trying to be funny .
the funniest parts are supplied by the actors , and not by the numerous failed attempts at sight gags and one-liners .
it's a true disaster , one that makes me believe that the goofy and unrealistic tone is completely unintentional .
despite all this , though , i must reiterate the entertainment value here .
you can cherish the awfulness of a film like this .
if you embrace the big hit for the catastrophe that it is , you just might enjoy yourself .
| {
"pile_set_name": "Github"
} |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import cv2
import numpy as np
from numpy.testing import assert_array_equal
import mmcv
class TestPhotometric:
    """Tests for mmcv's photometric image transforms.

    Covers (de)normalization, inversion, solarize/posterize, color,
    brightness and contrast adjustment, and histogram equalization,
    checking results against hand-computed arrays or against PIL
    reference implementations on random inputs.
    """

    @classmethod
    def setup_class(cls):
        # the test img resolution is 400x300
        cls.img_path = osp.join(osp.dirname(__file__), '../data/color.jpg')
        cls.img = cv2.imread(cls.img_path)
        # per-channel RGB mean/std values (ImageNet-style constants)
        cls.mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
        cls.std = np.array([58.395, 57.12, 57.375], dtype=np.float32)

    def test_imnormalize(self):
        """imnormalize: BGR->RGB conversion (by default), subtract mean,
        divide by std; must return a new array, not a view."""
        rgb_img = self.img[:, :, ::-1]
        baseline = (rgb_img - self.mean) / self.std
        img = mmcv.imnormalize(self.img, self.mean, self.std)
        assert np.allclose(img, baseline)
        # result is a fresh array
        assert id(img) != id(self.img)
        # with to_rgb=False the input is already RGB; same baseline applies
        img = mmcv.imnormalize(rgb_img, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) != id(rgb_img)

    def test_imnormalize_(self):
        """imnormalize_ (in-place variant): must modify and return the very
        same float array object it was given."""
        img_for_normalize = np.float32(self.img)
        rgb_img_for_normalize = np.float32(self.img[:, :, ::-1])
        baseline = (rgb_img_for_normalize - self.mean) / self.std
        img = mmcv.imnormalize_(img_for_normalize, self.mean, self.std)
        assert np.allclose(img_for_normalize, baseline)
        # in-place: same object comes back
        assert id(img) == id(img_for_normalize)
        img = mmcv.imnormalize_(
            rgb_img_for_normalize, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) == id(rgb_img_for_normalize)

    def test_imdenormalize(self):
        """imdenormalize: multiply by std, add mean, and (by default)
        convert RGB back to BGR — the inverse of imnormalize."""
        norm_img = (self.img[:, :, ::-1] - self.mean) / self.std
        rgb_baseline = (norm_img * self.std + self.mean)
        bgr_baseline = rgb_baseline[:, :, ::-1]
        img = mmcv.imdenormalize(norm_img, self.mean, self.std)
        assert np.allclose(img, bgr_baseline)
        img = mmcv.imdenormalize(norm_img, self.mean, self.std, to_bgr=False)
        assert np.allclose(img, rgb_baseline)

    def test_iminvert(self):
        """iminvert: every uint8 pixel v maps to 255 - v."""
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[255, 127, 0], [254, 128, 1], [253, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.iminvert(img), img_r)

    def test_solarize(self):
        """solarize: pixels at or above the threshold (default 128 per the
        expected outputs below) are inverted; others pass through."""
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img), img_r)
        # with a lower threshold (100), 127 is also inverted to 128
        img_r = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img, 100), img_r)

    def test_posterize(self):
        """posterize: keep only the given number of high-order bits."""
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        # 1 bit: values collapse to 0 or 128
        img_r = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 1), img_r)
        # 3 bits: multiples of 32 survive
        img_r = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 3), img_r)

    def test_adjust_color(self):
        """adjust_color: blends the image with its grayscale version;
        expected values are computed here as clip(alpha*img + beta*gray
        + gamma) — TODO confirm this matches mmcv's documented formula."""
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        # default factor leaves the image unchanged
        assert_array_equal(mmcv.adjust_color(img), img)
        img_gray = mmcv.bgr2gray(img)
        img_r = np.stack([img_gray, img_gray, img_gray], axis=-1)
        # factor 0 yields the pure grayscale image
        assert_array_equal(mmcv.adjust_color(img, 0), img_r)
        assert_array_equal(mmcv.adjust_color(img, 0, 1), img_r)
        assert_array_equal(
            mmcv.adjust_color(img, 0.5, 0.5),
            np.round(np.clip((img * 0.5 + img_r * 0.5), 0,
                             255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 1, 1.5),
            np.round(np.clip(img * 1 + img_r * 1.5, 0, 255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 0.8, -0.6, gamma=2),
            np.round(np.clip(img * 0.8 - 0.6 * img_r + 2, 0,
                             255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 0.8, -0.6, gamma=-0.6),
            np.round(np.clip(img * 0.8 - 0.6 * img_r - 0.6, 0,
                             255)).astype(img.dtype))
        # test float type of image
        img = img.astype(np.float32)
        assert_array_equal(
            np.round(mmcv.adjust_color(img, 0.8, -0.6, gamma=-0.6)),
            np.round(np.clip(img * 0.8 - 0.6 * img_r - 0.6, 0, 255)))

    def test_imequalize(self, nb_rand_test=100):
        """imequalize: must agree with PIL.ImageOps.equalize, including the
        degenerate flat-histogram case and random images."""

        def _imequalize(img):
            # equalize the image using PIL.ImageOps.equalize
            from PIL import ImageOps, Image
            img = Image.fromarray(img)
            equalized_img = np.asarray(ImageOps.equalize(img))
            return equalized_img

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        equalized_img = mmcv.imequalize(img)
        assert_array_equal(equalized_img, _imequalize(img))
        # test equalize with case step=0
        img = np.array([[0, 0, 0], [120, 120, 120], [255, 255, 255]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        assert_array_equal(mmcv.imequalize(img), img)
        # test equalize with randomly sampled image.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
                255).astype(np.uint8)
            equalized_img = mmcv.imequalize(img)
            assert_array_equal(equalized_img, _imequalize(img))

    def test_adjust_brightness(self, nb_rand_test=100):
        """adjust_brightness: must agree with PIL.ImageEnhance.Brightness
        (within +/-1 due to rounding differences) on random inputs."""

        def _adjust_brightness(img, factor):
            # adjust the brightness of image using
            # PIL.ImageEnhance.Brightness
            from PIL.ImageEnhance import Brightness
            from PIL import Image
            img = Image.fromarray(img)
            brightened_img = Brightness(img).enhance(factor)
            return np.asarray(brightened_img)

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        # test case with factor 1.0
        assert_array_equal(mmcv.adjust_brightness(img, 1.), img)
        # test case with factor 0.0
        assert_array_equal(mmcv.adjust_brightness(img, 0.), np.zeros_like(img))
        # test adjust_brightness with randomly sampled images and factors.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
                255).astype(np.uint8)
            factor = np.random.uniform()
            np.testing.assert_allclose(
                mmcv.adjust_brightness(img, factor).astype(np.int32),
                _adjust_brightness(img, factor).astype(np.int32),
                rtol=0,
                atol=1)

    def test_adjust_contrast(self, nb_rand_test=100):
        """adjust_contrast: must agree with PIL.ImageEnhance.Contrast
        (within +/-1) on random inputs; note the BGR<->RGB round trip in
        the PIL reference helper."""

        def _adjust_contrast(img, factor):
            from PIL.ImageEnhance import Contrast
            from PIL import Image
            # Image.fromarray defaultly supports RGB, not BGR.
            # convert from BGR to RGB
            img = Image.fromarray(img[..., ::-1], mode='RGB')
            contrasted_img = Contrast(img).enhance(factor)
            # convert from RGB to BGR
            return np.asarray(contrasted_img)[..., ::-1]

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        # test case with factor 1.0
        assert_array_equal(mmcv.adjust_contrast(img, 1.), img)
        # test case with factor 0.0
        assert_array_equal(
            mmcv.adjust_contrast(img, 0.), _adjust_contrast(img, 0.))
        # test adjust_contrast with randomly sampled images and factors.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0,
                255).astype(np.uint8)
            factor = np.random.uniform()
            # Note the gap (less_equal 1) between PIL.ImageEnhance.Contrast
            # and mmcv.adjust_contrast comes from the gap that converts from
            # a color image to gray image using mmcv or PIL.
            np.testing.assert_allclose(
                mmcv.adjust_contrast(img, factor).astype(np.int32),
                _adjust_contrast(img, factor).astype(np.int32),
                rtol=0,
                atol=1)
| {
"pile_set_name": "Github"
} |
import UIKit
/// Runs `closure` on the main queue after `delay` seconds.
func delay(_ delay: Double, closure: @escaping () -> ()) {
    DispatchQueue.main.asyncAfter(deadline: DispatchTime.now() + delay, execute: closure)
}
/// Settings screen. Acts as its navigation controller's delegate so it can
/// restrict supported orientations to portrait, and launches the color
/// picker from its button action.
class SettingsController: UIViewController, UINavigationControllerDelegate {

    override func viewDidLoad() {
        super.viewDidLoad()
        navigationController?.delegate = self
    }

    // UINavigationControllerDelegate: lock this stack to portrait.
    func navigationControllerSupportedInterfaceOrientations(_ nav: UINavigationController) -> UIInterfaceOrientationMask {
        return .portrait
    }

    @IBAction func doButton(_ sender: Any) {
        showColorPicker()
    }
}
extension SettingsController: ColorPickerDelegate {

    /// Presents a ColorPickerController seeded with a name and starting
    /// color, with this controller as its delegate.
    func showColorPicker() {
        let picker = ColorPickerController(colorName: "MyColor", color: UIColor.blue)
        picker.delegate = self
        present(picker, animated: true)
    }

    // delegate method
    func colorPicker(_ picker: ColorPickerController,
                     didSetColorNamed theName: String?,
                     to theColor: UIColor?) {
        print("the delegate method was called")
        // Dismiss slightly later so the picker finishes its own work first.
        delay(0.1) {
            picker.dismiss(animated: true)
        }
    }
}
| {
"pile_set_name": "Github"
} |
{
"name": "Youtube Vitess",
"description": "provides servers and tools which facilitate scaling of MySQL databases for large scale web services",
"abstract": "provides servers and tools which facilitate scaling of MySQL databases for large scale web services",
"category": "MySQL forks and evolutions",
"tags": [
],
"links": [
{
"text": "Website",
"url": "https://github.com/youtube/vitess"
}
]
} | {
"pile_set_name": "Github"
} |
/*
MIT Copyright Notice
Copyright 2003 M.I.T.
Permission is hereby granted, without written agreement or royalty fee, to use,
copy, modify, and distribute this software and its documentation for any
purpose, provided that the above copyright notice and the following three
paragraphs appear in all copies of this software.
IN NO EVENT SHALL M.I.T. BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE
AND ITS DOCUMENTATION, EVEN IF M.I.T. HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMANGE.
M.I.T. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
AND NON-INFRINGEMENT.
THE SOFTWARE IS PROVIDED ON AN "AS-IS" BASIS AND M.I.T. HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
$Author: tleek $
$Date: 2004/01/05 17:27:41 $
$Header: /mnt/leo2/cvs/sabo/hist-040105/bind/b1/create_msg_file.c,v 1.1.1.1 2004/01/05 17:27:41 tleek Exp $
*/
/*
BIND Copyright Notice
Copyright (C) 2000-2002 Internet Software Consortium.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM
DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
$Author: tleek $
$Date: 2004/01/05 17:27:41 $
$Header: /mnt/leo2/cvs/sabo/hist-040105/bind/b1/create_msg_file.c,v 1.1.1.1 2004/01/05 17:27:41 tleek Exp $
*/
/*
<source>
*/
#include <stdio.h>
#include <sys/types.h>
#include <arpa/nameser.h>
#include <arpa/nameser_compat.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#include <resolv.h>
/*
 * Builds a DNS NXT resource record for "lcs.mit.edu" (with a deliberately
 * understated dlen, as the original comment admits) and writes the raw
 * bytes to the file "SIGFILE" for use as test input.
 *
 * Fixes over the original: malloc/fopen/dn_comp results are checked before
 * use, domain-name bytes are copied with memcpy so the allocation pointers
 * are not advanced (allowing them to be freed), size_t values are cast for
 * the %d printf conversions, and the unused 'now' variable is removed.
 */
int main(){
	FILE *f;
	u_char buf[1000];
	u_char *p;
	char *temp, *temp1;
	u_char *comp_dn, *comp_dn2;
	char exp_dn[200], exp_dn2[200];
	u_char **dnptrs, **lastdnptr, **dnptrs2;
	int i, len = 0, comp_size;

	dnptrs = (unsigned char **) malloc(2 * sizeof(unsigned char *));
	dnptrs2 = (unsigned char **) malloc(2 * sizeof(unsigned char *));
	comp_dn = (unsigned char *) malloc(200*sizeof(unsigned char));
	comp_dn2 = (unsigned char *) malloc(200*sizeof(unsigned char));
	temp1 = (char *) malloc(400*sizeof(char));
	if (dnptrs == NULL || dnptrs2 == NULL || comp_dn == NULL ||
	    comp_dn2 == NULL || temp1 == NULL) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}

	temp = temp1;
	p = buf;

	/* fake header before the record itself */
	strcpy(temp, "HEADER JUNK:");
	len += strlen(temp);
	while (*temp != '\0')
		*p++ = *temp++;

	strcpy(exp_dn, "lcs.mit.edu");
	*dnptrs++ = (u_char *) exp_dn;
	*dnptrs-- = NULL;   /* dnptrs is back at its original value here */
	lastdnptr = NULL;

	printf("Calling dn_comp..\n");
	comp_size = dn_comp((const char *) exp_dn, comp_dn, 200, dnptrs, lastdnptr);
	if (comp_size < 0) {
		/* fix: a -1 failure was previously used as a copy length */
		fprintf(stderr, "dn_comp failed\n");
		return 1;
	}
	printf("uncomp_size = %d\n", (int)strlen(exp_dn));
	printf("comp_size = %d\n", comp_size);
	printf("exp_dn = %s, comp_dn = %s\n", exp_dn, (char *) comp_dn);

	/* copy the compressed owner name (memcpy keeps comp_dn freeable) */
	memcpy(p, comp_dn, comp_size);
	p += comp_size;
	len += comp_size;

	PUTSHORT(30, p); /* type = T_NXT = 30 */
	p += 2;
	PUTSHORT(255, p); /* class = C_ANY = 255*/
	p += 2;
	PUTLONG(255, p); /* ttl */
	p += 4;
	PUTSHORT(16, p); /* dlen = len of everything starting with the covered byte
	                    (the length of the entire resource record... we lie about it
	                 */
	p += 2;
	len += 10;

	strcpy(exp_dn2, "sls.lcs.mit.edu"); /* domain name */
	*dnptrs2++ = (u_char *) exp_dn2;
	*dnptrs2-- = NULL;  /* dnptrs2 is back at its original value here */
	lastdnptr = NULL;

	printf("Calling dn_comp..\n");
	comp_size = dn_comp((const char *) exp_dn2, comp_dn2, 200, dnptrs2, lastdnptr);
	if (comp_size < 0) {
		fprintf(stderr, "dn_comp failed\n");
		return 1;
	}
	printf("uncomp_size = %d\n", (int)strlen(exp_dn2));
	printf("comp_size = %d\n", comp_size);
	printf("exp_dn2 = %s, comp_dn2 = %s\n", exp_dn2, (char *) comp_dn2);

	memcpy(p, comp_dn2, comp_size);
	p += comp_size;
	len += comp_size;

	PUTLONG(1 << 24, p); /* bitmap - set NXT type*/
	p += 4;
	PUTLONG(0, p);
	p += 4;
	PUTLONG(0, p);
	p += 4;
	PUTLONG(0, p);
	p += 4;
	len += 16;

	f = fopen("SIGFILE", "w");
	if (f == NULL) {
		/* fix: a failed fopen was previously dereferenced by fputc */
		perror("SIGFILE");
		return 1;
	}
	p = buf;
	printf("len = %d\n", len);
	for(i=0; i<len; i++, p++) /* write record into file */
		fputc(*p, f);
	fclose(f);

	free(dnptrs);
	free(dnptrs2);
	free(comp_dn);
	free(comp_dn2);
	free(temp1);
	return 0;
}
/*
</source>
*/
| {
"pile_set_name": "Github"
} |
package com.espressif.iot.logintool;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import com.espressif.iot.logintool.Platform.Type;
import android.content.Context;
import android.util.Xml;
/**
 * Parses the bundled {@code EspLogin.xml} asset into a list of {@link Platform}
 * login configurations (type plus AppId/AppKey/AppSecret per platform).
 */
class LoginXMLParser {
    private static final String ENCODING_UTF8 = "UTF-8";

    private static final String TAG_PLATFORM = "Platform";
    private static final String ATTR_NAME = "name";
    private static final String ATTR_APP_ID = "AppId";
    private static final String ATTR_APP_KEY = "AppKey";
    private static final String ATTR_APP_SECRET = "AppSecret";

    private static final String FILE_NAME = "EspLogin.xml";

    private final Context mContext;

    LoginXMLParser(Context context) {
        mContext = context;
    }

    /**
     * Parse the asset file.
     *
     * @return the platforms declared in the file, or {@code null} if the file
     *         could not be opened or parsed
     */
    List<Platform> parse() {
        InputStream is = null;
        try {
            is = mContext.getAssets().open(FILE_NAME);
            if (is == null) {
                return null;
            }

            List<Platform> result = new LinkedList<Platform>();

            XmlPullParser xpp = Xml.newPullParser();
            xpp.setInput(is, ENCODING_UTF8);
            Platform.Builder builder = new Platform.Builder();
            for (int eventType = xpp.getEventType(); eventType != XmlPullParser.END_DOCUMENT; eventType = xpp.next()) {
                switch (eventType) {
                    case XmlPullParser.START_TAG:
                        final String nameStart = xpp.getName();
                        if (nameStart.equals(TAG_PLATFORM)) {
                            // A new <Platform> element: start a fresh builder
                            // and copy its attributes into it.
                            builder = new Platform.Builder();
                            int attrCount = xpp.getAttributeCount();
                            for (int i = 0; i < attrCount; i++) {
                                String attrName = xpp.getAttributeName(i);
                                String attrValue = xpp.getAttributeValue(i);
                                if (attrName.equals(ATTR_NAME)) {
                                    // Only the QQ platform type is recognized here.
                                    if (attrValue.equals(Type.QQ.name())) {
                                        builder.setType(Type.QQ);
                                    }
                                } else if (attrName.equals(ATTR_APP_ID)) {
                                    builder.setAppId(attrValue);
                                } else if (attrName.equals(ATTR_APP_KEY)) {
                                    builder.setAppKey(attrValue);
                                } else if (attrName.equals(ATTR_APP_SECRET)) {
                                    builder.setAppSecret(attrValue);
                                }
                            }
                        }
                        break;
                    case XmlPullParser.END_TAG:
                        final String nameEnd = xpp.getName();
                        if (nameEnd.equals(TAG_PLATFORM)) {
                            result.add(builder.create());
                        }
                        break;
                }
            }

            return result;
        } catch (IOException e) {
            e.printStackTrace();
        } catch (XmlPullParserException e) {
            e.printStackTrace();
        } finally {
            // BUG FIX: the original never closed the asset stream, leaking it
            // on every call to parse().
            if (is != null) {
                try {
                    is.close();
                } catch (IOException ignored) {
                    // best-effort close
                }
            }
        }
        return null;
    }
}
| {
"pile_set_name": "Github"
} |
{
"CVE_data_meta": {
"ASSIGNER": "[email protected]",
"ID": "CVE-2008-5361",
"STATE": "PUBLIC"
},
"affects": {
"vendor": {
"vendor_data": [
{
"product": {
"product_data": [
{
"product_name": "n/a",
"version": {
"version_data": [
{
"version_value": "n/a"
}
]
}
}
]
},
"vendor_name": "n/a"
}
]
}
},
"data_format": "MITRE",
"data_type": "CVE",
"data_version": "4.0",
"description": {
"description_data": [
{
"lang": "eng",
"value": "The ActionScript 2 virtual machine in Adobe Flash Player 10.x before 10.0.12.36 and 9.x before 9.0.151.0, and Adobe AIR before 1.5, does not verify a member element's size when performing (1) DefineConstantPool, (2) ActionJump, (3) ActionPush, (4) ActionTry, and unspecified other actions, which allows remote attackers to read sensitive data from process memory via a crafted PDF file."
}
]
},
"problemtype": {
"problemtype_data": [
{
"description": [
{
"lang": "eng",
"value": "n/a"
}
]
}
]
},
"references": {
"reference_data": [
{
"name": "20081122 Adobe Flash Multiple Vulnerabilities",
"refsource": "BUGTRAQ",
"url": "http://www.securityfocus.com/archive/1/498561/100/0/threaded"
},
{
"name": "33390",
"refsource": "SECUNIA",
"url": "http://secunia.com/advisories/33390"
},
{
"name": "http://www.isecpartners.com/advisories/2008-01-flash.txt",
"refsource": "MISC",
"url": "http://www.isecpartners.com/advisories/2008-01-flash.txt"
},
{
"name": "http://support.avaya.com/elmodocs2/security/ASA-2009-020.htm",
"refsource": "CONFIRM",
"url": "http://support.avaya.com/elmodocs2/security/ASA-2009-020.htm"
},
{
"name": "http://www.adobe.com/support/security/bulletins/apsb08-22.html",
"refsource": "MISC",
"url": "http://www.adobe.com/support/security/bulletins/apsb08-22.html"
},
{
"name": "34226",
"refsource": "SECUNIA",
"url": "http://secunia.com/advisories/34226"
},
{
"name": "4692",
"refsource": "SREASON",
"url": "http://securityreason.com/securityalert/4692"
},
{
"name": "GLSA-200903-23",
"refsource": "GENTOO",
"url": "http://security.gentoo.org/glsa/glsa-200903-23.xml"
},
{
"name": "248586",
"refsource": "SUNALERT",
"url": "http://sunsolve.sun.com/search/document.do?assetkey=1-26-248586-1"
}
]
}
} | {
"pile_set_name": "Github"
} |
$(document).ready(function() {
  /* global Cookies */

  // Path of the current page relative to the origin (no scheme/host).
  var relativePath = window.location.href.replace(window.location.origin, '');

  // Persist the scroll offset (debounced to 250 ms) so it can be restored
  // the next time this page is loaded.
  var saveTimer;
  $(window).on('scroll', function() {
    clearTimeout(saveTimer);
    saveTimer = setTimeout(function() {
      Cookies.set('scroll-cookie', $(window).scrollTop() + '|' + relativePath, { expires: 365, path: '' });
    }, 250);
  });

  // Restore the stored position, but only if the cookie was written for
  // this same page.
  var stored = Cookies.get('scroll-cookie');
  if (stored !== undefined) {
    var parts = stored.split('|');
    if (parts[1] === relativePath) {
      $(window).scrollTop(parts[0]);
    }
  }
});
| {
"pile_set_name": "Github"
} |
/* General */

/* Elements that behave like links/buttons get a pointer cursor. */
.clickable-item {
  cursor: pointer;
}

.text-bold {
  font-weight: bold;
}

/* Portlet */

/* Full-width inputs inside a light portlet title must fill the row, not float. */
.portlet.light > .portlet-title .inputs.inputs-full-width {
  display: block;
  float: none;
}

.famfamfam-flags {
  display: inline-block;
}

/* NOTE(review): selector has no leading '.' or '#' -- presumably a custom
   element (or a typo for .topbar-languageswitch); verify against the markup. */
topbar-languageswitch {
  float: left;
  margin-top: 25px;
  margin-left: 10px;

  /* LESS nesting: links inside the switch stay white, also on hover. */
  a {
    text-decoration: none;
    color: #fff;

    &:hover {
      text-decoration: none;
      color: #fff;
    }
  }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "self:RLottie.xcodeproj">
</FileRef>
</Workspace>
| {
"pile_set_name": "Github"
} |
/**
* Created by jiachenpan on 16/11/18.
*/
/**
 * Format a time value according to a pattern such as '{y}-{m}-{d} {h}:{i}:{s}'.
 * Accepts a Date, a millisecond timestamp, a 10-digit (second) timestamp, or
 * a purely numeric string. Returns null when called with no arguments.
 */
export function parseTime(time, cFormat) {
  if (arguments.length === 0) {
    return null
  }
  const pattern = cFormat || '{y}-{m}-{d} {h}:{i}:{s}'
  let date
  if (typeof time === 'object') {
    date = time
  } else {
    if (typeof time === 'string' && /^[0-9]+$/.test(time)) {
      time = parseInt(time)
    }
    // Ten digits means seconds; scale up to milliseconds.
    if (typeof time === 'number' && time.toString().length === 10) {
      time = time * 1000
    }
    date = new Date(time)
  }
  const parts = {
    y: date.getFullYear(),
    m: date.getMonth() + 1,
    d: date.getDate(),
    h: date.getHours(),
    i: date.getMinutes(),
    s: date.getSeconds(),
    a: date.getDay()
  }
  return pattern.replace(/{(y|m|d|h|i|s|a)+}/g, (match, key) => {
    let value = parts[key]
    // Note: getDay() returns 0 on Sunday
    if (key === 'a') { return ['日', '一', '二', '三', '四', '五', '六'][value] }
    if (match.length > 0 && value < 10) {
      value = '0' + value
    }
    return value || 0
  })
}
export function formatTime(time, option) {
time = +time * 1000
const d = new Date(time)
const now = Date.now()
const diff = (now - d) / 1000
if (diff < 30) {
return '刚刚'
} else if (diff < 3600) {
// less 1 hour
return Math.ceil(diff / 60) + '分钟前'
} else if (diff < 3600 * 24) {
return Math.ceil(diff / 3600) + '小时前'
} else if (diff < 3600 * 24 * 2) {
return '1天前'
}
if (option) {
return parseTime(time, option)
} else {
return (
d.getMonth() +
1 +
'月' +
d.getDate() +
'日' +
d.getHours() +
'时' +
d.getMinutes() +
'分'
)
}
}
/** True when `path` is an absolute external reference (http/https/mailto/tel). */
export function isExternal(path) {
  const externalPattern = /^(https?:|mailto:|tel:)/
  return externalPattern.test(path)
}
/**
 * Return a debounced wrapper around `func`.
 *
 * Trailing-edge by default: `func` runs once `wait` ms have passed without a
 * new call. With `immediate` truthy, `func` runs on the leading edge instead
 * and further calls are suppressed until the quiet period elapses.
 * The wrapper returns the result of the most recent actual invocation (or
 * undefined before the first one).
 */
export function debounce(func, wait, immediate) {
  let timeout, args, context, timestamp, result

  const later = function() {
    // Time elapsed since the wrapper was last triggered.
    const last = +new Date() - timestamp

    // Triggered again within the wait window: re-arm for the remainder.
    if (last < wait && last > 0) {
      timeout = setTimeout(later, wait - last)
    } else {
      timeout = null
      // With immediate === true the call already ran on the leading edge,
      // so nothing more to do here.
      if (!immediate) {
        result = func.apply(context, args)
        if (!timeout) context = args = null
      }
    }
  }

  return function(...args) {
    context = this
    timestamp = +new Date()
    const callNow = immediate && !timeout
    // No timer pending: start one.
    if (!timeout) timeout = setTimeout(later, wait)
    if (callNow) {
      result = func.apply(context, args)
      context = args = null
    }

    return result
  }
}
| {
"pile_set_name": "Github"
} |
**This airport has been automatically generated**
We have no information about FLCS[*] airport other than its name, ICAO and location (ZM).
This airport will have to be done from scratch, which includes adding runways, taxiways, parking locations, boundaries...
Good luck if you decide to do this airport! | {
"pile_set_name": "Github"
} |
# Contributor: Mika Havela <[email protected]>
# Maintainer: Francesco Colista <[email protected]>
pkgname=swatch
_realname=swatchdog
pkgver=3.2.4
pkgrel=5
pkgdesc="Logfile monitoring tool"
url="https://sourceforge.net/projects/swatch/"
arch="noarch"
license="GPL-2.0-only"
depends="perl perl-date-calc perl-date-format perl-date-manip perl-file-tail perl-carp-clan"
makedepends="perl-dev"
subpackages="$pkgname-doc $pkgname-openrc"
source="
$pkgname-$pkgver.tar.gz::https://downloads.sourceforge.net/project/$pkgname/$_realname/$_realname-$pkgver.tar.gz
swatch.initd
swatch.confd
swatchrc
"
builddir="$srcdir"/$_realname-$pkgver
build() {
	# PERL_MM_USE_DEFAULT=1 answers ExtUtils::MakeMaker prompts with defaults;
	# INSTALLDIRS=vendor targets the vendor perl directories.
	PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
	make
}
check() {
	# Run the module's test suite.
	make test
}
package() {
	# Install into the staging directory, then clean build artifacts.
	make DESTDIR="$pkgdir" install
	make realclean

	# remove perllocal.pod and .packlist
	find "$pkgdir" -name perllocal.pod -delete
	find "$pkgdir" -name .packlist -delete

	# Install the OpenRC init/conf files and the default swatch config.
	mkdir -p "$pkgdir"/etc/init.d/ "$pkgdir"/etc/conf.d/ \
		"$pkgdir"/etc/$pkgname/
	cp "$srcdir"/$pkgname.initd "$pkgdir"/etc/init.d/$pkgname
	cp "$srcdir"/$pkgname.confd "$pkgdir"/etc/conf.d/$pkgname
	cp "$srcdir"/${pkgname}rc "$pkgdir"/etc/$pkgname/${pkgname}rc
	chmod 755 "$pkgdir"/etc/init.d/$pkgname
}
sha512sums="4e0a4e3feed00df0f0d04f94cc090e53e71fa9b20d46236ec41d63b98b5733d80a5941b491cffcbb0b655a9c7d2b5c9423ca7ae043346dbe1b05ee6ab24b9489 swatch-3.2.4.tar.gz
4d274d4875664dee989016be71d8e72583766b88da684a4f0c242ec8d7b213c2a917395ee15cd5d446d2dbbfc9665bb75729e010a8fdcc6ba12985a19354b0ca swatch.initd
493f3e54e6472ee349c3d56537d3ad88405936d6551defa802911625e84a9afabc13c4975e2edfe63e723c48cfdfe09b966e807d05ca218b4cd238889b656fde swatch.confd
6579b3e998640f571097cddb65c7772f62855605fb38fbeb4e53976d72faacb6bdb90f7392727170e0e523d19ae74f19b30ec5fd0c5fab8c67f64976ee0f2d73 swatchrc"
| {
"pile_set_name": "Github"
} |
---
name: IceWarp
wfh: Required
travel: Restricted
visitors: Restricted
events: Restricted
last_update: 2020-03-11
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2017 Robert Ramey
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// test construction assignments
#include <iostream>
#include <boost/safe_numerics/safe_integer.hpp>
template <class T>
using safe_t = boost::safe_numerics::safe<
T,
boost::safe_numerics::native
>;
#include <boost/mp11/list.hpp>
#include <boost/mp11/algorithm.hpp>
#include "test_values.hpp"
// note: same test matrix as used in test_checked. Here we test all combinations
// safe and unsafe integers. in test_checked we test all combinations of
// integer primitives
const char *test_assignment_result[boost::mp11::mp_size<test_values>::value] = {
// 0 0 0 0
// 012345670123456701234567012345670
// 012345678901234567890123456789012
/* 0*/ ".....xx..xx..xx...xx.xxx.xxx.xxx.",
/* 1*/ ".....xx..xx..xx...xx.xxx.xxx.xxx.",
/* 2*/ ".....xx..xx..xx...xx.xxx.xxx.xxx.",
/* 3*/ ".....xx..xx..xx...xx.xxx.xxx.xxx.",
/* 4*/ ".........xx..xx.......xx.xxx.xxx.",
/* 5*/ ".........xx..xx.......xx.xxx.xxx.",
/* 6*/ ".........xx..xx.......xx.xxx.xxx.",
/* 7*/ ".........xx..xx.......xx.xxx.xxx.",
/* 8*/ ".............xx...........xx.xxx.",
/* 9*/ ".............xx...........xx.xxx.",
/*10*/ ".............xx...........xx.xxx.",
/*11*/ ".............xx...........xx.xxx.",
/*12*/ "..............................xx.",
/*13*/ "..............................xx.",
/*14*/ "..............................xx.",
/*15*/ "..............................xx.",
// 0 0 0 0
// 012345670123456701234567012345670
// 012345678901234567890123456789012
/*16*/ "..xx.xxx.xxx.xxx.....xxx.xxx.xxx.",
/*17*/ "..xx.xxx.xxx.xxx.....xxx.xxx.xxx.",
/*18*/ "..xx.xxx.xxx.xxx.....xxx.xxx.xxx.",
/*19*/ "..xx.xxx.xxx.xxx.....xxx.xxx.xxx.",
/*20*/ "..xx..xx.xxx.xxx.........xxx.xxx.",
/*21*/ "..xx..xx.xxx.xxx.........xxx.xxx.",
/*22*/ "..xx..xx.xxx.xxx.........xxx.xxx.",
/*23*/ "..xx..xx.xxx.xxx.........xxx.xxx.",
/*24*/ "..xx..xx..xx.xxx.............xxx.",
/*25*/ "..xx..xx..xx.xxx.............xxx.",
/*26*/ "..xx..xx..xx.xxx.............xxx.",
/*27*/ "..xx..xx..xx.xxx.............xxx.",
/*28*/ "..xx..xx..xx..xx.................",
/*29*/ "..xx..xx..xx..xx.................",
/*30*/ "..xx..xx..xx..xx.................",
/*31*/ "..xx..xx..xx..xx.................",
// 012345678901234567890123456789012
/*32*/ ".....xx..xx..xx...xx.xxx.xxx.xxx."
};
#include <boost/mp11/algorithm.hpp>
#include <boost/core/demangle.hpp>
template <class T>
using safe_t = boost::safe_numerics::safe<
T,
boost::safe_numerics::native
>;
#include "test_assignment.hpp"
using namespace boost::mp11;
// Function object applied by mp_for_each in main() to every (i1, i2) index
// pair drawn from the test-value list L.  It runs test_assignment for the
// corresponding value pair and accumulates pass/fail into m_error, checking
// against the expected-result matrix test_assignment_result.
template<typename L>
struct test {
    static_assert(mp_is_list<L>(), "must be a list of integral constants");
    bool m_error;  // running conjunction of all individual test results
    test(bool b = true) : m_error(b) {}
    operator bool(){
        return m_error;
    }
    template<typename T>
    void operator()(const T &){
        static_assert(mp_is_list<T>(), "must be a list of two integral constants");
        constexpr size_t i1 = mp_first<T>(); // index of first argument
        constexpr size_t i2 = mp_second<T>();// index of second argument
        std::cout << i1 << ',' << i2 << ',';
        // The value_type of each integral constant is the primitive under test.
        using T1 = typename boost::mp11::mp_at_c<L, i1>::value_type;
        using T2 = typename boost::mp11::mp_at_c<L, i2>::value_type;
        m_error &= test_assignment<T1, T2>(
            boost::mp11::mp_at_c<L, i1>(), // value of first argument
            boost::mp11::mp_at_c<L, i2>(), // value of second argument
            boost::core::demangle(typeid(T1).name()).c_str(),
            boost::core::demangle(typeid(T2).name()).c_str(),
            test_assignment_result[i1][i2]
        );
    }
};
int main(int, char *[]){
    // TEST_EACH_VALUE_PAIR
    test<test_values> rval(true);

    // Iterate over the Cartesian product of all test-value indices,
    // feeding every (i1, i2) pair to the functor above.
    using value_indices = mp_iota_c<mp_size<test_values>::value>;
    mp_for_each<
        mp_product<mp_list, value_indices, value_indices>
    >(rval);

    std::cout << (rval ? "success!" : "failure") << std::endl;
    // Exit status 0 on success (rval converts to bool).
    return ! rval ;
}
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @package Zend_Mvc
*/
namespace Blog\Event;
use Zend\EventManager\EventManagerInterface;
use Zend\EventManager\ListenerAggregateInterface;
/**
 * Event listener aggregate that keeps the cached per-post comment count in
 * sync: whenever a blog comment is created or removed, the Blog\Model\Comment
 * model recalculates the counts.
 *
 * @category Zend
 * @package Zend_Mvc
 */
class Listener implements ListenerAggregateInterface
{
    /**
     * Handles returned by the event manager, kept so detach() can remove them.
     *
     * @var \Zend\Stdlib\CallbackHandler[]
     */
    protected $listeners = array();

    /**
     * Attach to an event manager
     *
     * @param EventManagerInterface $events
     * @return void
     */
    public function attach(EventManagerInterface $events)
    {
        $this->listeners[] = $events->attach('blog.model.comment.create.post', array($this, 'onCreateComment'));
        $this->listeners[] = $events->attach('blog.model.comment.remove.post', array($this, 'onRemoveComment'));
    }

    /**
     * Detach all our listeners from the event manager
     *
     * @param EventManagerInterface $events
     * @return void
     */
    public function detach(EventManagerInterface $events)
    {
        foreach ($this->listeners as $index => $listener) {
            if ($events->detach($listener)) {
                unset($this->listeners[$index]);
            }
        }
    }

    /**
     * Recount post comments after a comment is created.
     *
     * @param mixed $e event object (unused)
     * @return void
     */
    public function onCreateComment($e)
    {
        $commentModel = \Eva\Api::_()->getModel('Blog\Model\Comment');
        $commentModel->updatePostCommentCount();
    }

    /**
     * Recount post comments after a comment is removed.
     *
     * @param mixed $e event object (unused)
     * @return void
     */
    public function onRemoveComment($e)
    {
        $commentModel = \Eva\Api::_()->getModel('Blog\Model\Comment');
        $commentModel->updatePostCommentCount();
    }
}
| {
"pile_set_name": "Github"
} |
//
// Hello YouTube! on ESP-12 and AdaFruit I2C LED Matrix
// Hari Wiguna, 2015
//
// I started with AdaFruit's HT16K33 sample, hence the comment below.
//
/***************************************************
This is a library for our I2C LED Backpacks
Designed specifically to work with the Adafruit LED Matrix backpacks
----> http://www.adafruit.com/products/872
----> http://www.adafruit.com/products/871
----> http://www.adafruit.com/products/870
These displays use I2C to communicate, 2 pins are required to
interface. There are multiple selectable I2C addresses. For backpacks
with 2 Address Select pins: 0x70, 0x71, 0x72 or 0x73. For backpacks
with 3 Address Select pins: 0x70 thru 0x77
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing
products from Adafruit!
Written by Limor Fried/Ladyada for Adafruit Industries.
BSD license, all text above must be included in any redistribution
****************************************************/
#include <Wire.h>
#include "Adafruit_LEDBackpack.h"
#include "Adafruit_GFX.h"
#ifndef _BV
#define _BV(bit) (1<<(bit))
#endif
//Adafruit_LEDBackpack matrix = Adafruit_LEDBackpack();
Adafruit_8x8matrix matrix = Adafruit_8x8matrix();
uint8_t counter = 0;
String msg = " Hello YouTube! ";
void setup() {
  //Serial.begin(9600);
  //Serial.println("HT16K33 test");
  // Potentiometer input, read in loop() to control scroll speed.
  pinMode(A0, INPUT);
  Wire.pins(0,2); // <-- IMPORTANT!!! call this BEFORE initializing AdaFruit matrix (which calls wire.begin() with no parameters!
  matrix.begin(0x70);  // pass in the address
}
void loop() {
  //matrix.fillScreen(0);
  int len = msg.length();

  // Potentiometer on A0 sets the scroll delay: higher reading -> faster scroll.
  int16_t reading = analogRead(A0);
  int16_t spd = map(reading, 0, 950, 300, 30); // In theory upper should be 1023, but in practice it only reach 1000

  //matrix.drawPixel(x,7, 1);
  // Draw every character shifted left by `counter` columns (6 px per glyph).
  for (int i = 0; i < len; i++)
    matrix.drawChar(i*6 - counter, 0, msg[i], 1, 0, 1);
  counter++;
  if (counter >= len*6) counter = 0;  // wrap once the whole message has scrolled by

  // write the changes we just made to the display
  matrix.writeDisplay();
  delay(spd);
}
| {
"pile_set_name": "Github"
} |
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
#This tests if the arbiter-count is transferred to the other peer.
# Print the number of peers node 1 sees in "Peer in Cluster (Connected)" state.
function check_peers {
	$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;

# Start a 2-node cluster and peer the nodes.
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers

# Create the arbiter volume while glusterd on node 2 is down, then restart it;
# the volume metadata (including the arbiter count) must sync over to node 2.
kill_glusterd 2
$CLI_1 volume create $V0 replica 3 arbiter 1 $H0:$B0/b{1..3}
TEST $glusterd_2
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers

# Both nodes must report the arbiter brick layout "1 x (2 + 1) = 3".
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field_1 $V0 "Number of Bricks"
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field_2 $V0 "Number of Bricks"
cleanup;
| {
"pile_set_name": "Github"
} |
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| {
"pile_set_name": "Github"
} |
import numpy as np
import pandas as pd
import pyranges as pr
from natsort import natsorted
import os
from collections import defaultdict
def get_n_args(f):
    """Return how many parameters the callable ``f`` accepts."""
    import inspect
    signature = inspect.signature(f)
    return len(signature.parameters)
def call_f(f, nparams, df, odf, kwargs):
    """Invoke the binary worker ``f.remote``; pass kwargs only for 3-arg workers."""
    if nparams == 3:
        return f.remote(df, odf, **kwargs)
    return f.remote(df, odf)
def call_f_single(f, nparams, df, **kwargs):
    """Invoke the unary worker ``f.remote``; forward kwargs only for 2-arg workers."""
    if nparams == 2:
        return f.remote(df, **kwargs)
    return f.remote(df)
class suppress_stdout_stderr(object):
    '''
    A context manager for doing a "deep suppression" of stdout and stderr in
    Python, i.e. will suppress all print, even if the print originates in a
    compiled C/Fortran sub-function.
    This will not suppress raised exceptions, since exceptions are printed
    to stderr just before a script exits, and after the context manager has
    exited (at least, I think that is why it lets exceptions through).

    Works at the OS file-descriptor level: fds 1 and 2 are pointed at
    /dev/null on entry and restored from saved duplicates on exit.
    '''

    def __init__(self):
        # Open a pair of null files
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
        # Save the actual stdout (1) and stderr (2) file descriptors.
        self.save_fds = (os.dup(1), os.dup(2))

    def __enter__(self):
        # Assign the null pointers to stdout and stderr.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)

    def __exit__(self, *_):
        # Re-assign the real stdout/stderr back to (1) and (2)
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # Close the null files
        os.close(self.null_fds[0])
        os.close(self.null_fds[1])
        # BUG FIX: also close the saved duplicates; the original leaked two
        # file descriptors on every use of the context manager.
        os.close(self.save_fds[0])
        os.close(self.save_fds[1])
def merge_dfs(df1, df2):
    """Concatenate two DataFrames; return the non-empty one unchanged when the
    other is empty, or None when both are empty."""
    empty1, empty2 = df1.empty, df2.empty
    if empty1 and empty2:
        # can this happen?
        return None
    if empty1:
        return df2
    if empty2:
        return df1
    merged = pd.concat([df1, df2], sort=False)
    return merged.reset_index(drop=True)
def process_results(results, keys):
    """Zip per-key worker results into a dict, dropping None/empty entries.

    When the results are DataFrames, each surviving frame is deep-copied (to
    guarantee C-contiguous memory) and re-indexed 0..n-1.  Non-DataFrame
    results are returned as-is after the None filter.
    """
    results_dict = {k: r for k, r in zip(keys, results) if r is not None}

    try:
        first_item = next(iter(results_dict.values()))
    except StopIteration:  # empty collection
        return results_dict

    if not isinstance(first_item, pd.DataFrame):
        return results_dict

    to_delete = []
    # to ensure no duplicate indexes and no empty dataframes
    for k in results_dict:
        if results_dict[k] is None or results_dict[k].empty:
            to_delete.append(k)
        else:
            # pandas might make a df that is not always C-contiguous
            # copying fixes this
            # TODO: better to only fix columns that are not C-contiguous?
            results_dict[k] = results_dict[k].copy(deep=True)
            results_dict[k].index = range(len(results_dict[k]))

    for k in to_delete:
        del results_dict[k]

    return results_dict
def make_sparse(df):
    """Reduce ``df`` to the core interval columns, keeping Strand when present."""
    wanted = ["Chromosome", "Start", "End"]
    if "Strand" in df:
        wanted.append("Strand")
    return df[wanted]
def make_binary_sparse(kwargs, df, odf):
    """Optionally strip df/odf to core columns per kwargs['sparse'] flags."""
    sparse = kwargs.get("sparse")
    if sparse:
        df = make_sparse(df) if sparse.get("self") else df
        odf = make_sparse(odf) if sparse.get("other") else odf
    return df, odf
def make_unary_sparse(kwargs, df):
    """Strip ``df`` to its core columns when kwargs['sparse']['self'] is truthy.

    Robustness fix: the original did ``kwargs.get("sparse").get("self")`` and
    raised AttributeError when "sparse" was missing or None; that case is now
    treated as "not sparse", matching make_binary_sparse.
    """
    sparse = kwargs.get("sparse") or {}
    if sparse.get("self"):
        df = make_sparse(df)
    return df
def ray_initialized():
    """Best-effort probe for whether this process is connected to a ray cluster.

    ``ray`` is deliberately not imported here: if the name is absent from the
    namespace the NameError is caught and False is returned.  If ``ray`` exists
    but no cluster is connected, calling .remote raises a RayConnectionError
    (matched by type name) and True is returned — i.e. "ray is initialized as a
    module but not connected".  NOTE(review): when the remote call succeeds the
    function falls through and returns None (falsy); presumably intentional,
    but worth confirming against the call sites.
    """
    def test_function():
        pass

    try:
        test_function = ray.remote(test_function)
    except Exception as e:
        if isinstance(e, NameError):
            return False
        raise e

    try:
        test_function.remote()
    except Exception as e:
        if "RayConnectionError" in str(type(e)):
            return True
        else:
            raise e
def get_multithreaded_funcs(function, nb_cpu):
    """Return (function, get, merge) adapted for serial or ray execution.

    For nb_cpu > 1 the callables become ray remotes and ``get`` blocks on
    object refs.  For serial use, plain callables are given a ``.remote``
    attribute so call sites can use one ``f.remote(...)`` / ``get(...)``
    idiom regardless of mode.
    """
    if nb_cpu > 1:
        import ray
        _merge_dfs = ray.remote(merge_dfs)
        get = ray.get
        function = ray.remote(function)
    else:
        _merge_dfs = lambda: "dummy value"  # placeholder object; only .remote is ever used
        _merge_dfs.remote = merge_dfs
        get = lambda x: x  # identity: serial results are already materialized
        # NOTE(review): this mutates the caller's function object in place.
        function.remote = function

    return function, get, _merge_dfs
def pyrange_apply(function, self, other, **kwargs):
    """Apply the binary worker ``function`` to matching per-chromosome
    DataFrames of the PyRanges ``self`` and ``other``.

    Pairing is controlled by kwargs["strandedness"] ("same", "opposite",
    False or None).  Whenever a matching key is missing in ``other``, an
    empty Chromosome/Start/End frame is substituted.  With
    kwargs["nb_cpu"] > 1 the calls run as ray remote tasks.  Returns
    {key: non-empty result} via process_results.
    """
    nparams = get_n_args(function)
    nb_cpu = kwargs.get("nb_cpu", 1)

    if nb_cpu > 1:
        import ray
        # ray.init prints a banner; silence it at the fd level.
        with suppress_stdout_stderr():
            ray.init(num_cpus=nb_cpu, ignore_reinit_error=True)

    function, get, _merge_dfs = get_multithreaded_funcs(
        function, nb_cpu=nb_cpu)

    strandedness = kwargs["strandedness"]

    other_strand = {"+": "-", "-": "+"}
    same_strand = {"+": "+", "-": "-"}

    if strandedness == "opposite":
        strand_dict = other_strand
    else:
        strand_dict = same_strand

    assert strandedness in ["same", "opposite", False, None]

    if strandedness:
        assert self.stranded and other.stranded, \
            "Can only do stranded operations when both PyRanges contain strand info"

    results = []

    items = natsorted(self.dfs.items())
    keys = natsorted(self.dfs.keys())

    if strandedness:
        # Stranded x stranded: pair (chrom, strand) with (chrom, mapped strand).
        for (c, s), df in items:
            os = strand_dict[s]  # NOTE(review): shadows the os module inside this loop

            if not (c, os) in other.keys() or len(other[c, os].values()) == 0:
                odf = pd.DataFrame(columns="Chromosome Start End".split())
            else:
                odf = other[c, os].values()[0]

            df, odf = make_binary_sparse(kwargs, df, odf)
            result = call_f(function, nparams, df, odf, kwargs)
            results.append(result)

    else:
        if self.stranded and not other.stranded:
            # self keyed by (chrom, strand); other keyed by chrom only.
            for (c, s), df in items:

                if not c in other.chromosomes:
                    odf = pd.DataFrame(columns="Chromosome Start End".split())
                else:
                    odf = other.dfs[c]

                df, odf = make_binary_sparse(kwargs, df, odf)
                result = call_f(function, nparams, df, odf, kwargs)
                results.append(result)

        elif not self.stranded and other.stranded:
            # Merge other's +/- frames into one per-chromosome frame.
            for c, df in items:

                if not c in other.chromosomes:
                    odf = pd.DataFrame(columns="Chromosome Start End".split())
                else:
                    odf1 = other[c, "+"].df
                    odf2 = other[c, "-"].df
                    odf = _merge_dfs.remote(odf1, odf2)

                df, odf = make_binary_sparse(kwargs, df, odf)
                result = call_f(function, nparams, df, odf, kwargs)
                results.append(result)

        elif self.stranded and other.stranded:
            # Unstranded pairing of two stranded PyRanges: merge other's strands.
            for (c, s), df in self.items():

                if not c in other.chromosomes:
                    # NOTE(review): odfs is a PyRanges here but is treated as a
                    # sequence below; the len(odfs) == 0 fallback produces the
                    # empty frame that is actually used.
                    odfs = pr.PyRanges(
                        pd.DataFrame(columns="Chromosome Start End".split()))
                else:
                    odfs = other[c].values()

                # from pydbg import dbg
                # dbg(odfs)

                if len(odfs) == 2:
                    odf = _merge_dfs.remote(*odfs)
                elif len(odfs) == 1:
                    odf = odfs[0]
                else:
                    odf = pd.DataFrame(columns="Chromosome Start End".split())

                df, odf = make_binary_sparse(kwargs, df, odf)
                result = call_f(function, nparams, df, odf, kwargs)
                results.append(result)

        else:
            # Unstranded x unstranded: pair by chromosome.
            for c, df in items:

                if not c in other.chromosomes:
                    odf = pd.DataFrame(columns="Chromosome Start End".split())
                else:
                    odf = other.dfs[c]

                df, odf = make_binary_sparse(kwargs, df, odf)
                result = call_f(function, nparams, df, odf, kwargs)
                results.append(result)

    results = get(results)

    results = process_results(results, keys)

    if nb_cpu > 1:
        ray.shutdown()

    return results
def pyrange_apply_single(function, self, **kwargs):
    """Apply the unary worker ``function`` to each per-chromosome(/strand)
    DataFrame of ``self``.

    With kwargs["strand"] truthy, one call is made per (chromosome, strand)
    key; otherwise one call per chromosome, merging +/- frames first when
    ``self`` is stranded.  kwargs is mutated in place with "chromosome" (and
    "strand") before each call.  Results go through process_results.
    """
    nparams = get_n_args(function)
    nb_cpu = kwargs.get("nb_cpu", 1)
    strand = kwargs["strand"]

    if nb_cpu > 1:
        import ray
        with suppress_stdout_stderr():
            ray.init(num_cpus=nb_cpu, ignore_reinit_error=True)

    function, get, _merge_dfs = get_multithreaded_funcs(
        function, nb_cpu=nb_cpu)

    if strand:
        assert self.stranded, \
            "Can only do stranded operation when PyRange contains strand info"

    results = []

    if strand:
        # Keep strands separate: one call per (chromosome, strand).
        for (c, s), df in self.items():
            kwargs["chromosome"] = c
            _strand = s
            kwargs["strand"] = _strand

            df = make_unary_sparse(kwargs, df)
            result = call_f_single(function, nparams, df, **kwargs)
            results.append(result)

        keys = self.keys()

    elif not self.stranded:
        # Unstranded PyRanges: keys are plain chromosome names.
        keys = []
        for c, df in self.items():
            kwargs["chromosome"] = c

            df = make_unary_sparse(kwargs, df)
            result = call_f_single(function, nparams, df, **kwargs)
            results.append(result)
            keys.append(c)

    else:
        # Stranded PyRanges with strand=False: merge +/- frames per chromosome.
        keys = []
        for c in self.chromosomes:
            kwargs["chromosome"] = c

            dfs = self[c]

            if len(dfs.keys()) == 2:
                df, df2 = dfs.values()
                # merge strands
                df = _merge_dfs.remote(df, df2)
            else:
                df = dfs.values()[0]

            df = make_unary_sparse(kwargs, df)
            result = call_f_single(function, nparams, df, **kwargs)
            results.append(result)
            keys.append(c)

    results = get(results)

    if nb_cpu > 1:
        ray.shutdown()

    results = process_results(results, keys)

    return results
def _lengths(df):
lengths = df.End - df.Start
return lengths
def _tss(df, **kwargs):
    """Collapse each interval to its transcription start site, +/- slack.

    For '+' strand rows the TSS is Start; for '-' strand rows it is End.
    The result is a 1 bp interval [TSS - slack, TSS + 1 + slack), clamped to
    0, in the original row order.
    """
    df = df.copy(deep=True)
    slack = kwargs.get("slack", 0)

    tss_pos = df.loc[df.Strand == "+"]
    tss_neg = df.loc[df.Strand == "-"]

    # pd.options.mode.chained_assignment = None
    # Minus-strand TSS is the End coordinate.
    tss_neg.loc[:, "Start"] = tss_neg.End

    # pd.options.mode.chained_assignment = "warn"
    tss = pd.concat([tss_pos, tss_neg], sort=False)

    # Collapse to a single base, then widen by slack on both sides.
    tss["End"] = tss.Start
    tss.End = tss.End + 1 + slack
    tss.Start = tss.Start - slack

    tss.loc[tss.Start < 0, "Start"] = 0

    # reindex restores the input row order after the strand split/concat.
    return tss.reindex(df.index)
def _tes(df, **kwargs):
    """Collapse intervals to the transcription end site as a 1 bp interval.

    NOTE(review): indentation was reconstructed; this grouping — '+' rows use
    End, '-' rows use Start, then all rows become [site, site + 1) — is the
    reading consistent with TES semantics, but confirm against upstream
    pyranges history.  Also assumes the whole frame is single-strand (only
    Strand.iloc[0] is inspected).
    """
    df = df.copy()
    if df.Strand.iloc[0] == "+":
        df.loc[:, "Start"] = df.End
    else:
        df.loc[:, "End"] = df.Start
        df.loc[:, "Start"] = df.End
    df.loc[:, "End"] = df.End + 1
    # NOTE(review): self-assignment below is a no-op; possibly a leftover
    # placeholder for slack handling.
    df.loc[:, "Start"] = df.Start
    df.loc[df.Start < 0, "Start"] = 0
    return df.reindex(df.index)
def _slack(df, **kwargs):
df = df.copy()
dtype = df.Start.dtype
slack = kwargs["slack"]
assert isinstance(
slack,
(int, dict)), "Slack parameter must be integer or dict, is {}".format(
type(slack))
if isinstance(slack, int):
df.loc[:, "Start"] = df.Start - slack
df.loc[df.Start < 0, "Start"] = 0
df.End = df.End + slack
else:
strand = df.Strand.iloc[0]
slack_dict = slack
five_end_slack = slack.get("5")
three_end_slack = slack.get("3")
if five_end_slack:
df.loc[df.Strand == "+", "Start"] -= five_end_slack
df.loc[df.Strand == "-", "End"] += five_end_slack
if three_end_slack:
df.loc[df.Strand == "-", "Start"] -= three_end_slack
df.loc[df.Strand == "+", "End"] += three_end_slack
df = df.astype({"Start": dtype, "End": dtype})
assert (df.Start < df.End).all(), "Some intervals are negative or zero length after applying slack!"
return df
def pyrange_apply_chunks(function, self, as_pyranges, **kwargs):
    """Split each per-key DataFrame of ``self`` into nb_cpu row-chunks, apply
    the unary worker ``function`` to every chunk, then regroup per key.

    With as_pyranges truthy, the chunk results of a key are concatenated back
    into one DataFrame; otherwise a list of chunk results is kept per key.
    """
    nparams = get_n_args(function)
    nb_cpu = kwargs.get("nb_cpu", 1)

    if nb_cpu > 1:
        import ray
        with suppress_stdout_stderr():
            ray.init(num_cpus=nb_cpu, ignore_reinit_error=True)

    function, get, _merge_dfs = get_multithreaded_funcs(
        function, nb_cpu=nb_cpu)

    keys = []
    lengths = []
    results = []
    for k, v in self.items():
        # np.array_split yields nb_cpu roughly equal row-chunks of v.
        dfs = np.array_split(v, nb_cpu)
        lengths.append(len(dfs))
        results.extend(
            [call_f_single(function, nparams, df, **kwargs) for df in dfs])
        keys.append(k)

    results = get(results)

    # Regroup the flat list of chunk results back into one entry per key.
    _results = []
    start = 0
    for _, length in zip(keys, lengths):
        end = start + length
        _r = results[start:end]
        if as_pyranges:
            _results.append(pd.concat(_r))
        else:
            _results.append(_r)
        start = end

    results = _results

    if nb_cpu > 1:
        ray.shutdown()

    results = process_results(results, keys)

    return results
| {
"pile_set_name": "Github"
} |
{
"copyright": "Joey Blake, http://codenimbus.com",
"url": "http://codenimbus.com",
"theme": "double-windsor"
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#import <UIKit/UIKit.h>
#import <XCTest/XCTest.h>
#import "RCTLog.h"
#import "RCTRootView.h"
#define TIMEOUT_SECONDS 600
#define TEXT_TO_LOOK_FOR @"Welcome to React Native!"
// Smoke test for the React Native app: waits (up to TIMEOUT_SECONDS) for the
// root view hierarchy to contain an element labeled TEXT_TO_LOOK_FOR, while
// watching for RedBox (error-level) log messages.
@interface LagouAppTests : XCTestCase
@end

@implementation LagouAppTests

// Depth-first search of the view hierarchy rooted at `view`.
// Returns YES as soon as `test` matches `view` itself or any descendant.
- (BOOL)findSubviewInView:(UIView *)view matching:(BOOL(^)(UIView *view))test
{
  if (test(view)) {
    return YES;
  }
  for (UIView *subview in [view subviews]) {
    if ([self findSubviewInView:subview matching:test]) {
      return YES;
    }
  }
  return NO;
}

// Polls the run loop until the welcome text appears, the timeout expires,
// or an error-level log message (RedBox) is captured.
- (void)testRendersWelcomeScreen
{
  UIViewController *vc = [[[[UIApplication sharedApplication] delegate] window] rootViewController];
  NSDate *date = [NSDate dateWithTimeIntervalSinceNow:TIMEOUT_SECONDS];
  BOOL foundElement = NO;
  __block NSString *redboxError = nil;

  // Capture error-level log output; anything at RCTLogLevelError or above
  // is treated as a RedBox and fails the test below.
  RCTSetLogFunction(^(RCTLogLevel level, RCTLogSource source, NSString *fileName, NSNumber *lineNumber, NSString *message) {
    if (level >= RCTLogLevelError) {
      redboxError = message;
    }
  });

  while ([date timeIntervalSinceNow] > 0 && !foundElement && !redboxError) {
    // Pump both run-loop modes briefly so UI work and timers can progress
    // while we wait for the bridge to render.
    [[NSRunLoop mainRunLoop] runMode:NSDefaultRunLoopMode beforeDate:[NSDate dateWithTimeIntervalSinceNow:0.1]];
    [[NSRunLoop mainRunLoop] runMode:NSRunLoopCommonModes beforeDate:[NSDate dateWithTimeIntervalSinceNow:0.1]];

    // Match on the accessibility label so the test is independent of the
    // concrete view class used to render the text.
    foundElement = [self findSubviewInView:vc.view matching:^BOOL(UIView *view) {
      if ([view.accessibilityLabel isEqualToString:TEXT_TO_LOOK_FOR]) {
        return YES;
      }
      return NO;
    }];
  }

  // Restore the default log handler before asserting.
  RCTSetLogFunction(RCTDefaultLogFunction);

  XCTAssertNil(redboxError, @"RedBox error: %@", redboxError);
  XCTAssertTrue(foundElement, @"Couldn't find element with text '%@' in %d seconds", TEXT_TO_LOOK_FOR, TIMEOUT_SECONDS);
}

@end
| {
"pile_set_name": "Github"
} |
package de.k3b.android.androFotoFinder.queries;
import android.content.AsyncTaskLoader;
import android.content.Context;
import android.database.Cursor;
import android.os.Build;
import android.os.CancellationSignal;
import android.os.OperationCanceledException;
import android.util.Log;
import java.io.FileDescriptor;
import java.io.PrintWriter;
import de.k3b.android.androFotoFinder.Global;
import de.k3b.database.QueryParameter;
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Copied from android.content.CursorLoaderWithException
*/
/**
 * Cursor loader for media queries that records the exception thrown during
 * loading instead of crashing, so the UI can inspect it via
 * {@link #getException()}.
 *
 * Copied from android.content.CursorLoader (see the class comment above) and
 * adapted to execute a {@link QueryParameter} through FotoSql.
 */
public class CursorLoaderWithException extends AsyncTaskLoader<Cursor> {
    /** Observer that forces a reload when the underlying content changes. */
    final ForceLoadContentObserver mObserver;

    /** The query this loader executes; fixed at construction time. */
    private final QueryParameter query;

    /** Last delivered cursor; closed when superseded or the loader is reset. */
    Cursor mCursor;

    /** Signal used to cancel an in-flight query; non-null only while loading. */
    CancellationSignal mCancellationSignal;

    /** Exception thrown by the last {@link #loadInBackground()}, or null. */
    private Exception mException;

    /**
     * Creates a loader that will run {@code query} when started.
     *
     * @param context context passed to the base AsyncTaskLoader
     * @param query   query to execute in the background
     */
    public CursorLoaderWithException(Context context, QueryParameter query) {
        super(context);
        this.query = query;
        mObserver = new ForceLoadContentObserver();
    }

    /* Runs on a worker thread */
    @Override
    public Cursor loadInBackground() {
        mException = null;
        // CancellationSignal exists only from JELLY_BEAN on; on older
        // devices the query simply cannot be interrupted mid-flight.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
            synchronized (this) {
                if (isLoadInBackgroundCanceled()) {
                    throw new OperationCanceledException();
                }
                mCancellationSignal = new CancellationSignal();
            }
        }
        try {
            Cursor cursor;
            cursor = FotoSql.getMediaDBApi().createCursorForQuery(null, "loader", this.query, null, mCancellationSignal);
            if (cursor != null) {
                try {
                    // Ensure the cursor window is filled.
                    cursor.getCount();
                    cursor.registerContentObserver(mObserver);
                } catch (RuntimeException ex) {
                    // Don't leak the cursor if filling/observing fails.
                    cursor.close();
                    throw ex;
                }
            }
            return cursor;
        } catch (Exception ex) {
            // Remember the failure for the UI instead of propagating it;
            // callers must check getException() when the result is null.
            final String msg = "FotoSql.createCursorLoader()#loadInBackground failed:\n\t" + query.toSqlString();
            Log.e(Global.LOG_CONTEXT, msg, ex);
            mException = ex;
            return null;
        } finally {
            synchronized (this) {
                mCancellationSignal = null;
            }
        }
    }

    /** Cancels the in-flight query, if any, on supported API levels. */
    @Override
    public void cancelLoadInBackground() {
        super.cancelLoadInBackground();
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
            synchronized (this) {
                if (mCancellationSignal != null) {
                    mCancellationSignal.cancel();
                }
            }
        }
    }

    /* Runs on the UI thread */
    @Override
    public void deliverResult(Cursor cursor) {
        if (isReset()) {
            // An async query came in while the loader is stopped
            if (cursor != null) {
                cursor.close();
            }
            return;
        }
        Cursor oldCursor = mCursor;
        mCursor = cursor;
        if (isStarted()) {
            super.deliverResult(cursor);
        }
        // Close the previous cursor unless it is the same object being
        // redelivered (e.g. from onStartLoading).
        if (oldCursor != null && oldCursor != cursor && !oldCursor.isClosed()) {
            oldCursor.close();
        }
    }

    /**
     * Starts an asynchronous load of the contacts list data. When the result is ready the callbacks
     * will be called on the UI thread. If a previous load has been completed and is still valid
     * the result may be passed to the callbacks immediately.
     * <p>
     * Must be called from the UI thread
     */
    @Override
    protected void onStartLoading() {
        if (mCursor != null) {
            // Redeliver the cached result immediately.
            deliverResult(mCursor);
        }
        if (takeContentChanged() || mCursor == null) {
            forceLoad();
        }
    }

    /**
     * Must be called from the UI thread
     */
    @Override
    protected void onStopLoading() {
        // Attempt to cancel the current load task if possible.
        cancelLoad();
    }

    /** Closes a cursor whose load was cancelled before delivery. */
    @Override
    public void onCanceled(Cursor cursor) {
        if (cursor != null && !cursor.isClosed()) {
            cursor.close();
        }
    }

    @Override
    protected void onReset() {
        super.onReset();
        // Ensure the loader is stopped
        onStopLoading();
        if (mCursor != null && !mCursor.isClosed()) {
            mCursor.close();
        }
        mCursor = null;
    }

    /** Returns the query this loader executes. */
    public QueryParameter getQuery() {
        return query;
    }

    /** Returns the exception from the last load, or null if it succeeded. */
    public Exception getException() {
        return mException;
    }

    /** Dumps loader state plus the SQL of the query for debugging. */
    @Override
    public void dump(String prefix, FileDescriptor fd, PrintWriter writer, String[] args) {
        super.dump(prefix, fd, writer, args);
        writer.print(prefix);
        writer.print("query=");
        writer.println(this.query.toSqlString());
        writer.print(prefix);
        writer.print("mCursor=");
        writer.println(mCursor);
    }
}
| {
"pile_set_name": "Github"
} |
spv.memoryQualifier.frag
// Module Version 10000
// Generated by (magic number): 80007
// Id's are bound by 97
Capability Shader
Capability ImageRect
Capability Image1D
1: ExtInstImport "GLSL.std.450"
MemoryModel Logical GLSL450
EntryPoint Fragment 4 "main"
ExecutionMode 4 OriginUpperLeft
Source GLSL 450
Name 4 "main"
Name 9 "texel"
Name 12 "i1D"
Name 19 "i2D"
Name 28 "i2DRect"
Name 35 "i3D"
Name 44 "iCube"
Name 49 "Data"
MemberName 49(Data) 0 "f1"
MemberName 49(Data) 1 "f2"
Name 50 "Buffer"
MemberName 50(Buffer) 0 "f1"
MemberName 50(Buffer) 1 "f2"
MemberName 50(Buffer) 2 "f3"
MemberName 50(Buffer) 3 "f4"
MemberName 50(Buffer) 4 "i1"
MemberName 50(Buffer) 5 "data"
Name 52 ""
Decorate 12(i1D) DescriptorSet 0
Decorate 12(i1D) Binding 0
Decorate 12(i1D) Coherent
Decorate 19(i2D) DescriptorSet 0
Decorate 19(i2D) Binding 1
Decorate 19(i2D) Volatile
Decorate 19(i2D) Coherent
Decorate 28(i2DRect) DescriptorSet 0
Decorate 28(i2DRect) Binding 2
Decorate 28(i2DRect) Restrict
Decorate 35(i3D) DescriptorSet 0
Decorate 35(i3D) Binding 3
Decorate 35(i3D) NonWritable
Decorate 44(iCube) DescriptorSet 0
Decorate 44(iCube) Binding 3
Decorate 44(iCube) NonReadable
MemberDecorate 49(Data) 0 Offset 0
MemberDecorate 49(Data) 1 Offset 8
MemberDecorate 50(Buffer) 0 Coherent
MemberDecorate 50(Buffer) 0 Volatile
MemberDecorate 50(Buffer) 0 Coherent
MemberDecorate 50(Buffer) 0 Offset 0
MemberDecorate 50(Buffer) 1 Coherent
MemberDecorate 50(Buffer) 1 Restrict
MemberDecorate 50(Buffer) 1 Offset 8
MemberDecorate 50(Buffer) 2 Coherent
MemberDecorate 50(Buffer) 2 NonWritable
MemberDecorate 50(Buffer) 2 Offset 16
MemberDecorate 50(Buffer) 3 Coherent
MemberDecorate 50(Buffer) 3 NonReadable
MemberDecorate 50(Buffer) 3 Offset 32
MemberDecorate 50(Buffer) 4 Coherent
MemberDecorate 50(Buffer) 4 Offset 48
MemberDecorate 50(Buffer) 5 Coherent
MemberDecorate 50(Buffer) 5 Offset 56
Decorate 50(Buffer) BufferBlock
Decorate 52 DescriptorSet 0
2: TypeVoid
3: TypeFunction 2
6: TypeFloat 32
7: TypeVector 6(float) 4
8: TypePointer Function 7(fvec4)
10: TypeImage 6(float) 1D nonsampled format:R32f
11: TypePointer UniformConstant 10
12(i1D): 11(ptr) Variable UniformConstant
14: TypeInt 32 1
15: 14(int) Constant 1
17: TypeImage 6(float) 2D nonsampled format:R32f
18: TypePointer UniformConstant 17
19(i2D): 18(ptr) Variable UniformConstant
21: TypeVector 14(int) 2
22: 21(ivec2) ConstantComposite 15 15
26: TypeImage 6(float) Rect nonsampled format:R32f
27: TypePointer UniformConstant 26
28(i2DRect): 27(ptr) Variable UniformConstant
33: TypeImage 6(float) 3D nonsampled format:R32f
34: TypePointer UniformConstant 33
35(i3D): 34(ptr) Variable UniformConstant
37: TypeVector 14(int) 3
38: 37(ivec3) ConstantComposite 15 15 15
42: TypeImage 6(float) Cube nonsampled format:R32f
43: TypePointer UniformConstant 42
44(iCube): 43(ptr) Variable UniformConstant
47: TypeVector 6(float) 2
48: TypeVector 6(float) 3
49(Data): TypeStruct 6(float) 47(fvec2)
50(Buffer): TypeStruct 6(float) 47(fvec2) 48(fvec3) 7(fvec4) 14(int) 49(Data)
51: TypePointer Uniform 50(Buffer)
52: 51(ptr) Variable Uniform
53: 14(int) Constant 4
54: TypePointer Uniform 14(int)
57: 14(int) Constant 0
58: TypePointer Uniform 6(float)
61: TypePointer Function 6(float)
63: TypePointer Uniform 47(fvec2)
71: 14(int) Constant 2
72: TypePointer Uniform 48(fvec3)
80: 14(int) Constant 5
83: TypeInt 32 0
84: 83(int) Constant 1
88: 83(int) Constant 3
93: 14(int) Constant 3
95: TypePointer Uniform 7(fvec4)
4(main): 2 Function None 3
5: Label
9(texel): 8(ptr) Variable Function
13: 10 Load 12(i1D)
16: 7(fvec4) ImageRead 13 15
Store 9(texel) 16
20: 17 Load 19(i2D)
23: 7(fvec4) ImageRead 20 22
24: 7(fvec4) Load 9(texel)
25: 7(fvec4) FAdd 24 23
Store 9(texel) 25
29: 26 Load 28(i2DRect)
30: 7(fvec4) ImageRead 29 22
31: 7(fvec4) Load 9(texel)
32: 7(fvec4) FAdd 31 30
Store 9(texel) 32
36: 33 Load 35(i3D)
39: 7(fvec4) ImageRead 36 38
40: 7(fvec4) Load 9(texel)
41: 7(fvec4) FAdd 40 39
Store 9(texel) 41
45: 42 Load 44(iCube)
46: 7(fvec4) Load 9(texel)
ImageWrite 45 38 46
55: 54(ptr) AccessChain 52 53
56: 14(int) Load 55
59: 58(ptr) AccessChain 52 57
60: 6(float) Load 59
62: 61(ptr) AccessChain 9(texel) 56
Store 62 60
64: 63(ptr) AccessChain 52 15
65: 47(fvec2) Load 64
66: 7(fvec4) Load 9(texel)
67: 47(fvec2) VectorShuffle 66 66 0 1
68: 47(fvec2) FAdd 67 65
69: 7(fvec4) Load 9(texel)
70: 7(fvec4) VectorShuffle 69 68 4 5 2 3
Store 9(texel) 70
73: 72(ptr) AccessChain 52 71
74: 48(fvec3) Load 73
75: 7(fvec4) Load 9(texel)
76: 48(fvec3) VectorShuffle 75 75 0 1 2
77: 48(fvec3) FSub 76 74
78: 7(fvec4) Load 9(texel)
79: 7(fvec4) VectorShuffle 78 77 4 5 6 3
Store 9(texel) 79
81: 58(ptr) AccessChain 52 80 57
82: 6(float) Load 81
85: 58(ptr) AccessChain 52 80 15 84
86: 6(float) Load 85
87: 6(float) FAdd 82 86
89: 61(ptr) AccessChain 9(texel) 88
90: 6(float) Load 89
91: 6(float) FAdd 90 87
92: 61(ptr) AccessChain 9(texel) 88
Store 92 91
94: 7(fvec4) Load 9(texel)
96: 95(ptr) AccessChain 52 93
Store 96 94
Return
FunctionEnd
| {
"pile_set_name": "Github"
} |
// Copyright 2020 Workiva Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Public surface of the OverReact Redux/Flux integration layer.
export 'src/over_react_redux/over_react_flux.dart';
export 'src/over_react_redux/redux_multi_provider.dart';
// Only ReduxProvider is re-exported from this library; its other members
// are intentionally not part of this entry point.
export 'src/over_react_redux/over_react_redux.dart' show ReduxProvider;
| {
"pile_set_name": "Github"
} |
:orphan:
.. _guide-cli-addsite:
The addsite command
===================
Adding a site to Zotonic is done through the :ref:`zotonic <ref-cli>` shell command. Its syntax is like this::
zotonic addsite [options] <site_name>
This command creates a new site with <site_name> as the site's
name. This new site will be based on a so-called `skeleton
site`. Currently there are four skeletons: 'blog', 'basesite', 'empty'
and 'nodb'. 'blog' is the default.
The addsite command is highly configurable and takes the following options:
-s <skel> Skeleton site (one of 'blog', 'basesite', 'empty', 'nodb'; default: blog)
-H <host> Site's hostname (default: <site_name>.test)
-L Create the site in the current directory and symlink it into the zotonic user directory
-g <remote> Create a git repository in the site and push it to the given remote
-h <host> Database host (default: localhost)
-p <port> Database port (default: 5432)
-u <user> Database user (default: zotonic)
-P <pass> Database password (default: zotonic)
-d <name> Database name (default: zotonic)
-n <schema> Database schema (default: public)
-a <pass> Admin password (default: admin)
Adding a site
-------------
When adding a site, the site will be created in the Zotonic user
directory. When ``-L`` is used, the site will be created in the
current directory (from which the ``addsite`` command is run), and a
symlink into the Zotonic user directory will be made.
Before adding the site, the command will print out an
overview of what it will do before continuing.
For instance, consider the following addsite command::
zotonic addsite -s blog myfirstblog
Will print out the following:
.. code-block:: none
************
Warning!
************
Site: 'myfirstblog.test' cannot be reached.
Command 'host myfirstblog.test' must resolve to an IP address,
otherwise you won't be able to reach it after installing the site.
You can fix that by adding the following line to /etc/hosts:
127.0.0.1 myfirstblog.test
==== Add site ====
Site name: myfirstblog
Site URL: http://myfirstblog.test:8000/
Skeleton site: blog
Site directory: /home/user/zotonic/user/sites/myfirstblog
Admin password: admin
Database host: 127.0.0.1
Database port: 5432
Database user: zotonic
Database password: zotonic
Database name: zotonic
Database schema: public
>>> Hit return to proceed...
First, it will warn you that the hostname that this site (initially)
will have, is not yet resolvable. Add the hostname to your local hosts
file to be able to see the site when it has finished installing.
After this warning, the `addsite` command will print out an overview
of what it will do. It will show the site name, the URL the site will
be reachable on, in which directory the site will be installed, et
cetera. An overview of the database credentials and the admin password
will also be printed. After hitting return, the site will be created
and built for the first time. This will take a few moments, after
which you will be able to visit the site's URL in your browser.
Default values to `zotonic addsite`
-----------------------------------
The "addsite" subcommand checks a file called
``$HOME/.zotonic-defaults`` for the default values to these
options. This file is a file in bash-syntax which can define the
following variables: ``SKEL``, ``DBHOST``, ``DBPORT``, ``DBUSER``,
``DBPASSWORD``, ``DBDATABASE``, ``DBSCHEMA``, ``ADMINPASSWORD``,
``SITEHOSTNAME``, ``DO_LINK``, ``TARGETDIR``.
For instance, if you want all new Zotonic sites to be created in
/var/www and have hostnames like `www.mysite.intra`,
`www.anothersite.intra`, add the following to your
``$HOME/.zotonic-defaults`` file::
export SITEHOSTNAME="www.%%SITE%%.intra"
export TARGETDIR=/var/www
Available skeleton sites
------------------------
Zotonic comes with four different skeletons to base your site on.
``blog``
As a full example of a Zotonic website, it installs a front page
with a listing of recent articles. As default example data, three
example articles and a couple of images are also installed.
``basesite``
A skeleton site which lets you build a site on top of
:ref:`mod_base_site`. Its site directory is pretty empty, as
`mod_base_site` itself implements most of the frontend templates
that are needed. This skeleton does install a custom homepage
template as ``home.tpl`` and dispatch rule to serve it. It also adds
a `site.css` file for tweaking fonts, colors, et cetera.
``empty``
An empty skeleton. No templates or dispatch rules whatsoever are
created. You can use this skeleton to create a new site based on
your own base templates, a custom CSS framework, etc.
``nodb``
Like the `empty` template, but this skeleton does not require a
database connection. As such, the admin and content management
interface is disabled, as those modules all require a database
connection.
| {
"pile_set_name": "Github"
} |
'use strict';
// Thin wrappers around ansi-escapes that write cursor-movement escape
// sequences to a readline instance's output stream.
var ansiEscapes = require('ansi-escapes');

/**
 * Move cursor left by `x`
 * @param {Readline} rl - Readline instance
 * @param {Number} x - How far to go left (ansi-escapes defaults to 1 when omitted)
 */
exports.left = function(rl, x) {
  rl.output.write(ansiEscapes.cursorBackward(x));
};

/**
 * Move cursor right by `x`
 * @param {Readline} rl - Readline instance
 * @param {Number} x - How far to go right (ansi-escapes defaults to 1 when omitted)
 */
exports.right = function(rl, x) {
  rl.output.write(ansiEscapes.cursorForward(x));
};

/**
 * Move cursor up by `x`
 * @param {Readline} rl - Readline instance
 * @param {Number} x - How far to go up (ansi-escapes defaults to 1 when omitted)
 */
exports.up = function (rl, x) {
  rl.output.write(ansiEscapes.cursorUp(x));
};

/**
 * Move cursor down by `x`
 * @param {Readline} rl - Readline instance
 * @param {Number} x - How far to go down (ansi-escapes defaults to 1 when omitted)
 */
exports.down = function (rl, x) {
  rl.output.write(ansiEscapes.cursorDown(x));
};

/**
 * Clear current line
 * @param {Readline} rl - Readline instance
 * @param {Number} len - number of lines to erase (from the cursor upward)
 */
exports.clearLine = function (rl, len) {
  rl.output.write(ansiEscapes.eraseLines(len));
};
| {
"pile_set_name": "Github"
} |
<?php /* Password-reset form: posts the new password and its confirmation to
         Auth/PasswordResetController::update, carrying the reset token in
         the URL and a CSRF token in the form body. */ ?>
<div class="form-login">
    <h2><?= t('Password Reset') ?></h2>
    <form method="post" action="<?= $this->url->href('Auth/PasswordResetController', 'update', ['token' => $token]) ?>">
        <?= $this->form->csrf() ?>
        <?= $this->form->label(t('New password'), 'password') ?>
        <?= $this->form->password('password', $values, $errors) ?>
        <?= $this->form->label(t('Confirmation'), 'confirmation') ?>
        <?= $this->form->password('confirmation', $values, $errors) ?>
        <div class="form-actions">
            <button type="submit" class="btn btn-success"><?= t('Change Password') ?></button>
        </div>
    </form>
</div>
| {
"pile_set_name": "Github"
} |
| +2&#ffffff0|+| |[|N|o| |N|a|m|e|]| | +8#0000001#e0e0e08|[|N|o| |N|a|m|e|]| | +1#0000000#ffffff0@49|X+8#0000001#e0e0e08
>1+0#0000000#ffffff0| @73
|2| @8|h+0fd7ff255|e|l@1|o| |t|h|e|r|e| @8|r+0&#afffff255| |o|n|e| @8| +0&#ffffff0@30
|3| @22|a+0&#afffff255|n|o|t|h|e|r| |t|w|o| @8| +0&#ffffff0@30
|4| @22|a+0&#afffff255|n|o|t|h|e|r| |t|h|r|e@1| @6| +0&#ffffff0@30
|5| @73
|6| @73
|7| @73
|8| @73
@57|1|,|1| @10|T|o|p|
| {
"pile_set_name": "Github"
} |
cheats = 1
cheat0_desc = "Infinite Lives"
cheat0_code = "Z 8 27767 126 0"
cheat0_enable = false | {
"pile_set_name": "Github"
} |
{
"description": "Represents a projected volume source",
"required": [
"sources"
],
"properties": {
"defaultMode": {
"description": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"type": "integer",
"format": "int32"
},
"sources": {
"description": "list of volume projections",
"type": [
"array",
"null"
],
"items": {
"$ref": "_definitions.json#/definitions/io.k8s.api.core.v1.VolumeProjection"
}
}
},
"$schema": "http://json-schema.org/schema#",
"type": "object"
} | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Docutils 0.4: http://docutils.sourceforge.net/" />
<title>Introduction</title>
<link rel="stylesheet" href="html4css1.css" type="text/css" />
</head>
<body>
<div class="header">
<a class="reference" href="index.html">Prev</a> <a class="reference" href="index.html">Up</a> <a class="reference" href="processing-ref.html">Next</a>
<hr class="header"/>
</div>
<div class="document" id="introduction">
<h1 class="title">Introduction</h1>
<div class="section">
<h1><a id="threads-processes-and-the-gil" name="threads-processes-and-the-gil">Threads, processes and the GIL</a></h1>
<p>To run more than one piece of code at the same time on the same
computer one has the choice of either using multiple processes or
multiple threads.</p>
<p>Although a program can be made up of multiple processes, these
processes are in effect completely independent of one another:
different processes are not able to cooperate with one another unless
one sets up some means of communication between them (such as by using
sockets). If a lot of data must be transferred between processes then
this can be inefficient.</p>
<p>On the other hand, multiple threads within a single process are
intimately connected: they share their data but often can interfere
badly with one another. It is often argued that the only way to make
multithreaded programming "easy" is to avoid relying on any shared
state and for the threads to only communicate by passing messages to
each other.</p>
<p>CPython has a <em>Global Interpreter Lock</em> (GIL) which in many ways makes
threading easier than it is in most languages by making sure that only
one thread can manipulate the interpreter's objects at a time. As a
result, it is often safe to let multiple threads access data without
using any additional locking as one would need to in a language such
as C.</p>
<p>One downside of the GIL is that on multi-processor (or multi-core)
systems a multithreaded Python program can only make use of one
processor at a time. This is a problem that can be overcome by using
multiple processes instead.</p>
<p>Python gives little direct support for writing programs using multiple
process. This package allows one to write multi-process programs
using much the same API that one uses for writing threaded programs.</p>
</div>
<div class="section">
<h1><a id="forking-and-spawning" name="forking-and-spawning">Forking and spawning</a></h1>
<p>There are two ways of creating a new process in Python:</p>
<ul>
<li><p class="first">The current process can <em>fork</em> a new child process by using the
<tt class="docutils literal"><span class="pre">os.fork()</span></tt> function. This effectively creates an identical copy
of the current process which is now able to go off and perform some
task set by the parent process. This means that the child process
inherits <em>copies</em> of all variables that the parent process had.</p>
<p>However, <tt class="docutils literal"><span class="pre">os.fork()</span></tt> is not available on every platform: in
particular Windows does not support it.</p>
</li>
<li><p class="first">Alternatively, the current process can spawn a completely new Python
interpreter by using the <tt class="docutils literal"><span class="pre">subprocess</span></tt> module or one of the
<tt class="docutils literal"><span class="pre">os.spawn*()</span></tt> functions.</p>
<p>Getting this new interpreter in to a fit state to perform the task
set for it by its parent process is, however, a bit of a challenge.</p>
</li>
</ul>
<p>The <tt class="docutils literal"><span class="pre">processing</span></tt> package uses <tt class="docutils literal"><span class="pre">os.fork()</span></tt> if it is available since
it makes life a lot simpler. Forking the process is also more
efficient in terms of memory usage and the time needed to create the
new process.</p>
</div>
<div class="section">
<h1><a id="the-process-class" name="the-process-class">The Process class</a></h1>
<p>In the <tt class="docutils literal"><span class="pre">processing</span></tt> package processes are spawned by creating a
<tt class="docutils literal"><span class="pre">Process</span></tt> object and then calling its <tt class="docutils literal"><span class="pre">start()</span></tt> method.
<tt class="docutils literal"><span class="pre">processing.Process</span></tt> follows the API of <tt class="docutils literal"><span class="pre">threading.Thread</span></tt>. A
trivial example of a multiprocess program is</p>
<pre class="literal-block">
from processing import Process
def f(name):
print 'hello', name
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
p.join()
</pre>
<p>Here the function <tt class="docutils literal"><span class="pre">f</span></tt> is run in a child process.</p>
<p>For an explanation of why (on Windows) the <tt class="docutils literal"><span class="pre">if</span> <span class="pre">__name__</span> <span class="pre">==</span> <span class="pre">'__main__'</span></tt>
part is necessary see <a class="reference" href="programming-guidelines.html">Programming guidelines</a>.</p>
</div>
<div class="section">
<h1><a id="exchanging-objects-between-processes" name="exchanging-objects-between-processes">Exchanging objects between processes</a></h1>
<p><tt class="docutils literal"><span class="pre">processing</span></tt> supports two types of communication channel between
processes:</p>
<dl class="docutils">
<dt><strong>Queues</strong>:</dt>
<dd><p class="first">The function <tt class="docutils literal"><span class="pre">Queue()</span></tt> returns a near clone of <tt class="docutils literal"><span class="pre">Queue.Queue</span></tt>
-- see the Python standard documentation. For example</p>
<pre class="literal-block">
from processing import Process, Queue
def f(q):
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
print q.get() # prints "[42, None, 'hello']"
p.join()
</pre>
<p class="last">Queues are thread and process safe. See <a class="reference" href="processing-ref.html#pipes-and-queues">Queues</a>.</p>
</dd>
<dt><strong>Pipes</strong>:</dt>
<dd><p class="first">The <tt class="docutils literal"><span class="pre">Pipe()</span></tt> function returns a pair of connection objects
connected by a pipe which by default is duplex (two-way). For
example</p>
<pre class="literal-block">
from processing import Process, Pipe
def f(conn):
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
p.start()
print parent_conn.recv() # prints "[42, None, 'hello']"
p.join()
</pre>
<p class="last">The two connection objects returned by <tt class="docutils literal"><span class="pre">Pipe()</span></tt> represent the two
ends of the pipe. Each connection object has <tt class="docutils literal"><span class="pre">send()</span></tt> and
<tt class="docutils literal"><span class="pre">recv()</span></tt> methods (among others). Note that data in a pipe may
become corrupted if two processes (or threads) try to read from or
write to the <em>same</em> end of the pipe at the same time. Of course
there is no risk of corruption from processes using different ends
of the pipe at the same time. See <a class="reference" href="processing-ref.html#pipes-and-queues">Pipes</a>.</p>
</dd>
</dl>
</div>
<div class="section">
<h1><a id="synchronization-between-processes" name="synchronization-between-processes">Synchronization between processes</a></h1>
<p><tt class="docutils literal"><span class="pre">processing</span></tt> contains equivalents of all the synchronization
primitives from <tt class="docutils literal"><span class="pre">threading</span></tt>. For instance one can use a lock to
ensure that only one process prints to standard output at a time:</p>
<pre class="literal-block">
from processing import Process, Lock
def f(l, i):
l.acquire()
print 'hello world', i
l.release()
if __name__ == '__main__':
lock = Lock()
for num in range(10):
Process(target=f, args=(lock, num)).start()
</pre>
<p>Without using the lock output from the different processes is liable
to get all mixed up.</p>
</div>
<div class="section">
<h1><a id="sharing-state-between-processes" name="sharing-state-between-processes">Sharing state between processes</a></h1>
<p>As mentioned above, when doing concurrent programming it is usually
best to avoid using shared state as far as possible. This is
particularly true when using multiple processes.</p>
<p>However, if you really do need to use some shared data then
<tt class="docutils literal"><span class="pre">processing</span></tt> provides a couple of ways of doing so.</p>
<dl class="docutils">
<dt><strong>Shared memory</strong>:</dt>
<dd><p class="first">Data can be stored in a shared memory map using <tt class="docutils literal"><span class="pre">Value</span></tt> or <tt class="docutils literal"><span class="pre">Array</span></tt>.
For example the following code</p>
<pre class="literal-block">
from processing import Process, Value, Array
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):
a[i] = -a[i]
if __name__ == '__main__':
num = Value('d', 0.0)
arr = Array('i', range(10))
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print num.value
print arr[:]
</pre>
<p>will print</p>
<pre class="literal-block">
3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
</pre>
<p>The <tt class="docutils literal"><span class="pre">'d'</span></tt> and <tt class="docutils literal"><span class="pre">'i'</span></tt> arguments used when creating <tt class="docutils literal"><span class="pre">num</span></tt> and <tt class="docutils literal"><span class="pre">arr</span></tt>
are typecodes of the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module: <tt class="docutils literal"><span class="pre">'d'</span></tt>
indicates a double precision float and <tt class="docutils literal"><span class="pre">'i'</span></tt> indicates a signed
integer. These shared objects will be process and thread safe.</p>
<p class="last">For more flexibility in using shared memory one can use the
<tt class="docutils literal"><span class="pre">processing.sharedctypes</span></tt> module which supports the creation of
arbitrary <a class="reference" href="sharedctypes.html">ctypes objects allocated from shared memory</a>.</p>
</dd>
<dt><strong>Server process</strong>:</dt>
<dd><p class="first">A manager object returned by <tt class="docutils literal"><span class="pre">Manager()</span></tt> controls a server process
which holds python objects and allows other processes to manipulate
them using proxies.</p>
<p>A manager returned by <tt class="docutils literal"><span class="pre">Manager()</span></tt> will support types <tt class="docutils literal"><span class="pre">list</span></tt>,
<tt class="docutils literal"><span class="pre">dict</span></tt>, <tt class="docutils literal"><span class="pre">Namespace</span></tt>, <tt class="docutils literal"><span class="pre">Lock</span></tt>, <tt class="docutils literal"><span class="pre">RLock</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore</span></tt>,
<tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Condition</span></tt>, <tt class="docutils literal"><span class="pre">Event</span></tt>, <tt class="docutils literal"><span class="pre">Queue</span></tt>, <tt class="docutils literal"><span class="pre">Value</span></tt>
and <tt class="docutils literal"><span class="pre">Array</span></tt>. For example:</p>
<pre class="literal-block">
from processing import Process, Manager
def f(d, l):
d[1] = '1'
d['2'] = 2
d[0.25] = None
l.reverse()
if __name__ == '__main__':
manager = Manager()
d = manager.dict()
l = manager.list(range(10))
p = Process(target=f, args=(d, l))
p.start()
p.join()
print d
print l
</pre>
<p>will print</p>
<pre class="literal-block">
{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
</pre>
<p>Creating managers which support other types is not hard --- see
<a class="reference" href="manager-objects.html#customized-managers">Customized managers</a>.</p>
<p class="last">Server process managers are more flexible than using shared memory
objects because they can be made to support arbitrary object types.
Also, a single manager can be shared by processes on different
computers over a network. They are, however, slower than using
shared memory. See <a class="reference" href="manager-objects.html#server-process-managers">Server process managers</a>.</p>
</dd>
</dl>
</div>
<div class="section">
<h1><a id="using-a-pool-of-workers" name="using-a-pool-of-workers">Using a pool of workers</a></h1>
<p>The <tt class="docutils literal"><span class="pre">Pool()</span></tt> function returns an object representing a pool of worker
processes. It has methods which allows tasks to be offloaded to the
worker processes in a few different ways.</p>
<p>For example:</p>
<pre class="literal-block">
from processing import Pool
def f(x):
return x*x
if __name__ == '__main__':
pool = Pool(processes=4) # start 4 worker processes
result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously
print result.get(timeout=1) # prints "100" unless your computer is *very* slow
print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
</pre>
<p>See <a class="reference" href="pool-objects.html">Process pools</a>.</p>
</div>
<div class="section">
<h1><a id="speed" name="speed">Speed</a></h1>
<p>The following benchmarks were performed on a single core Pentium 4,
2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see
<a class="reference" href="../examples/benchmarks.py">benchmarks.py</a>.</p>
<p><em>Number of 256 byte string objects passed between processes/threads per sec</em>:</p>
<table border="1" class="docutils">
<colgroup>
<col width="55%" />
<col width="16%" />
<col width="29%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">Connection type</th>
<th class="head">Windows</th>
<th class="head">Linux</th>
</tr>
</thead>
<tbody valign="top">
<tr><td>Queue.Queue</td>
<td>49,000</td>
<td>17,000-50,000 <a class="footnote-reference" href="#id2" id="id1" name="id1">[1]</a></td>
</tr>
<tr><td>processing.Queue</td>
<td>22,000</td>
<td>21,000</td>
</tr>
<tr><td>Queue managed by server</td>
<td>6,900</td>
<td>6,500</td>
</tr>
<tr><td>processing.Pipe</td>
<td>52,000</td>
<td>57,000</td>
</tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="id2" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id1" name="id2">[1]</a></td><td>For some reason the performance of <tt class="docutils literal"><span class="pre">Queue.Queue</span></tt> is very
variable on Linux.</td></tr>
</tbody>
</table>
<p><em>Number of acquires/releases of a lock per sec</em>:</p>
<table border="1" class="docutils">
<colgroup>
<col width="60%" />
<col width="20%" />
<col width="20%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">Lock type</th>
<th class="head">Windows</th>
<th class="head">Linux</th>
</tr>
</thead>
<tbody valign="top">
<tr><td>threading.Lock</td>
<td>850,000</td>
<td>560,000</td>
</tr>
<tr><td>processing.Lock</td>
<td>420,000</td>
<td>510,000</td>
</tr>
<tr><td>Lock managed by server</td>
<td>10,000</td>
<td>8,400</td>
</tr>
<tr><td>threading.RLock</td>
<td>93,000</td>
<td>76,000</td>
</tr>
<tr><td>processing.RLock</td>
<td>420,000</td>
<td>500,000</td>
</tr>
<tr><td>RLock managed by server</td>
<td>8,800</td>
<td>7,400</td>
</tr>
</tbody>
</table>
<p><em>Number of interleaved waits/notifies per sec on a
condition variable by two processes</em>:</p>
<table border="1" class="docutils">
<colgroup>
<col width="60%" />
<col width="20%" />
<col width="20%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">Condition type</th>
<th class="head">Windows</th>
<th class="head">Linux</th>
</tr>
</thead>
<tbody valign="top">
<tr><td>threading.Condition</td>
<td>27,000</td>
<td>31,000</td>
</tr>
<tr><td>processing.Condition</td>
<td>26,000</td>
<td>25,000</td>
</tr>
<tr><td>Condition managed by server</td>
<td>6,600</td>
<td>6,000</td>
</tr>
</tbody>
</table>
<p><em>Number of integers retrieved from a sequence per sec</em>:</p>
<table border="1" class="docutils">
<colgroup>
<col width="60%" />
<col width="20%" />
<col width="20%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">Sequence type</th>
<th class="head">Windows</th>
<th class="head">Linux</th>
</tr>
</thead>
<tbody valign="top">
<tr><td>list</td>
<td>6,400,000</td>
<td>5,100,000</td>
</tr>
<tr><td>unsynchronized shared array</td>
<td>3,900,000</td>
<td>3,100,000</td>
</tr>
<tr><td>synchronized shared array</td>
<td>200,000</td>
<td>220,000</td>
</tr>
<tr><td>list managed by server</td>
<td>20,000</td>
<td>17,000</td>
</tr>
</tbody>
</table>
</div>
</div>
<div class="footer">
<hr class="footer" />
<a class="reference" href="index.html">Prev</a> <a class="reference" href="index.html">Up</a> <a class="reference" href="processing-ref.html">Next</a>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/*
* File: StringFunctions.h
* Author: Dorian Galvez-Lopez
* Date: December 2010
* Description: string functions
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#ifndef __D_STRING__
#define __D_STRING__
#include <string>
#include <vector>
#include <string>
#include <sstream>
namespace DUtils {
/// Functions to manipulate strings
class StringFunctions
{
public:
/**
* Splits the given string into single tokens
* @param s string to split
* @param tokens returned tokens (no empty tokens are returned)
* @param delims delimitation characters
* @param max_splits maximum number of splits. If -1, all the possible splits
* are done. Otherwise, those delimiters found after the max number of
* splits has been done are ignored
*/
static void split(const std::string &s, std::vector<std::string> &tokens,
const std::string &delims = " \t\n", int max_splits = -1);
/**
* Removes blank spaces, tabs and new lines from the beginning and the
* end of the string
* @param s string to trim (modified in place)
*/
static void trim(std::string &s);
/**
* Removes from a string all the chars after finding the first char given
* (this included)
* @param s string to modify
* @param c character to find and to start the removal from
* @param escape if given, the char c found in the string is ignored if it
* is the same as escape. This string must include the character c
* @example removeFrom(s, '#', "\#")
*/
static void removeFrom(std::string &s, const char c,
const std::string &escape = "");
/**
* Replaces each occurrence of one of several strings in s by another string
* @param s the original string whose substring will be replaced
* @param map a vector of pairs of string where the first one is the substring
* to search for, and the second one, the replacing text
* @note the entries are searched for as they appear in the map vector. An
* entry can replace the text already put by previous entries
*/
static void replace(std::string &s,
const std::vector<std::pair<std::string, std::string> > &map);
/**
* The same as above, but only with one "pair"
* @param s string to modify in place
* @param search substring to search for
* @param rep replacement text
*/
static void replace(std::string &s, const std::string &search,
const std::string &rep);
/**
* Converts a piece of data into a string
* (uses the stream insertion operator<< of T; see definition below)
* @param data value to convert
*/
template<class T>
static std::string toString(const T& data);
/**
* Returns a data type from a string representation
* (uses the stream extraction operator>> of T; see definition below)
* @param s textual representation; an empty string yields T()
*/
template<class T>
static T fromString(const std::string &s);
};
// --------------------------------------------------------------------------
template<class T>
std::string StringFunctions::toString(const T& data)
{
  // Serialize the value through the standard stream insertion operator.
  std::ostringstream stream;
  stream << data;
  return stream.str();
}
// --------------------------------------------------------------------------
// Specialization for std::string: the representation is the value itself,
// so it is returned unchanged (note: unlike the generic version, an empty
// input yields an empty string rather than a default-constructed value,
// which is the same thing here).
template<>
inline std::string StringFunctions::fromString(const std::string &s)
{
return s;
}
template<class T>
T StringFunctions::fromString(const std::string &s)
{
  // An empty string yields a default-constructed value.
  if (s.empty()) {
    return T();
  }
  // Otherwise parse the text with the stream extraction operator of T.
  std::istringstream parser(s);
  T value;
  parser >> value;
  return value;
}
// --------------------------------------------------------------------------
}
#endif
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as React from "react";
import { useMemo } from "react";
import {
PropsComponentProps,
useComponents
} from "../../../../docz-lib/docz/dist";
import styled from "styled-components";
import { get } from "../../../utils/theme";
// Viewport width at which the table switches from stacked to row layout.
const breakpoint = "600px";
// Outer card wrapping the whole props table.
const Container = styled.div`
border: 1px solid ${get("colors.border")};
border-radius: 4px;
overflow: hidden;
background: ${get("colors.propsBg")};
color: ${get("colors.propsText")};
`;
// One row of the props table (one documented prop). Rows are separated by a
// top border; on wide screens the padding moves to the individual cells.
const Line = styled.div`
padding: 8px 0;
@media (min-width: ${breakpoint}) {
padding: 0;
}
& + & {
border-top: 1px solid ${get("colors.border")};
}
`;
// Generic table cell; padding shrinks/stacks on narrow screens.
const Column = styled.div`
min-width: 0;
padding: 2px 15px;
word-wrap: break-word;
@media (min-width: ${breakpoint}) {
padding: 8px 15px;
}
`;
// Cell holding the prop name (25% of the row on wide screens).
const ColumnName = styled(Column)`
@media (min-width: ${breakpoint}) {
flex-basis: 25%;
}
`;
// Cell holding the prop type (50% of the row on wide screens).
const ColumnType = styled(Column)`
@media (min-width: ${breakpoint}) {
flex-basis: 50%;
}
`;
// Cell holding the default value / required flag (25% on wide screens).
const ColumnValue = styled(Column)`
@media (min-width: ${breakpoint}) {
flex-basis: 25%;
}
`;
// Row layout: cells stack vertically on narrow screens, one flex row otherwise.
const Content = styled.div`
display: flex;
flex-direction: column;
font-family: ${get("fonts.mono")};
@media (min-width: ${breakpoint}) {
flex-wrap: nowrap;
flex-direction: row;
}
`;
// Inline text styles for the individual fields of a prop row.
const PropName = styled.span`
color: ${get("colors.primary")};
font-weight: bold;
`;
const PropType = styled.span`
font-size: 0.9em;
`;
const PropDefaultValue = styled.span`
font-size: 0.9em;
`;
const PropRequired = styled.span`
font-size: 0.8em;
opacity: 0.8;
`;
// Renders a documentation table for a component's props.
// `props` is a docgen-style map of prop name -> prop metadata;
// `getPropType(prop)` formats one entry's type for display.
export const PropsTable = ({ props, getPropType }) => {
const entries = Object.entries(props);
const components = useComponents();
// Paragraph style for prop descriptions, built on the theme's P component
// when one is provided, falling back to a plain <p>.
// NOTE(review): `components.P` is read here but missing from the dependency
// array, so the style is computed once per mount — confirm this is intended.
const Paragraph = useMemo(
() => styled(components.P || "p")`
margin: 0;
font-size: 16px;
color: ${get("colors.blockquoteColor")};
padding: 0 15px 8px 15px;
`,
[]
);
return (
<Container>
{entries.map(([key, prop]) => {
// Skip entries whose type could not be inferred.
if (!prop.type && !prop.flowType) return null;
return (
<Line key={key}>
<Content>
<ColumnName>
<PropName>{key}</PropName>
</ColumnName>
<ColumnType>
<PropType>{getPropType(prop)}</PropType>
</ColumnType>
<ColumnValue>
{prop.defaultValue && (
<PropDefaultValue>
{prop.defaultValue.value === "''" ? (
<em>= [Empty String]</em>
) : (
<em>= {prop.defaultValue.value.replace(/\'/g, '"')}</em>
)}
</PropDefaultValue>
)}
{prop.required && (
<PropRequired>
<strong>required</strong>
</PropRequired>
)}
</ColumnValue>
</Content>
{prop.description && <Paragraph>{prop.description}</Paragraph>}
</Line>
);
})}
</Container>
);
};
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.SerialNumberManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.LimitInputStream;
import org.apache.hadoop.util.Time;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* This class reads the protobuf-based fsimage and generates text output
* for each inode to {@link PBImageTextWriter#out}. The sub-class can override
* {@link #getEntry(String, INode)} to generate a formatted string for each inode.
*
* Since protobuf-based fsimage does not guarantee the order of inodes and
* directories, PBImageTextWriter runs two-phase scans:
*
* <ol>
* <li>The first phase, PBImageTextWriter scans the INode sections to read the
* filename of each directory. It also scans the INode_Dir sections to load
* the relationships between a directory and its children. It uses this metadata
* to build FS namespace and stored in {@link MetadataMap}</li>
* <li>The second phase, PBImageTextWriter re-scans the INode sections. For each
* inode, it looks up the path of the parent directory in the {@link MetadataMap},
* and generate output.</li>
* </ol>
*
* Two variants of {@link MetadataMap} are provided. {@link InMemoryMetadataDB}
* stores all metadata in memory (O(n) memory) while
* {@link LevelDBMetadataMap} stores metadata in LevelDB on disk (O(1) memory).
* User can choose between them based on the time/space tradeoffs.
*/
abstract class PBImageTextWriter implements Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(PBImageTextWriter.class);
// Default field separator for the delimited text output.
static final String DEFAULT_DELIMITER = "\t";
// Windows-style line break; occurrences inside field values are escaped
// before printing (see append(StringBuffer, String)).
static final String CRLF = StringUtils.CR + StringUtils.LF;
/**
* This metadata map is used to construct the namespace before generating
* text outputs.
*
* It contains two mapping relationships:
* <p>
* <li>It maps each inode (inode Id) to its parent directory (inode Id).</li>
* <li>It maps each directory from its inode Id.</li>
* </p>
*/
private static interface MetadataMap extends Closeable {
/**
* Associate an inode with its parent directory.
*/
public void putDirChild(long parentId, long childId) throws IOException;
/**
* Associate a directory with its inode Id.
*/
public void putDir(INode dir) throws IOException;
/** Get the full path of the parent directory for the given inode. */
public String getParentPath(long inode) throws IOException;
/** Synchronize metadata to persistent storage, if possible. */
public void sync() throws IOException;
/** Returns the name of inode. */
String getName(long id) throws IOException;
/**
* Returns the id of the parent's inode, if mentioned in
* INodeDirectorySection, throws IgnoreSnapshotException otherwise.
*/
long getParentId(long id) throws IOException;
}
/**
* Maintain all the metadata in memory.
*/
private static class InMemoryMetadataDB implements MetadataMap {
/**
* Represent a directory in memory.
*/
private static class Dir {
private final long inode;
private Dir parent = null;
private String name;
private String path = null; // cached full path of the directory.
Dir(long inode, String name) {
this.inode = inode;
this.name = name;
}
// The parent may be assigned at most once; a second call fails the
// Preconditions check with IllegalStateException.
private void setParent(Dir parent) {
Preconditions.checkState(this.parent == null);
this.parent = parent;
}
/**
* Returns the full path of this directory. The path is computed lazily
* by walking up the parent chain and cached for later calls. A non-root
* directory without a parent is treated as snapshot-only and ignored.
*/
String getPath() throws IgnoreSnapshotException {
if (this.parent == null) {
if (this.inode == INodeId.ROOT_INODE_ID) {
return "/";
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Not root inode with id {} having no parent.", inode);
}
throw PBImageTextWriter.createIgnoredSnapshotException(inode);
}
}
if (this.path == null) {
this.path = new Path(parent.getPath(), name.isEmpty() ? "/" : name).
toString();
}
return this.path;
}
String getName() throws IgnoreSnapshotException {
return name;
}
long getId() {
return inode;
}
@Override
public boolean equals(Object o) {
return o instanceof Dir && inode == ((Dir) o).inode;
}
@Override
public int hashCode() {
return Long.valueOf(inode).hashCode();
}
}
/**
* If the Dir entry does not exist (i.e. the inode was not contained in
* INodeSection) we still create a Dir entry which throws exceptions
* for calls other than getId().
* We can make sure this way, the getId and getParentId calls will
* always succeed if we have the information.
*/
private static class CorruptedDir extends Dir {
CorruptedDir(long inode) {
super(inode, null);
}
@Override
String getPath() throws IgnoreSnapshotException {
throw PBImageTextWriter.createIgnoredSnapshotException(getId());
}
@Override
String getName() throws IgnoreSnapshotException {
throw PBImageTextWriter.createIgnoredSnapshotException(getId());
}
}
/** INode Id to Dir object mapping */
private Map<Long, Dir> dirMap = new HashMap<>();
/** Children to parent directory INode ID mapping. */
private Map<Long, Dir> dirChildMap = new HashMap<>();
InMemoryMetadataDB() {
}
@Override
public void close() throws IOException {
}
// Look up a Dir by id, creating a CorruptedDir placeholder when the inode
// was not seen in the INodeSection (see CorruptedDir above).
private Dir getOrCreateCorrupted(long id) {
Dir dir = dirMap.get(id);
if (dir == null) {
dir = new CorruptedDir(id);
dirMap.put(id, dir);
}
return dir;
}
// Records the parent/child edge; a child may be registered only once.
@Override
public void putDirChild(long parentId, long childId) {
Dir parent = getOrCreateCorrupted(parentId);
Dir child = getOrCreateCorrupted(childId);
child.setParent(parent);
Preconditions.checkState(!dirChildMap.containsKey(childId));
dirChildMap.put(childId, parent);
}
@Override
public void putDir(INode p) {
Preconditions.checkState(!dirMap.containsKey(p.getId()));
Dir dir = new Dir(p.getId(), p.getName().toStringUtf8());
dirMap.put(p.getId(), dir);
}
@Override
public String getParentPath(long inode) throws IOException {
if (inode == INodeId.ROOT_INODE_ID) {
return "/";
}
Dir parent = dirChildMap.get(inode);
if (parent == null) {
// The inode is an INodeReference, which is generated from snapshot.
// For delimited oiv tool, no need to print out metadata in snapshots.
throw PBImageTextWriter.createIgnoredSnapshotException(inode);
}
return parent.getPath();
}
// No persistent storage to synchronize for the in-memory variant.
@Override
public void sync() {
}
@Override
public String getName(long id) throws IgnoreSnapshotException {
Dir dir = dirMap.get(id);
if (dir != null) {
return dir.getName();
}
throw PBImageTextWriter.createIgnoredSnapshotException(id);
}
@Override
public long getParentId(long id) throws IgnoreSnapshotException {
Dir parentDir = dirChildMap.get(id);
if (parentDir != null) {
return parentDir.getId();
}
throw PBImageTextWriter.createIgnoredSnapshotException(id);
}
}
/**
* A MetadataMap that stores metadata in LevelDB.
*/
private static class LevelDBMetadataMap implements MetadataMap {
/**
* Store metadata in LevelDB. Writes are buffered in a WriteBatch and
* flushed every BATCH_SIZE puts (or on an explicit sync()).
*/
private static class LevelDBStore implements Closeable {
private DB db = null;
private WriteBatch batch = null;
private int writeCount = 0;
private static final int BATCH_SIZE = 1024;
LevelDBStore(final File dbPath) throws IOException {
Options options = new Options();
options.createIfMissing(true);
options.errorIfExists(true);
db = JniDBFactory.factory.open(dbPath, options);
batch = db.createWriteBatch();
}
@Override
public void close() throws IOException {
if (batch != null) {
IOUtils.cleanup(null, batch);
batch = null;
}
IOUtils.cleanup(null, db);
db = null;
}
public void put(byte[] key, byte[] value) throws IOException {
batch.put(key, value);
writeCount++;
if (writeCount >= BATCH_SIZE) {
sync();
}
}
public byte[] get(byte[] key) throws IOException {
return db.get(key);
}
// Flushes the current batch to the DB and starts a new one.
// NOTE(review): if db.write() throws, the finally block leaves batch
// null and the new batch is never created, so a subsequent put() would
// NPE — confirm whether callers always abort after a failed sync.
public void sync() throws IOException {
try {
db.write(batch);
} finally {
batch.close();
batch = null;
}
batch = db.createWriteBatch();
writeCount = 0;
}
}
/**
* A bounded cache for directory path strings, keyed by the directory's
* inode id.
*
* NOTE(review): this LinkedHashMap uses the default insertion-order mode,
* so eviction via removeEldestEntry is FIFO rather than true LRU.
*/
private static class DirPathCache extends LinkedHashMap<Long, String> {
private final static int CAPACITY = 16 * 1024;
DirPathCache() {
super(CAPACITY);
}
@Override
protected boolean removeEldestEntry(Map.Entry<Long, String> entry) {
return super.size() > CAPACITY;
}
}
/** Map the child inode to the parent directory inode. */
private LevelDBStore dirChildMap = null;
/** Directory entry map: inode id to directory name. */
private LevelDBStore dirMap = null;
private DirPathCache dirPathCache = new DirPathCache();
/**
 * Creates a LevelDB-backed metadata map under {@code baseDir}.
 *
 * @param baseDir directory to create the LevelDB stores in; it must not
 *        already exist.
 * @throws IOException if the directory exists, cannot be created, or the
 *         LevelDB stores cannot be opened.
 */
LevelDBMetadataMap(String baseDir) throws IOException {
  File dbDir = new File(baseDir);
  if (dbDir.exists()) {
    throw new IOException("Folder " + dbDir + " already exists! Delete " +
        "manually or provide another (not existing) directory!");
  }
  if (!dbDir.mkdirs()) {
    throw new IOException("Failed to mkdir on " + dbDir);
  }
  try {
    dirChildMap = new LevelDBStore(new File(dbDir, "dirChildMap"));
    dirMap = new LevelDBStore(new File(dbDir, "dirMap"));
  } catch (IOException e) {
    LOG.error("Failed to open LevelDBs", e);
    IOUtils.cleanup(null, this);
    // Propagate the failure instead of returning a half-initialized map,
    // whose null stores would otherwise cause NPEs on first use.
    throw e;
  }
}
@Override
public void close() throws IOException {
IOUtils.cleanup(null, dirChildMap, dirMap);
dirChildMap = null;
dirMap = null;
}
// Big-endian 8-byte encoding of a long, used as both key and value format.
private static byte[] toBytes(long value) {
return ByteBuffer.allocate(8).putLong(value).array();
}
private static byte[] toBytes(String value)
throws UnsupportedEncodingException {
return value.getBytes("UTF-8");
}
private static long toLong(byte[] bytes) {
Preconditions.checkArgument(bytes.length == 8);
return ByteBuffer.wrap(bytes).getLong();
}
private static String toString(byte[] bytes) throws IOException {
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IOException(e);
}
}
@Override
public void putDirChild(long parentId, long childId) throws IOException {
dirChildMap.put(toBytes(childId), toBytes(parentId));
}
@Override
public void putDir(INode dir) throws IOException {
Preconditions.checkArgument(dir.hasDirectory(),
"INode %s (%s) is not a directory.", dir.getId(), dir.getName());
dirMap.put(toBytes(dir.getId()), toBytes(dir.getName().toStringUtf8()));
}
// Looks up the parent inode id of the given inode; a missing entry means
// the inode came from a snapshot and is ignored.
private long getFromDirChildMap(long inode) throws IOException {
byte[] bytes = dirChildMap.get(toBytes(inode));
if (bytes == null) {
// The inode is an INodeReference, which is generated from snapshot.
// For delimited oiv tool, no need to print out metadata in snapshots.
throw PBImageTextWriter.createIgnoredSnapshotException(inode);
}
if (bytes.length != 8) {
throw new IOException(
"bytes array length error. Actual length is " + bytes.length);
}
return toLong(bytes);
}
// Resolves the full path of the parent directory, caching intermediate
// results in dirPathCache.
// NOTE(review): if the parent is the root inode but has no dirMap entry,
// toString(bytes) below would NPE — presumably the root always appears in
// the INode section; confirm.
@Override
public String getParentPath(long inode) throws IOException {
if (inode == INodeId.ROOT_INODE_ID) {
return "/";
}
long parent = getFromDirChildMap(inode);
if (!dirPathCache.containsKey(parent)) {
byte[] bytes = dirMap.get(toBytes(parent));
if (parent != INodeId.ROOT_INODE_ID && bytes == null) {
// The parent is an INodeReference, which is generated from snapshot.
// For delimited oiv tool, no need to print out metadata in snapshots.
throw PBImageTextWriter.createIgnoredSnapshotException(inode);
}
String parentName = toString(bytes);
String parentPath =
new Path(getParentPath(parent),
parentName.isEmpty() ? "/" : parentName).toString();
dirPathCache.put(parent, parentPath);
}
return dirPathCache.get(parent);
}
@Override
public void sync() throws IOException {
dirChildMap.sync();
dirMap.sync();
}
@Override
public String getName(long id) throws IOException {
byte[] bytes = dirMap.get(toBytes(id));
if (bytes != null) {
return toString(bytes);
}
throw PBImageTextWriter.createIgnoredSnapshotException(id);
}
@Override
public long getParentId(long id) throws IOException {
return getFromDirChildMap(id);
}
}
// String table from the fsimage, used to decode permissions.
private SerialNumberManager.StringTable stringTable;
// Destination stream for the generated text.
private PrintStream out;
private MetadataMap metadataMap = null;
// Separator placed between fields of one output line.
private String delimiter;
/**
* Construct a PB FsImage writer to generate text file.
* @param out the writer to output text information of fsimage.
* @param delimiter the string placed between fields of an output line.
* @param tempPath the path to store metadata. If it is empty, store metadata
* in memory instead.
*/
PBImageTextWriter(PrintStream out, String delimiter, String tempPath)
throws IOException {
this.out = out;
this.delimiter = delimiter;
if (tempPath.isEmpty()) {
metadataMap = new InMemoryMetadataDB();
} else {
metadataMap = new LevelDBMetadataMap(tempPath);
}
}
@Override
public void close() throws IOException {
out.flush();
IOUtils.cleanup(null, metadataMap);
}
// Append an int field preceded by the delimiter.
void append(StringBuffer buffer, int field) {
buffer.append(delimiter);
buffer.append(field);
}
// Append a long field preceded by the delimiter.
void append(StringBuffer buffer, long field) {
buffer.append(delimiter);
buffer.append(field);
}
// Append a string field preceded by the delimiter. The value is CSV-escaped
// and embedded line breaks are replaced with the literal sequences
// "%x0D%x0A" (CRLF) or "%x0A" (bare LF) so one record stays on one line.
void append(StringBuffer buffer, String field) {
buffer.append(delimiter);
String escapedField = StringEscapeUtils.escapeCsv(field);
if (escapedField.contains(CRLF)) {
escapedField = escapedField.replace(CRLF, "%x0D%x0A");
} else if (escapedField.contains(StringUtils.LF)) {
escapedField = escapedField.replace(StringUtils.LF, "%x0A");
}
buffer.append(escapedField);
}
/**
* Get text output for the given inode.
* @param parent the path of parent directory
* @param inode the INode object to output.
* @return one formatted output line, or an empty string to skip this inode
* (see printIfNotEmpty).
*/
abstract protected String getEntry(String parent, INode inode);
/**
* Get text output for the header line.
*/
abstract protected String getHeader();
/**
* Method called at the end of output() phase after all the inodes
* with known parentPath has been printed out. Can be used to print
* additional data depending on the written inodes.
*/
abstract protected void afterOutput() throws IOException;
/**
 * Scan the fsimage file and emit one line of text per inode.
 *
 * Sections are sorted into {@link FSImageFormatProtobuf.SectionName}
 * ordinal order so the string table and inode references are loaded
 * before the namespace is built and the inodes are printed.
 *
 * @param file the fsimage file to process.
 * @throws IOException if the file is not a recognizable fsimage or a
 *         section cannot be read.
 */
public void visit(RandomAccessFile file) throws IOException {
  Configuration conf = new Configuration();
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }
  FileSummary summary = FSImageUtil.loadSummary(file);
  try (FileInputStream fin = new FileInputStream(file.getFD())) {
    InputStream is;
    ArrayList<FileSummary.Section> sections =
        Lists.newArrayList(summary.getSectionsList());
    Collections.sort(sections,
        new Comparator<FileSummary.Section>() {
          @Override
          public int compare(FsImageProto.FileSummary.Section s1,
              FsImageProto.FileSummary.Section s2) {
            FSImageFormatProtobuf.SectionName n1 =
                FSImageFormatProtobuf.SectionName.fromString(s1.getName());
            FSImageFormatProtobuf.SectionName n2 =
                FSImageFormatProtobuf.SectionName.fromString(s2.getName());
            // Unknown sections sort first. The original code returned -1
            // when only n2 was null, which broke the Comparator contract
            // (compare(a, b) and compare(b, a) were both negative) and
            // could make the sort throw; return 1 in that case instead.
            if (n1 == null) {
              return n2 == null ? 0 : -1;
            } else if (n2 == null) {
              return 1;
            } else {
              return n1.ordinal() - n2.ordinal();
            }
          }
        });
    ImmutableList<Long> refIdList = null;
    for (FileSummary.Section section : sections) {
      fin.getChannel().position(section.getOffset());
      is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      SectionName sectionName = SectionName.fromString(section.getName());
      if (sectionName == null) {
        throw new IOException("Unrecognized section " + section.getName());
      }
      switch (sectionName) {
      case STRING_TABLE:
        LOG.info("Loading string table");
        stringTable = FSImageLoader.loadStringTable(is);
        break;
      case INODE_REFERENCE:
        // Load INodeReference so that all INodes can be processed.
        // Snapshots are not handled and will just be ignored for now.
        LOG.info("Loading inode references");
        refIdList = FSImageLoader.loadINodeReferenceSection(is);
        break;
      default:
        break;
      }
    }
    loadDirectories(fin, sections, summary, conf);
    loadINodeDirSection(fin, sections, summary, conf, refIdList);
    metadataMap.sync();
    output(conf, summary, fin, sections);
  }
}
/** Record a parent/child edge in the metadata map (for subclasses). */
void putDirChildToMetadataMap(long parentId, long childId)
throws IOException {
metadataMap.putDirChild(parentId, childId);
}
/** Returns the name of the directory inode with the given id. */
String getNodeName(long id) throws IOException {
return metadataMap.getName(id);
}
/** Returns the parent inode id of the inode with the given id. */
long getParentId(long id) throws IOException {
return metadataMap.getParentId(id);
}
// Second pass: re-scan the INODE section(s) and print one line per inode,
// preceded by the header line and followed by the afterOutput() hook.
private void output(Configuration conf, FileSummary summary,
FileInputStream fin, ArrayList<FileSummary.Section> sections)
throws IOException {
InputStream is;
long startTime = Time.monotonicNow();
out.println(getHeader());
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName()) == SectionName.INODE) {
fin.getChannel().position(section.getOffset());
is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, section.getLength())));
outputINodes(is);
}
}
afterOutput();
long timeTaken = Time.monotonicNow() - startTime;
LOG.debug("Time to output inodes: {}ms", timeTaken);
}
/** Decodes a serialized permission word using the loaded string table. */
protected PermissionStatus getPermission(long perm) {
return FSImageFormatPBINode.Loader.loadPermission(perm, stringTable);
}
/** Load the directories in the INode section (first pass). */
private void loadDirectories(
FileInputStream fin, List<FileSummary.Section> sections,
FileSummary summary, Configuration conf)
throws IOException {
LOG.info("Loading directories");
long startTime = Time.monotonicNow();
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName())
== SectionName.INODE) {
fin.getChannel().position(section.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, section.getLength())));
loadDirectoriesInINodeSection(is);
}
}
long timeTaken = Time.monotonicNow() - startTime;
LOG.info("Finished loading directories in {}ms", timeTaken);
}
// Scans the INODE_DIR section(s) and builds the parent/child relationships
// in the metadata map; refIdList resolves reference children to inode ids.
private void loadINodeDirSection(
FileInputStream fin, List<FileSummary.Section> sections,
FileSummary summary, Configuration conf, List<Long> refIdList)
throws IOException {
LOG.info("Loading INode directory section.");
long startTime = Time.monotonicNow();
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName())
== SectionName.INODE_DIR) {
fin.getChannel().position(section.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(
new LimitInputStream(fin, section.getLength())));
buildNamespace(is, refIdList);
}
}
long timeTaken = Time.monotonicNow() - startTime;
LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
/**
* Checks the inode (saves if directory), and counts them. Can be overridden
* if additional steps are taken when iterating through INodeSection.
*/
protected void checkNode(INode p, AtomicInteger numDirs) throws IOException {
if (p.hasDirectory()) {
metadataMap.putDir(p);
numDirs.incrementAndGet();
}
}
/**
 * Load the filenames of the directories from the INode section.
 * Every inode is parsed; directory inodes are stored via
 * {@link #checkNode(INode, AtomicInteger)}.
 */
private void loadDirectoriesInINodeSection(InputStream in)
    throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  LOG.info("Loading directories in INode section.");
  AtomicInteger dirCount = new AtomicInteger(0);
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INode inode = INode.parseDelimitedFrom(in);
    if (LOG.isDebugEnabled() && idx % 10000 == 0) {
      LOG.debug("Scanned {} inodes.", idx);
    }
    checkNode(inode, dirCount);
  }
  LOG.info("Found {} directories in INode section.", dirCount);
}
/**
 * Scan the INodeDirectory section to construct the namespace: records a
 * parent-to-child edge in metadataMap for every direct and reference child.
 */
protected void buildNamespace(InputStream in, List<Long> refIdList)
    throws IOException {
  int dirCount = 0;
  for (FsImageProto.INodeDirectorySection.DirEntry entry =
           FsImageProto.INodeDirectorySection.DirEntry.parseDelimitedFrom(in);
       entry != null;
       entry = FsImageProto.INodeDirectorySection.DirEntry
           .parseDelimitedFrom(in)) {
    dirCount++;
    if (LOG.isDebugEnabled() && dirCount % 10000 == 0) {
      LOG.debug("Scanned {} directories.", dirCount);
    }
    long parentId = entry.getParent();
    // Direct children are stored by inode id.
    for (int c = 0; c < entry.getChildrenCount(); c++) {
      metadataMap.putDirChild(parentId, entry.getChildren(c));
    }
    // Reference children are stored as indices into refIdList.
    for (int r = 0; r < entry.getRefChildrenCount(); r++) {
      metadataMap.putDirChild(parentId,
          refIdList.get(entry.getRefChildren(r)));
    }
  }
  LOG.info("Scanned {} INode directories to build namespace.", dirCount);
}
/** Writes {@code line} to {@code out}, skipping empty strings. */
void printIfNotEmpty(String line) {
  if (line.isEmpty()) {
    return;
  }
  out.println(line);
}
/**
 * Streams every inode of the INode section to the output, one entry per
 * inode. Inodes whose parent path cannot be resolved (e.g. snapshot-only
 * inodes) are counted and skipped rather than aborting the run.
 */
private void outputINodes(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", section.getNumInodes());
  long ignored = 0;
  long ignoredSnapshots = 0;
  for (int i = 0; i < section.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    try {
      printIfNotEmpty(getEntry(metadataMap.getParentPath(p.getId()), p));
    } catch (IOException ioe) {
      ignored++;
      if (ioe instanceof IgnoreSnapshotException) {
        // Expected for snapshot inodes; only log at debug level.
        ignoredSnapshots++;
        if (LOG.isDebugEnabled()) {
          LOG.debug("Exception caught, ignoring node:{}.", p.getId(), ioe);
        }
      } else {
        LOG.warn("Exception caught, ignoring node:{}", p.getId(), ioe);
      }
    }
    if (LOG.isDebugEnabled() && i % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", i);
    }
  }
  if (ignored > 0) {
    LOG.warn("Ignored {} nodes, including {} in snapshots. Please turn on"
        + " debug log for details", ignored, ignoredSnapshots);
  }
  LOG.info("Outputted {} INodes.", section.getNumInodes());
}
/**
 * Builds the {@link IgnoreSnapshotException} used to skip snapshot inodes so
 * the output stays similar to a plain {@code -ls -R} listing.
 *
 * @param inode id of the inode whose snapshot name could not be resolved
 */
private static IgnoreSnapshotException createIgnoredSnapshotException(
long inode) {
// Ignore snapshots - we want the output similar to -ls -R.
if (LOG.isDebugEnabled()) {
LOG.debug("No snapshot name found for inode {}", inode);
}
return new IgnoreSnapshotException();
}
/**
 * Returns the block storage policy id recorded in the inode's xattrs, or
 * {@code HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} when no storage
 * policy xattr is present.
 */
public int getStoragePolicy(
    INodeSection.XAttrFeatureProto xattrFeatureProto) {
  List<XAttr> decoded =
      FSImageFormatPBINode.Loader.loadXAttrs(xattrFeatureProto, stringTable);
  for (XAttr attr : decoded) {
    if (!BlockStoragePolicySuite.isStoragePolicyXAttr(attr)) {
      continue;
    }
    // Policy id is encoded in the first byte of the xattr value.
    return attr.getValue()[0];
  }
  return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
}
| {
"pile_set_name": "Github"
} |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.authorization.v1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// ExtraValue masks the value so protobuf can generate
// NOTE(review): the sentence above is truncated in the generator source;
// ExtraValue is a repeated-string wrapper used as the map value type of
// SubjectAccessReviewSpec.extra below.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
message ExtraValue {
// items, if empty, will result in an empty slice
repeated string items = 1;
}
// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
// checking.
message LocalSubjectAccessReview {
// Standard object metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
// you made the request against. If empty, it is defaulted.
optional SubjectAccessReviewSpec spec = 2;
// Status is filled in by the server and indicates whether the request is allowed or not
// +optional
optional SubjectAccessReviewStatus status = 3;
}
// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
message NonResourceAttributes {
// Path is the URL path of the request
// +optional
optional string path = 1;
// Verb is the standard HTTP verb
// +optional
optional string verb = 2;
}
// NonResourceRule holds information that describes a rule for the non-resource
message NonResourceRule {
// Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all.
repeated string verbs = 1;
// NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full,
// final step in the path. "*" means all.
// +optional
repeated string nonResourceURLs = 2;
}
// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
message ResourceAttributes {
// Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
// "" (empty) is defaulted for LocalSubjectAccessReviews
// "" (empty) is empty for cluster-scoped resources
// "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
// +optional
optional string namespace = 1;
// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
// +optional
optional string verb = 2;
// Group is the API Group of the Resource. "*" means all.
// +optional
optional string group = 3;
// Version is the API Version of the Resource. "*" means all.
// +optional
optional string version = 4;
// Resource is one of the existing resource types. "*" means all.
// +optional
optional string resource = 5;
// Subresource is one of the existing resource types. "" means none.
// +optional
optional string subresource = 6;
// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
// +optional
optional string name = 7;
}
// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
// may contain duplicates, and possibly be incomplete.
message ResourceRule {
// Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all.
repeated string verbs = 1;
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
// the enumerated resources in any API group will be allowed. "*" means all.
// +optional
repeated string apiGroups = 2;
// Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
// "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional
repeated string resources = 3;
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.
// +optional
repeated string resourceNames = 4;
}
// SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a
// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
// to check whether they can perform an action
message SelfSubjectAccessReview {
// Standard object metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec holds information about the request being evaluated. user and groups must be empty
optional SelfSubjectAccessReviewSpec spec = 2;
// Status is filled in by the server and indicates whether the request is allowed or not
// +optional
optional SubjectAccessReviewStatus status = 3;
}
// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
// and NonResourceAuthorizationAttributes must be set
message SelfSubjectAccessReviewSpec {
// ResourceAuthorizationAttributes describes information for a resource access request
// +optional
optional ResourceAttributes resourceAttributes = 1;
// NonResourceAttributes describes information for a non-resource access request
// +optional
optional NonResourceAttributes nonResourceAttributes = 2;
}
// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
// The returned list of actions may be incomplete depending on the server's authorization mode,
// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.
message SelfSubjectRulesReview {
// Standard object metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec holds information about the request being evaluated.
optional SelfSubjectRulesReviewSpec spec = 2;
// Status is filled in by the server and indicates the set of actions a user can perform.
// +optional
optional SubjectRulesReviewStatus status = 3;
}
// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
message SelfSubjectRulesReviewSpec {
// Namespace to evaluate rules for. Required.
optional string namespace = 1;
}
// SubjectAccessReview checks whether or not a user or group can perform an action.
message SubjectAccessReview {
// Standard object metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec holds information about the request being evaluated
optional SubjectAccessReviewSpec spec = 2;
// Status is filled in by the server and indicates whether the request is allowed or not
// +optional
optional SubjectAccessReviewStatus status = 3;
}
// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
// and NonResourceAuthorizationAttributes must be set
message SubjectAccessReviewSpec {
// ResourceAuthorizationAttributes describes information for a resource access request
// +optional
optional ResourceAttributes resourceAttributes = 1;
// NonResourceAttributes describes information for a non-resource access request
// +optional
optional NonResourceAttributes nonResourceAttributes = 2;
// User is the user you're testing for.
// If you specify "User" but not "Groups", then it is interpreted as "What if User were not a member of any groups".
// +optional
optional string user = 3;
// Groups is the groups you're testing for.
// +optional
repeated string groups = 4;
// Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
// it needs a reflection here.
// +optional
map<string, ExtraValue> extra = 5;
// UID information about the requesting user.
// +optional
optional string uid = 6;
}
// SubjectAccessReviewStatus holds the result of an access review check,
// filled in by the server.
message SubjectAccessReviewStatus {
// Allowed is required. True if the action would be allowed, false otherwise.
optional bool allowed = 1;
// Denied is optional. True if the action would be denied, otherwise
// false. If both allowed is false and denied is false, then the
// authorizer has no opinion on whether to authorize the action. Denied
// may not be true if Allowed is true.
// +optional
optional bool denied = 4;
// Reason is optional. It indicates why a request was allowed or denied.
// +optional
optional string reason = 2;
// EvaluationError is an indication that some error occurred during the authorization check.
// It is entirely possible to get an error and be able to continue determine authorization status in spite of it.
// For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
// +optional
optional string evaluationError = 3;
}
// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
// the set of authorizers the server is configured with and any errors experienced during evaluation.
// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
// even if that list is incomplete.
message SubjectRulesReviewStatus {
// ResourceRules is the list of actions the subject is allowed to perform on resources.
// The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
repeated ResourceRule resourceRules = 1;
// NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
// The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
repeated NonResourceRule nonResourceRules = 2;
// Incomplete is true when the rules returned by this call are incomplete. This is most commonly
// encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
optional bool incomplete = 3;
// EvaluationError can appear in combination with Rules. It indicates an error occurred during
// rule evaluation, such as an authorizer that doesn't support rule evaluation, and that
// ResourceRules and/or NonResourceRules may be incomplete.
// +optional
optional string evaluationError = 4;
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/perl
# ********************************************************************
# * COPYRIGHT:
# * Copyright (c) 2002-2013, International Business Machines Corporation and
# * others. All Rights Reserved.
# ********************************************************************
# Common.pl presumably defines the driver settings used below ($OnWindows,
# $ICULatest, $ICUPathLatest, $WindowsPlatform, $CollationDataPath,
# $ICULatestVersion) -- confirm against ../perldriver/Common.pl.
require "../perldriver/Common.pl";
use lib '../perldriver';
# $p is the command line used to invoke the collation performance tool.
my $p;
if ($OnWindows) {
$p = "cd ".$ICULatest."/bin && ".$ICUPathLatest . "/collationperf/$WindowsPlatform/Release/collationperf.exe";
}
else {
# On Unix the ICU shared libraries must be found via LD_LIBRARY_PATH.
$p = "LD_LIBRARY_PATH=".$ICULatest."/source/lib:".$ICULatest."/source/tools/ctestfw ".$ICUPathLatest . "/collationperf/collperf";
}
# Locales under test. Entry i pairs with entry i of @data below (some locales
# repeat because they are tested against several input files).
my @locale = (
"en_US",
"da_DK",
"de_DE",
"fr_FR",
"ja_JP",
"ja_JP",
"ja_JP",
"ja_JP",
"zh_CN",
"zh_CN",
"zh_CN",
"zh_TW",
"zh_TW",
"ko_KR",
"ko_KR",
"ru_RU",
"ru_RU",
"th_TH",
"th_TH"
);
my $filePath = $CollationDataPath . "/";
my $filePrefix = "TestNames_";
# Input word lists, one per locale above (same index).
my @data = (
$filePrefix."Latin.txt",
$filePrefix."Latin.txt",
$filePrefix."Latin.txt",
$filePrefix."Latin.txt",
$filePrefix."Latin.txt",
$filePrefix."Japanese_h.txt",
$filePrefix."Japanese_k.txt",
$filePrefix."Asian.txt",
$filePrefix."Latin.txt",
$filePrefix."Chinese.txt",
$filePrefix."Simplified_Chinese.txt",
$filePrefix."Latin.txt",
$filePrefix."Chinese.txt",
$filePrefix."Latin.txt",
$filePrefix."Korean.txt",
$filePrefix."Latin.txt",
$filePrefix."Russian.txt",
$filePrefix."Latin.txt",
$filePrefix."Thai.txt"
);
my @resultPER;    # per-locale percentage columns, e.g. "12.3456%,..."
my @resultFIN;    # per-locale final CSV rows: locale,file,ICUx3,glibcx3,percentx3
# Run each (locale, data file) pair through both the ICU and the system (unix)
# collator, collecting quicksort time, sort-key generation time and key length.
# Fix: scalar elements are accessed as $arr[$i]; the original used one-element
# slices (@arr[$i]), which warns under "use warnings" and is deprecated style.
for ( $n = 0 ; $n < @data ; $n++ ) {
    my $resultICU = $locale[$n] . "," . $data[$n] . ",";
    my $resultNIX = $locale[$n] . "," . $data[$n] . ",";
    $resultFIN[$n] = $locale[$n] . "," . $data[$n] . ",";
    # quicksort timing; the tool prints "name = value unit" on output line 2
    # (presumed from the parsing below -- confirm against collperf output).
    my @icu = `$p -locale $locale[$n] -loop 1000 -file $filePath$data[$n] -qsort`;
    my @nix = `$p -locale $locale[$n] -unix -loop 1000 -file $filePath$data[$n] -qsort`;
    my @icua = split( ' = ', $icu[2] );
    my @icub = split( ' ', $icua[1] );
    my @nixa = split( ' = ', $nix[2] );
    my @nixb = split( ' ', $nixa[1] );
    $resultICU .= $icub[0] . ",";
    $resultNIX .= $nixb[0] . ",";
    # keygen timing
    @icu = `$p -locale $locale[$n] -loop 1000 -file $filePath$data[$n] -keygen`;
    @nix = `$p -locale $locale[$n] -unix -loop 1000 -file $filePath$data[$n] -keygen`;
    @icua = split( ' = ', $icu[2] );
    @icub = split( ' ', $icua[1] );
    @nixa = split( ' = ', $nix[2] );
    @nixb = split( ' ', $nixa[1] );
    $resultICU .= $icub[0] . ",";
    $resultNIX .= $nixb[0] . ",";
    # keygen length (bytes/char), taken from output line 3 of the -keygen run
    @icua = split( ' = ', $icu[3] );
    @nixa = split( ' = ', $nix[3] );
    chomp( $icua[1] );
    chomp( $nixa[1] );
    $resultICU .= $icua[1] . ",";
    $resultNIX .= $nixa[1] . ",";
    my @resultSplitICU;
    my @resultSplitNIX;
    # percentage difference (GLIBC-ICU)/ICU for each of the three measurements,
    # truncated to 7 characters; 0 when the ICU value is not positive
    for ( $i = 0 ; $i < 3 ; $i++ ) {
        my $percent = 0;
        @resultSplitICU = split( ',', $resultICU );
        @resultSplitNIX = split( ',', $resultNIX );
        if ( $resultSplitICU[ 2 + $i ] > 0 ) {
            $percent = substr(
                ( ( $resultSplitNIX[ 2 + $i ] - $resultSplitICU[ 2 + $i ] )
                      / $resultSplitICU[ 2 + $i ] ) * 100,
                0, 7 );
        }
        $resultPER[$n] .= $percent . "%,";
    }
    # append ICU, then Unix, then percentage columns to the final row
    for ( $j = 0 ; $j < 3 ; $j++ ) {
        $resultFIN[$n] .= $resultSplitICU[ 2 + $j ] . ",";
    }
    for ( $j = 0 ; $j < 3 ; $j++ ) {
        $resultFIN[$n] .= $resultSplitNIX[ 2 + $j ] . ",";
    }
    $resultFIN[$n] .= $resultPER[$n];
}
# Print the results in a HTML page
printOutput();
exit(0);
# This subroutine creates the web page and prints out the results in a table.
# It writes to the global HTML filehandle; the file name is derived from the
# current date/time and placed under ../results/.
sub printOutput {
# NOTE(review): $title is assigned but never used within this sub.
my $title = "Collation: ICU " . $ICULatestVersion . " vs GLIBC";
# Turn the current timestamp into a file-system-safe name.
my $html = localtime;
$html =~ s/://g; # ':' illegal
$html =~ s/\s*\d+$//; # delete year
$html =~ s/^\w+\s*//; # delete dow
$html = "CollationPerformance $html.html";
$html = "../results/" . $html;
$html =~ s/ /_/g;
open( HTML, ">$html" ) or die "Can't write to $html: $!";
print HTML <<EOF;
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Collation: ICU4C vs. glibc</title>
<link rel="stylesheet" href="../icu.css" type="text/css" />
</head>
<body>
<!--#include virtual="../ssi/header.html" -->
EOF
print HTML "<h2>Collation: ICU4C ".$ICULatestVersion." vs. GLIBC</h2>\n";
print HTML <<EOF;
<p>The performance test takes a locale and creates a RuleBasedCollator with
default options. A large list of names is used as data in each test, where the
names vary according to language. Each Collation operation over the whole list
is repeated 1000 times. The percentage values in the final column are the most
useful. They measure differences, where positive is better for ICU4C, and
negative is better for the compared implementation.</p>
<h3>Key</h3>
<table border="1" cellspacing="0" cellpadding="4">
<tr>
<th align="left">Operation</th>
<th align="left">Units</th>
<th align="left">Description</th>
</tr>
<tr>
<td>strcoll</td>
<td>nanosecs</td>
<td>Timing for string collation, an incremental compare of strings.</td>
</tr>
<tr>
<td>keygen</td>
<td>nanosecs</td>
<td>Timing for generation of sort keys, used to 'precompile' information so
that subsequent operations can use binary comparison.</td>
</tr>
<tr>
<td>keylen</td>
<td>bytes/char</td>
<td>The average length of the generated sort keys, in bytes per character
(Unicode/ISO 10646 code point). Generally this is the important field for sort
key performance, since it directly impacts the time necessary for binary
comparison, and the overhead of memory usage and retrieval time for sort
keys.</td>
</tr>
</table>
EOF
# Emit the data table between the key and the notes.
printData();
print HTML <<EOF;
<h3><i>Notes</i></h3>
<ol>
<li>As with all performance measurements, the results will vary according to
the hardware and compiler. The strcoll operation is particularly sensitive; we
have found that even slight changes in code alignment can produce 10%
differences.</li>
<li>For more information on incremental vs. sort key comparison, the importance
of multi-level sorting, and other features of collation, see <a href=
"http://www.unicode.org/reports/tr10/">Unicode Collation (UCA)</a>.</li>
<li>For general information on ICU collation see <a href=
"/userguide/Collate_Intro.html">User Guide</a>.</li>
<li>For information on APIs, see <a href="/apiref/icu4c/ucol_8h.html">C</a>,
<a href="/apiref/icu4c/classCollator.html">C++</a>, or <a href=
"/apiref/icu4j/com/ibm/icu/text/Collator.html">Java</a>.</li>
</ol>
<!--#include virtual="../ssi/footer.html" -->
</body>
</html>
EOF
close(HTML) or die "Can't close $html: $!";
}
# This subroutine formats and prints the table of results into the global HTML
# filehandle. Each entry of @resultFIN is a comma-separated row produced by the
# main loop. Fix: element accesses use the $ sigil ($resultFIN[$n], $parsed[$i])
# instead of the original one-element slices (@arr[$i]), which warn under
# "use warnings".
sub printData() {
    print HTML <<EOF;
<h3>Data</h3>
<table border="1" cellspacing="0" cellpadding="4">
<tr>
<td align="left"><b>Locale</b></td>
<td align="left"><b>Data file</b></td>
<td align="left"><b>strcoll</b> <i>(ICU)</i></td>
<td align="left"><b>keygen</b> <i>(ICU)</i></td>
<td align="left"><b>keylen</b> <i>(ICU)</i></td>
<td align="left"><b>strcoll</b> <i>(GLIBC)</i></td>
<td align="left"><b>keygen</b> <i>(GLIBC)</i></td>
<td align="left"><b>keylen</b> <i>(GLIBC)</i></td>
<td align="left"><b>strcoll</b> <i>(GLIBC-ICU)/ICU)</i></td>
<td align="left"><b>keygen</b> <i>(GLIBC-ICU)/ICU)</i></td>
<td align="left"><b>keylen</b> <i>(GLIBC-ICU)/ICU)</i></td>
</tr>
EOF
    for ( $n = 0 ; $n < @resultFIN ; $n++ ) {
        print HTML "<tr>";
        my @parsed = split( ',', $resultFIN[$n] );
        for ( $i = 0 ; $i < @parsed ; $i++ ) {
            my $value = $parsed[$i];
            print HTML "<td align=\"center\">";
            # Negative values (glibc faster than ICU) are highlighted in red.
            if ( $value =~ m/^[-]/ ) {
                print HTML "<font color=\"red\">$value</font>";
            }
            else {
                print HTML "$value";
            }
            print HTML "</td>";
        }
        print HTML "</tr>\n";
    }
    print HTML<<EOF;
</table>
EOF
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- Seek-bar thumb state list: shows the pressed drawable while touched,
     and falls back to the normal drawable otherwise (default item last). -->
<selector xmlns:android="http://schemas.android.com/apk/res/android">
<item android:state_pressed="true" android:drawable="@drawable/jc_seek_thumb_pressed" />
<item android:drawable="@drawable/jc_seek_thumb_normal" />
</selector>
| {
"pile_set_name": "Github"
} |
julia 0.2 0.6.0-pre
MathProgBase 0.0.0 0.3.0-
ReverseDiffSparse 0.1 0.4
| {
"pile_set_name": "Github"
} |
/*
LZ4 auto-framing library
Header File
Copyright (C) 2011-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- LZ4 source repository : http://code.google.com/p/lz4/
- LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* LZ4F is a stand-alone API to create LZ4-compressed frames
* fully conformant to specification v1.4.1.
* All related operations, including memory management, are handled by the library.
* You don't need lz4.h when using lz4frame.h.
* */
#pragma once
#if defined (__cplusplus)
extern "C" {
#endif
/****************************************
Note : experimental API.
Not yet integrated within lz4 library.
****************************************/
/**************************************
Includes
**************************************/
#include <stddef.h> /* size_t */
/**************************************
Error management
**************************************/
/* Most LZ4F functions return a size_t; error results are encoded as the
 * negation of an LZ4F_errorCodes value (see LZ4F_isError below). */
typedef size_t LZ4F_errorCode_t;
/* X-macro listing every error code; expanded below to generate the enum. */
#define LZ4F_LIST_ERRORS(ITEM) \
ITEM(OK_NoError) ITEM(ERROR_GENERIC) \
ITEM(ERROR_maxBlockSize_invalid) ITEM(ERROR_blockMode_invalid) ITEM(ERROR_contentChecksumFlag_invalid) \
ITEM(ERROR_compressionLevel_invalid) \
ITEM(ERROR_allocation_failed) \
ITEM(ERROR_srcSize_tooLarge) ITEM(ERROR_dstMaxSize_tooSmall) \
ITEM(ERROR_decompressionFailed) \
ITEM(ERROR_checksum_invalid) \
ITEM(ERROR_maxCode)
#define LZ4F_GENERATE_ENUM(ENUM) ENUM,
typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
int LZ4F_isError(LZ4F_errorCode_t code); /* Basically : code > -ERROR_maxCode */
const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /* return enum as string */
/**************************************
Framing compression functions
**************************************/
/* Maximum block size selector. NOTE(review): the specific values 4..7 are
 * presumably dictated by the LZ4 frame format -- confirm against the spec. */
typedef enum { LZ4F_default=0, max64KB=4, max256KB=5, max1MB=6, max4MB=7} blockSizeID_t;
typedef enum { blockLinked=0, blockIndependent} blockMode_t;
typedef enum { noContentChecksum=0, contentChecksumEnabled } contentChecksum_t;
/* Frame parameters written into the frame header. */
typedef struct {
blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB ; 0 == default */
blockMode_t blockMode; /* blockLinked, blockIndependent ; 0 == default */
contentChecksum_t contentChecksumFlag; /* noContentChecksum, contentChecksumEnabled ; 0 == default */
unsigned reserved[5];
} LZ4F_frameInfo_t;
/* Compression preferences: frame parameters plus encoder behavior knobs. */
typedef struct {
LZ4F_frameInfo_t frameInfo;
unsigned compressionLevel; /* Not yet supported : only fast compression for the time being */
unsigned autoFlush; /* 1 == always flush; reduce need for tmp buffer */
unsigned reserved[4];
} LZ4F_preferences_t;
/***********************************
* Simple compression function
* *********************************/
size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstMaxSize, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
/* LZ4F_compressFrame()
* Compress an entire srcBuffer into a valid LZ4 frame, as defined by specification v1.4.1, in a single step.
* The most important rule is that dstBuffer MUST be large enough (dstMaxSize) to ensure compression completion even in worst case.
* You can get the minimum value of dstMaxSize by using LZ4F_compressFrameBound()
* If this condition is not respected, LZ4F_compressFrame() will fail (result is an errorCode)
* The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
* The result of the function is the number of bytes written into dstBuffer.
* The function outputs an error code if it fails (can be tested using LZ4F_isError())
*/
/**********************************
* Advanced compression functions
* *********************************/
/* Opaque handle to a compression context; allocated by
 * LZ4F_createCompressionContext and released by LZ4F_freeCompressionContext. */
typedef void* LZ4F_compressionContext_t;
/* Per-call options for the compression functions. */
typedef struct {
unsigned stableSrc; /* 1 == src content will remain available on future calls to LZ4F_compress(); avoid saving src content within tmp buffer as future dictionary */
unsigned reserved[3];
} LZ4F_compressOptions_t;
/* Resource Management */
/* API version tag passed to the create functions (see doc below). */
#define LZ4F_VERSION 100
LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version);
LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_compressionContext);
/* LZ4F_createCompressionContext() :
* The first thing to do is to create a compressionContext object, which will be used in all compression operations.
* This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
* The version provided MUST be LZ4F_VERSION. It is intended to track potential version differences between different binaries.
* The function will provide a pointer to a fully allocated LZ4F_compressionContext_t object.
* If the result LZ4F_errorCode_t is not zero, there was an error during context creation.
* Object can release its memory using LZ4F_freeCompressionContext();
*/
/* Compression */
size_t LZ4F_compressBegin(LZ4F_compressionContext_t compressionContext, void* dstBuffer, size_t dstMaxSize, const LZ4F_preferences_t* preferencesPtr);
/* LZ4F_compressBegin() :
* will write the frame header into dstBuffer.
* dstBuffer must be large enough to accommodate a header (dstMaxSize). Maximum header size is 19 bytes.
* The LZ4F_preferences_t structure is optional : you can provide NULL as argument, all preferences will then be set to default.
* The result of the function is the number of bytes written into dstBuffer for the header
* or an error code (can be tested using LZ4F_isError())
*/
size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
/* LZ4F_compressBound() :
* Provides the minimum size of Dst buffer given srcSize to handle worst case situations.
* preferencesPtr is optional : you can provide NULL as argument, all preferences will then be set to default.
*/
size_t LZ4F_compressUpdate(LZ4F_compressionContext_t compressionContext, void* dstBuffer, size_t dstMaxSize, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* compressOptionsPtr);
/* LZ4F_compressUpdate()
* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
* The most important rule is that dstBuffer MUST be large enough (dstMaxSize) to ensure compression completion even in worst case.
* If this condition is not respected, LZ4F_compress() will fail (result is an errorCode)
* You can get the minimum value of dstMaxSize by using LZ4F_compressBound()
* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
* The result of the function is the number of bytes written into dstBuffer : it can be zero, meaning input data was just buffered.
* The function outputs an error code if it fails (can be tested using LZ4F_isError())
*/
size_t LZ4F_flush(LZ4F_compressionContext_t compressionContext, void* dstBuffer, size_t dstMaxSize, const LZ4F_compressOptions_t* compressOptionsPtr);
/* LZ4F_flush()
* Should you need to create compressed data immediately, without waiting for a block to be filled,
* you can call LZ4F_flush(), which will immediately compress any remaining data buffered within compressionContext.
* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
* The result of the function is the number of bytes written into dstBuffer
* (it can be zero, this means there was no data left within compressionContext)
* The function outputs an error code if it fails (can be tested using LZ4F_isError())
*/
size_t LZ4F_compressEnd(LZ4F_compressionContext_t compressionContext, void* dstBuffer, size_t dstMaxSize, const LZ4F_compressOptions_t* compressOptionsPtr);
/* LZ4F_compressEnd()
* When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
* It will flush whatever data remained within compressionContext (like LZ4F_flush())
* but also properly finalize the frame, with an endMark and a checksum.
* The result of the function is the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
* The function outputs an error code if it fails (can be tested using LZ4F_isError())
* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
* compressionContext can then be used again, starting with LZ4F_compressBegin().
*/
/***********************************
* Decompression functions
* *********************************/
/* Opaque handle to a decompression context. */
typedef void* LZ4F_decompressionContext_t;
/* Per-call options for the decompression functions. */
typedef struct {
unsigned stableDst; /* unused for the time being, must be 0 */
unsigned reserved[3];
} LZ4F_decompressOptions_t;
/* Resource management */
/* NOTE(review): these two declarations take LZ4F_compressionContext_t rather
 * than LZ4F_decompressionContext_t. Both are void* so this compiles and works,
 * but it looks like an API inconsistency -- confirm against upstream lz4. */
LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_compressionContext_t* LZ4F_decompressionContextPtr, unsigned version);
LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_compressionContext_t LZ4F_decompressionContext);
/* LZ4F_createDecompressionContext() :
* The first thing to do is to create a decompressionContext object, which will be used in all decompression operations.
* This is achieved using LZ4F_createDecompressionContext().
* The function will provide a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
* If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
* Object can release its memory using LZ4F_freeDecompressionContext();
*/
/* Decompression */
size_t LZ4F_getFrameInfo(LZ4F_decompressionContext_t decompressionContext, LZ4F_frameInfo_t* frameInfoPtr, const void* srcBuffer, size_t* srcSizePtr);
/* LZ4F_getFrameInfo()
* This function decodes frame header information, such as blockSize.
* It is optional : you could start by calling directly LZ4F_decompress() instead.
* The objective is to extract header information without starting decompression, typically for allocation purposes.
* LZ4F_getFrameInfo() can also be used *after* starting decompression, on a valid LZ4F_decompressionContext_t.
* The number of bytes read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
* You are expected to resume decompression from where it stopped (srcBuffer + *srcSizePtr)
* The function result is an hint of the better srcSize to use for next call to LZ4F_decompress,
* or an error code which can be tested using LZ4F_isError().
*/
size_t LZ4F_decompress(LZ4F_decompressionContext_t decompressionContext, void* dstBuffer, size_t* dstSizePtr, const void* srcBuffer, size_t* srcSizePtr, const LZ4F_decompressOptions_t* decompressOptionsPtr);
/* LZ4F_decompress()
* Call this function repetitively to regenerate data compressed within srcBuffer.
* The function will attempt to decode *srcSizePtr bytes from srcBuffer, into dstBuffer of maximum size *dstSizePtr.
*
* The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
*
* The number of bytes effectively used from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
* If the number of bytes read is < number of bytes provided, then the decompression operation is not complete.
* This typically happens when dstBuffer is not large enough to contain all decoded data.
* LZ4F_decompress() will have to be called again, starting from where it stopped (srcBuffer + *srcSizePtr)
* The function will check this condition, and refuse to continue if it is not respected.
* dstBuffer is supposed to be flushed between calls to the function, since its content will be rewritten.
* Different dst arguments can be used between each calls.
*
* The function result is a hint of the better srcSize to use for next call to LZ4F_decompress.
* Basically, it's the size of the current (or remaining) compressed block + header of next block.
* Respecting the hint provides some boost to performance, since it allows less buffer shuffling.
* Note that this is just a hint, you can always provide any srcSize you want.
* When a frame is fully decoded, the function result will be 0.
* If decompression failed, function result is an error code which can be tested using LZ4F_isError().
*/
#if defined (__cplusplus)
}
#endif
| {
"pile_set_name": "Github"
} |
'------------------------------------------------------------------------------
' <auto-generated>
' This code was generated by a tool.
' Runtime Version:4.0.30319.17348
'
' Changes to this file may cause incorrect behavior and will be lost if
' the code is regenerated.
' </auto-generated>
'------------------------------------------------------------------------------
Option Strict Off
Option Explicit On
''' <summary>
''' Designer-generated VSTO host item for the Excel worksheet "Sheet1".
''' Everything in this class is runtime plumbing; edits are lost when the
''' code is regenerated by the tooling.
''' </summary>
<Microsoft.VisualStudio.Tools.Applications.Runtime.StartupObjectAttribute(1), _
Global.System.Security.Permissions.PermissionSetAttribute(Global.System.Security.Permissions.SecurityAction.Demand, Name:="FullTrust")> _
Partial Public NotInheritable Class Sheet1
Inherits Microsoft.Office.Tools.Excel.WorksheetBase
''' <summary>
''' Creates the host item and registers it with the VSTO runtime under the
''' worksheet name "Sheet1".
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Public Sub New(ByVal factory As Global.Microsoft.Office.Tools.Excel.Factory, ByVal serviceProvider As Global.System.IServiceProvider)
MyBase.New(factory, serviceProvider, "Sheet1", "Sheet1")
End Sub
''' <summary>
''' Runs base initialization, publishes this instance through Globals.Sheet1,
''' enables WinForms visual styles, then initializes cached data, controls,
''' components and data in that fixed order.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Protected Overrides Sub Initialize()
MyBase.Initialize()
Globals.Sheet1 = Me
Global.System.Windows.Forms.Application.EnableVisualStyles()
Me.InitializeCachedData()
Me.InitializeControls()
Me.InitializeComponents()
Me.InitializeData()
End Sub
''' <summary>
''' Called by the runtime when initialization is complete; raises the
''' Startup event via OnStartup.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Protected Overrides Sub FinishInitialization()
Me.OnStartup()
End Sub
''' <summary>
''' Wraps the data-binding step in a Begin/End initialization pair.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Protected Overrides Sub InitializeDataBindings()
Me.BeginInitialization()
Me.BindToData()
Me.EndInitialization()
End Sub
''' <summary>
''' Fills this sheet from the document data cache, but only when a data
''' host exists and its cache has already been initialized.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub InitializeCachedData()
If (Me.DataHost Is Nothing) Then
Return
End If
If Me.DataHost.IsCacheInitialized Then
Me.DataHost.FillCachedData(Me)
End If
End Sub
''' <summary>
''' Designer extension point for data initialization; intentionally empty.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub InitializeData()
End Sub
''' <summary>
''' Designer extension point for data binding; intentionally empty.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub BindToData()
End Sub
''' <summary>
''' Starts caching the named member in the document data host.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Advanced)> _
Private Sub StartCaching(ByVal MemberName As String)
Me.DataHost.StartCaching(Me, MemberName)
End Sub
''' <summary>
''' Stops caching the named member in the document data host.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Advanced)> _
Private Sub StopCaching(ByVal MemberName As String)
Me.DataHost.StopCaching(Me, MemberName)
End Sub
''' <summary>
''' Returns True when the named member is currently cached.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Advanced)> _
Private Function IsCached(ByVal MemberName As String) As Boolean
Return Me.DataHost.IsCached(Me, MemberName)
End Function
''' <summary>
''' Begins component initialization (delegates to BeginInit).
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub BeginInitialization()
Me.BeginInit()
End Sub
''' <summary>
''' Ends component initialization (delegates to EndInit).
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub InitializeControls()
End Sub
''' <summary>
''' Designer extension point for components; intentionally empty.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Tools.Office.ProgrammingModel.dll", "11.0.0.0"), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Never)> _
Private Sub InitializeComponents()
End Sub
''' <summary>
''' Returns True when the named cached member still needs to be filled.
''' </summary>
<Global.System.Diagnostics.DebuggerNonUserCodeAttribute(), _
Global.System.ComponentModel.EditorBrowsableAttribute(Global.System.ComponentModel.EditorBrowsableState.Advanced)> _
Private Function NeedsFill(ByVal MemberName As String) As Boolean
Return Me.DataHost.NeedsFill(Me, MemberName)
End Function
End Class
Partial Friend NotInheritable Class Globals

    'Backing store for the single Sheet1 host item shared by the project.
    Private Shared _Sheet1 As Sheet1

    ''' <summary>
    ''' Gets or sets the project-wide Sheet1 instance. The setter may only
    ''' succeed once; any later assignment throws NotSupportedException.
    ''' </summary>
    Friend Shared Property Sheet1() As Sheet1
        Get
            Return _Sheet1
        End Get
        Set(value As Sheet1)
            'Guard clause: reject any attempt to replace an existing instance.
            If (_Sheet1 IsNot Nothing) Then
                Throw New System.NotSupportedException()
            End If
            _Sheet1 = value
        End Set
    End Property
End Class
| {
"pile_set_name": "Github"
} |
#pragma once
#include "ofMain.h"
#include "KochLine.h"
// Koch-curve demo application: declares the standard openFrameworks event
// callbacks plus the list of line segments that make up the fractal.
class testApp : public ofBaseApp{
public:
void setup();                                   // one-time initialization
void update();                                  // per-frame state update
void draw();                                    // per-frame rendering
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
void addGeneration();                           // produces the next Koch generation from lineList
vector<KochLine> lineList;                      // current generation of Koch segments
float pct;                                      // animation fraction; presumably 0..1 -- confirm in testApp.cpp
};
| {
"pile_set_name": "Github"
} |
package de.metas.invoice.interceptor;
/*
* #%L
* de.metas.swat.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import org.adempiere.ad.modelvalidator.annotations.Interceptor;
import org.adempiere.ad.modelvalidator.annotations.ModelChange;
import org.compiere.model.ModelValidator;
import org.springframework.stereotype.Component;
import de.metas.invoice.model.I_M_MatchInv;
/**
 * Model interceptor for {@link I_M_MatchInv} records.
 *
 * Keeps M_MatchInv.C_Invoice_ID in sync with the invoice line. The C_Invoice_ID
 * needs to be set because in the "Eingansrechnung" (PO-Invoice) window the
 * invoice lines live in an included tab, so the matchInv tab is a subtab of the
 * C_Invoice tab.
 *
 * @author ts
 */
@Interceptor(I_M_MatchInv.class)
@Component
public class M_MatchInv
{
	/**
	 * Derives the invoice ID from the referenced invoice line whenever
	 * C_InvoiceLine_ID changes; clears it (sets 0) when no line is referenced.
	 *
	 * @param matchInv the record being created or updated
	 */
	@ModelChange(timings = { ModelValidator.TYPE_BEFORE_NEW, ModelValidator.TYPE_BEFORE_CHANGE }, ifColumnsChanged = I_M_MatchInv.COLUMNNAME_C_InvoiceLine_ID)
	public void updateC_Invoice_ID(final I_M_MatchInv matchInv)
	{
		final boolean hasLine = matchInv.getC_InvoiceLine_ID() > 0;
		matchInv.setC_Invoice_ID(hasLine
				? matchInv.getC_InvoiceLine().getC_Invoice_ID()
				: 0);
	}
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2006-2015, JGraph Ltd
* Copyright (c) 2006-2015, Gaudenz Alder
*/
/**
 * Class: mxGraphHierarchyModel
 *
 * Internal model of a hierarchical graph. This model stores nodes and edges
 * equivalent to the real graph nodes and edges, but also stores the rank of
 * the cells, the order within the ranks and the new candidate locations of
 * cells. The internal model also reverses edge direction where appropriate,
 * ignores self-loops and groups parallel edges together under one edge object.
 *
 * Constructor: mxGraphHierarchyModel
 *
 * Creates an internal ordered graph model using the vertices passed in. If
 * there are any, leftward edges need to be inverted in the internal model.
 *
 * Arguments:
 *
 * layout - the hierarchical layout acting as a facade to the graph
 * vertices - the vertices for this hierarchy
 * roots - the root (source) cells of the hierarchy
 * parent - the parent cell whose children are being laid out
 * tightenToSource - whether or not to tighten vertices towards the sources
 */
function mxGraphHierarchyModel(layout, vertices, roots, parent, tightenToSource)
{
	var graph = layout.getGraph();
	this.tightenToSource = tightenToSource;
	this.roots = roots;
	this.parent = parent;

	// map of cells to internal cell needed for second run through
	// to setup the sink of edges correctly
	this.vertexMapper = new mxDictionary();
	this.edgeMapper = new mxDictionary();
	this.maxRank = 0;
	var internalVertices = [];

	if (vertices == null)
	{
		// Fix: use the local graph facade. The previous code read
		// this.graph, which is never assigned on this type, so this
		// fallback branch threw a TypeError whenever vertices was null.
		vertices = graph.getChildVertices(parent);
	}

	this.maxRank = this.SOURCESCANSTARTRANK;

	// Creates the internal node for each vertex and the internal edges,
	// but does not assign targets yet.
	this.createInternalCells(layout, vertices, internalVertices);

	// Go through edges and set their sink (target) values. Also check the
	// ordering and invert edges if necessary.
	for (var i = 0; i < vertices.length; i++)
	{
		var edges = internalVertices[i].connectsAsSource;

		for (var j = 0; j < edges.length; j++)
		{
			var internalEdge = edges[j];
			var realEdges = internalEdge.edges;

			// Only need to process the first real edge, since
			// all the edges connect to the same other vertex
			if (realEdges != null && realEdges.length > 0)
			{
				var realEdge = realEdges[0];
				var targetCell = layout.getVisibleTerminal(
						realEdge, false);
				var internalTargetCell = this.vertexMapper.get(targetCell);

				if (internalVertices[i] == internalTargetCell)
				{
					// If there are parallel edges going between two vertices and not all are in the same direction
					// you can have navigated across one direction when doing the cycle reversal that isn't the same
					// direction as the first real edge in the array above. When that happens the if above catches
					// that and we correct the target cell before continuing.
					// This branch only detects this single case
					targetCell = layout.getVisibleTerminal(
							realEdge, true);
					internalTargetCell = this.vertexMapper.get(targetCell);
				}

				if (internalTargetCell != null
						&& internalVertices[i] != internalTargetCell)
				{
					internalEdge.target = internalTargetCell;

					// Register the edge on the target exactly once.
					// (A previous reset of an already-empty connectsAsTarget
					// array was a no-op and has been removed.)
					if (mxUtils.indexOf(internalTargetCell.connectsAsTarget, internalEdge) < 0)
					{
						internalTargetCell.connectsAsTarget.push(internalEdge);
					}
				}
			}
		}

		// Use the temp variable in the internal nodes to mark this
		// internal vertex as having been visited.
		internalVertices[i].temp[0] = 1;
	}
};
/**
 * Variable: maxRank
 *
 * Stores the largest rank number allocated
 */
mxGraphHierarchyModel.prototype.maxRank = null;
/**
 * Variable: vertexMapper
 *
 * Map from graph vertices to internal model nodes.
 */
mxGraphHierarchyModel.prototype.vertexMapper = null;
/**
 * Variable: edgeMapper
 *
 * Map from graph edges to internal model edges
 */
mxGraphHierarchyModel.prototype.edgeMapper = null;
/**
 * Variable: ranks
 *
 * Mapping from rank number to actual rank
 */
mxGraphHierarchyModel.prototype.ranks = null;
/**
 * Variable: roots
 *
 * Store of roots of this hierarchy model, these are real graph cells, not
 * internal cells
 */
mxGraphHierarchyModel.prototype.roots = null;
/**
 * Variable: parent
 *
 * The parent cell whose children are being laid out
 */
mxGraphHierarchyModel.prototype.parent = null;
/**
 * Variable: dfsCount
 *
 * Count of the number of times the ancestor dfs has been used.
 */
mxGraphHierarchyModel.prototype.dfsCount = 0;
/**
 * Variable: SOURCESCANSTARTRANK
 *
 * High value to start source layering scan rank value from.
 * Ranks computed against this value are normalized back down in
 * initialRank so that at least one sink lands on layer 0.
 */
mxGraphHierarchyModel.prototype.SOURCESCANSTARTRANK = 100000000;
/**
 * Variable: tightenToSource
 *
 * Whether or not to tighten the assigned ranks of vertices up towards
 * the source cells.
 */
mxGraphHierarchyModel.prototype.tightenToSource = false;
/**
 * Function: createInternalCells
 *
 * Creates all edges in the internal model
 *
 * Parameters:
 *
 * layout - Reference to the <mxHierarchicalLayout> algorithm.
 * vertices - Array of <mxCells> that represent the vertices whom are to
 * have an internal representation created.
 * internalVertices - The array of <mxGraphHierarchyNodes> to have their
 * information filled in using the real vertices.
 */
mxGraphHierarchyModel.prototype.createInternalCells = function(layout, vertices, internalVertices)
{
var graph = layout.getGraph();
// Create internal edges
for (var i = 0; i < vertices.length; i++)
{
internalVertices[i] = new mxGraphHierarchyNode(vertices[i]);
this.vertexMapper.put(vertices[i], internalVertices[i]);
// If the layout is deterministic, order the cells
//List outgoingCells = graph.getNeighbours(vertices[i], deterministic);
var conns = layout.getEdges(vertices[i]);
internalVertices[i].connectsAsSource = [];
// Create internal edges, but don't do any rank assignment yet
// First use the information from the greedy cycle remover to
// invert the leftward edges internally
for (var j = 0; j < conns.length; j++)
{
var cell = layout.getVisibleTerminal(conns[j], false);
// Looking for outgoing edges only (self-loops and ignored
// vertices are skipped here)
if (cell != vertices[i] && layout.graph.model.isVertex(cell) &&
!layout.isVertexIgnored(cell))
{
// We process all edge between this source and its targets
// If there are edges going both ways, we need to collect
// them all into one internal edges to avoid looping problems
// later. We assume this direction (source -> target) is the
// natural direction if at least half the edges are going in
// that direction.
// The check below for edges[0] being in the vertex mapper is
// in case we've processed this the other way around
// (target -> source) and the number of edges in each direction
// are the same. All the graph edges will have been assigned to
// an internal edge going the other way, so we don't want to
// process them again
var undirectedEdges = layout.getEdgesBetween(vertices[i],
cell, false);
var directedEdges = layout.getEdgesBetween(vertices[i],
cell, true);
// Majority vote: only treat source -> target as the natural
// direction when at least half the parallel edges point that way.
if (undirectedEdges != null &&
undirectedEdges.length > 0 &&
this.edgeMapper.get(undirectedEdges[0]) == null &&
directedEdges.length * 2 >= undirectedEdges.length)
{
var internalEdge = new mxGraphHierarchyEdge(undirectedEdges);
for (var k = 0; k < undirectedEdges.length; k++)
{
var edge = undirectedEdges[k];
this.edgeMapper.put(edge, internalEdge);
// Resets all point on the edge and disables the edge style
// without deleting it from the cell style
graph.resetEdge(edge);
if (layout.disableEdgeStyle)
{
layout.setEdgeStyleEnabled(edge, false);
layout.setOrthogonalEdge(edge,true);
}
}
internalEdge.source = internalVertices[i];
// Register each internal edge on its source exactly once
if (mxUtils.indexOf(internalVertices[i].connectsAsSource, internalEdge) < 0)
{
internalVertices[i].connectsAsSource.push(internalEdge);
}
}
}
}
// Ensure temp variable is cleared from any previous use
internalVertices[i].temp[0] = 0;
}
};
/**
 * Function: initialRank
 *
 * Basic determination of minimum layer ranking by working from sources
 * or sinks and working through each node in the relevant edge direction.
 * Starting at the sinks is basically a longest path layering algorithm.
 */
mxGraphHierarchyModel.prototype.initialRank = function()
{
var startNodes = [];
if (this.roots != null)
{
for (var i = 0; i < this.roots.length; i++)
{
var internalNode = this.vertexMapper.get(this.roots[i]);
if (internalNode != null)
{
startNodes.push(internalNode);
}
}
}
var internalNodes = this.vertexMapper.getValues();
for (var i=0; i < internalNodes.length; i++)
{
// Mark the node as not having had a layer assigned
internalNodes[i].temp[0] = -1;
}
var startNodesCopy = startNodes.slice();
// Breadth-style worklist: a node is ranked only once all of its
// incoming (layer-determining) edges have been scanned; otherwise
// it is moved to the back of the queue.
while (startNodes.length > 0)
{
var internalNode = startNodes[0];
var layerDeterminingEdges;
var edgesToBeMarked;
layerDeterminingEdges = internalNode.connectsAsTarget;
edgesToBeMarked = internalNode.connectsAsSource;
// flag to keep track of whether or not all layer determining
// edges have been scanned
var allEdgesScanned = true;
// Work out the layer of this node from the layer determining
// edges. The minimum layer number of any node connected by one of
// the layer determining edges variable
var minimumLayer = this.SOURCESCANSTARTRANK;
for (var i = 0; i < layerDeterminingEdges.length; i++)
{
var internalEdge = layerDeterminingEdges[i];
// 5270620 is an arbitrary "scanned in this pass" stamp
// (see the y/m/d/h note where it is assigned below)
if (internalEdge.temp[0] == 5270620)
{
// This edge has been scanned, get the layer of the
// node on the other end
var otherNode = internalEdge.source;
minimumLayer = Math.min(minimumLayer, otherNode.temp[0] - 1);
}
else
{
allEdgesScanned = false;
break;
}
}
// If all edge have been scanned, assign the layer, mark all
// edges in the other direction and remove from the nodes list
if (allEdgesScanned)
{
internalNode.temp[0] = minimumLayer;
// maxRank counts downwards from SOURCESCANSTARTRANK here;
// it is normalized to a conventional value at the end.
this.maxRank = Math.min(this.maxRank, minimumLayer);
if (edgesToBeMarked != null)
{
for (var i = 0; i < edgesToBeMarked.length; i++)
{
var internalEdge = edgesToBeMarked[i];
// Assign unique stamp ( y/m/d/h )
internalEdge.temp[0] = 5270620;
// Add node on other end of edge to LinkedList of
// nodes to be analysed
var otherNode = internalEdge.target;
// Only add node if it hasn't been assigned a layer
if (otherNode.temp[0] == -1)
{
startNodes.push(otherNode);
// Mark this other node as neither being
// unassigned nor assigned so it isn't
// added to this list again, but it's
// layer isn't used in any calculation.
otherNode.temp[0] = -2;
}
}
}
startNodes.shift();
}
else
{
// Not all the edges have been scanned, get to the back of
// the class and put the dunces cap on
var removedCell = startNodes.shift();
startNodes.push(internalNode);
if (removedCell == internalNode && startNodes.length == 1)
{
// This is an error condition, we can't get out of
// this loop. It could happen for more than one node
// but that's a lot harder to detect. Log the error
// TODO make log comment
break;
}
}
}
// Normalize the ranks down from their large starting value to place
// at least 1 sink on layer 0
for (var i=0; i < internalNodes.length; i++)
{
// Mark the node as not having had a layer assigned
internalNodes[i].temp[0] -= this.maxRank;
}
// Tighten the rank 0 nodes as far as possible: each start node's rank
// becomes one more than the largest target rank seen so far in its
// outgoing-edge scan.
for ( var i = 0; i < startNodesCopy.length; i++)
{
var internalNode = startNodesCopy[i];
var currentMaxLayer = 0;
var layerDeterminingEdges = internalNode.connectsAsSource;
for ( var j = 0; j < layerDeterminingEdges.length; j++)
{
var internalEdge = layerDeterminingEdges[j];
var otherNode = internalEdge.target;
internalNode.temp[0] = Math.max(currentMaxLayer,
otherNode.temp[0] + 1);
currentMaxLayer = internalNode.temp[0];
}
}
// Reset the maxRank to that which would be expected for a from-sink
// scan
this.maxRank = this.SOURCESCANSTARTRANK - this.maxRank;
};
/**
 * Function: fixRanks
 *
 * Fixes the layer assignments to the values stored in the nodes. Also needs
 * to create dummy nodes for edges that cross layers.
 */
mxGraphHierarchyModel.prototype.fixRanks = function()
{
	var rankList = [];
	this.ranks = [];

	// One empty list per rank, 0 .. maxRank inclusive.
	for (var rank = 0; rank <= this.maxRank; rank++)
	{
		rankList[rank] = [];
		this.ranks[rank] = rankList[rank];
	}

	// Perform a DFS to obtain an initial ordering for each rank.
	// Without doing this you would end up having to process
	// crossings for a standard tree.
	var rootsArray = null;

	if (this.roots != null)
	{
		rootsArray = [];

		for (var i = 0; i < this.roots.length; i++)
		{
			rootsArray[i] = this.vertexMapper.get(this.roots[i]);
		}
	}

	this.visit(function(parent, node, edge, layer, seen)
	{
		if (seen == 0 && node.maxRank < 0 && node.minRank < 0)
		{
			var nodeRank = node.temp[0];
			rankList[nodeRank].push(node);
			node.maxRank = nodeRank;
			node.minRank = nodeRank;

			// temp[0] now holds the node's position within its rank
			node.temp[0] = rankList[nodeRank].length - 1;
		}

		if (parent != null && edge != null)
		{
			var rankGap = parent.maxRank - node.maxRank;

			if (rankGap > 1)
			{
				// The edge spans intermediate ranks; register it on each
				// of them so dummy-node positions can be tracked.
				edge.maxRank = parent.maxRank;
				edge.minRank = node.maxRank;
				edge.temp = [];
				edge.x = [];
				edge.y = [];

				for (var rank = edge.minRank + 1; rank < edge.maxRank; rank++)
				{
					rankList[rank].push(edge);
					edge.setGeneralPurposeVariable(rank, rankList[rank].length - 1);
				}
			}
		}
	}, rootsArray, false, null);
};
/**
 * Function: visit
 *
 * A depth first search through the internal hierarchy model.
 *
 * Parameters:
 *
 * visitor - The visitor function pattern to be called for each node.
 * dfsRoots - Array of internal nodes to start each search from.
 * trackAncestors - Whether or not the search is to keep track of all nodes
 * directly above this one in the search path.
 * seenNodes - Optional map of already-visited nodes, keyed by node id.
 */
mxGraphHierarchyModel.prototype.visit = function(visitor, dfsRoots, trackAncestors, seenNodes)
{
	if (dfsRoots == null)
	{
		return;
	}

	// Run a dfs from every root in turn, sharing the seen-set.
	for (var i = 0; i < dfsRoots.length; i++)
	{
		var rootNode = dfsRoots[i];

		if (rootNode == null)
		{
			continue;
		}

		if (seenNodes == null)
		{
			seenNodes = new Object();
		}

		if (trackAncestors)
		{
			// Seed the ancestor hash code for this root:
			// [run counter, root index].
			rootNode.hashCode = [this.dfsCount, i];
			this.extendedDfs(null, rootNode, null, visitor, seenNodes,
					rootNode.hashCode, i, 0);
		}
		else
		{
			this.dfs(null, rootNode, null, visitor, seenNodes, 0);
		}
	}

	this.dfsCount++;
};
/**
 * Function: dfs
 *
 * Performs a depth first search on the internal hierarchy model.
 *
 * Parameters:
 *
 * parent - the parent internal node of the current internal node
 * root - the current internal node
 * connectingEdge - the internal edge connecting the internal node and the
 * parent internal node, if any
 * visitor - the visitor pattern to be called for each node
 * seen - a map of all nodes seen by this dfs, keyed by node id
 * layer - the layer on the dfs tree (not the same as the model ranks)
 */
mxGraphHierarchyModel.prototype.dfs = function(parent, root, connectingEdge, visitor, seen, layer)
{
	if (root == null)
	{
		return;
	}

	if (seen[root.id] != null)
	{
		// Already visited: report with the "seen" flag set.
		visitor(parent, root, connectingEdge, layer, 1);
		return;
	}

	seen[root.id] = root;
	visitor(parent, root, connectingEdge, layer, 0);

	// Snapshot the outgoing edges so the visitor may invert edge
	// directions on the node without disturbing this iteration.
	var outgoing = root.connectsAsSource.slice();

	for (var i = 0; i < outgoing.length; i++)
	{
		this.dfs(root, outgoing[i].target, outgoing[i], visitor, seen,
				layer + 1);
	}
};
/**
 * Function: extendedDfs
 *
 * Performs a depth first search on the internal hierarchy model. This dfs
 * extends the default version by keeping track of each cell's ancestors,
 * but it should only be used when necessary because it can be
 * computationally intensive for deep searches.
 *
 * Ancestor tracking uses a primitive hash-code array instead of a cloned
 * HashSet (which was too expensive on CPU): every node carries a hashCode
 * array that is its parent's array plus one extra int identifying the
 * child. Roots carry two ints — the second distinguishes roots, the first
 * is the dfs run counter (so the dfs is not thread safe). A node's
 * hashCode is (re)built whenever it is unset or was set by a previous run.
 *
 * Parameters:
 *
 * parent - the parent internal node of the current internal node
 * root - the current internal node
 * connectingEdge - the internal edge connecting the internal node and the
 * parent internal node, if any
 * visitor - the visitor pattern to be called for each node
 * seen - a map of all nodes seen by this dfs, keyed by node id
 * ancestors - the parent hash code
 * childHash - the new hash code for this node
 * layer - the layer on the dfs tree (not the same as the model ranks)
 */
mxGraphHierarchyModel.prototype.extendedDfs = function(parent, root, connectingEdge, visitor, seen, ancestors, childHash, layer)
{
	if (root == null)
	{
		return;
	}

	if (parent != null)
	{
		// Rebuild this node's hash code when it is missing or stale
		// (i.e. it was produced by an earlier dfs run, detectable via
		// the run counter stored in slot 0).
		if (root.hashCode == null ||
			root.hashCode[0] != parent.hashCode[0])
		{
			root.hashCode = parent.hashCode.slice();
			root.hashCode.push(childHash);
		}
	}

	if (seen[root.id] != null)
	{
		// Already visited: report with the "seen" flag set.
		visitor(parent, root, connectingEdge, layer, 1);
		return;
	}

	seen[root.id] = root;
	visitor(parent, root, connectingEdge, layer, 0);

	// Snapshot the outgoing edges so the visitor may invert edge
	// directions on the node without disturbing this iteration.
	var outgoing = root.connectsAsSource.slice();

	for (var i = 0; i < outgoing.length; i++)
	{
		this.extendedDfs(root, outgoing[i].target, outgoing[i], visitor,
				seen, root.hashCode, i, layer + 1);
	}
};
| {
"pile_set_name": "Github"
} |
version https://git-lfs.github.com/spec/v1
oid sha256:52a94d8a967a4f113c19180c5eb5f76139509b2f5a0714a7c22d5a9341dc22ba
size 11535
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.