text
stringlengths
2
100k
meta
dict
require 'rails_helper' feature "Registration", :type => :feature do it "should let you create a new user" do visit new_user_registration_path within "#new_user" do fill_in "user_email", with: "[email protected]" fill_in "user_password", with: "123456789" fill_in "user_password_confirmation", with: "123456789" end click_button "Sign up" if User.devise_modules.include? :confirmable expect( page.body ).to include( 'A message with a confirmation link has been sent to your email address.' ) body = ActionMailer::Base.deliveries.last.body md = body.encoded.match /(\/users\/confirmation.*) / if !md assert( false, "Confirmation URL not found in message" ) end visit md[1] expect( page.body ).to include( "Your email address has been successfully confirmed." ) else expect( page.body ).to include( "Welcome! You have signed up successfully." ) end click_link "Profile" end it "should require a user to have an email address" do visit new_user_registration_path within "#new_user" do # fill_in "user_email", with: "[email protected]" fill_in "user_password", with: "123456789" fill_in "user_password_confirmation", with: "123456789" end click_button "Sign up" expect( page.body ).to_not include( "Welcome! You have signed up successfully." ) end it "should let a user change their password if they enter in their existing password" do visit new_user_registration_path within "#new_user" do fill_in "user_email", with: "[email protected]" fill_in "user_password", with: "123456789" fill_in "user_password_confirmation", with: "123456789" end click_button "Sign up" if User.devise_modules.include? :confirmable expect( page.body ).to include( 'A message with a confirmation link has been sent to your email address.' ) body = ActionMailer::Base.deliveries.last.body md = body.encoded.match /(\/users\/confirmation.*) / if !md assert( false, "Confirmation URL not found in message" ) end visit md[1] expect( page.body ).to include( "Your email address has been successfully confirmed." ) else expect( page.body ).to include( "Welcome! You have signed up successfully." ) end click_link "Profile" within "#edit_user" do fill_in "user_password", with: "012345678" fill_in "user_password_confirmation", with: "012345678" end click_button "Update" expect( page.body ).to include( "we need your current password to confirm your changes" ) within "#edit_user" do fill_in "user_password", with: "012345678" fill_in "user_password_confirmation", with: "012345678" fill_in "user_current_password", with: "123456789" end click_button "Update" expect( page.body ).to include( "Your account has been updated successfully." ) end it "following a forgot password link should let you reset your password and log in" do user = create :user visit new_user_password_path within "#new_user" do fill_in "user_email", with: user.email end click_button "Send me reset password instructions" expect( page.body ).to include( "You will receive an email with instructions on how to reset your password in a few minutes." ) body = ActionMailer::Base.deliveries.last.body md = body.encoded.match /(\/users\/password\/edit\?reset.*)/ if !md assert( false, "URL NOT FOUND IN MESSAGE") end visit md[1] within "#new_user" do fill_in "user_password", with: "new_password" fill_in "user_password_confirmation", with: "new_password" end click_button "Change my password" expect( page.body ).to_not include( "Email can't be blank" ) visit edit_user_registration_path expect( page.body ).to include( "Sign Out") click_link "Sign Out" expect( page.body ).to include( "Signed out successfully." ) visit new_user_session_path within "#new_user" do fill_in "user_email", with: user.email fill_in "user_password", with: "new_password" end click_button "Log in" expect( page.body ).to include( "Signed in successfully.") end end
{ "pile_set_name": "Github" }
import React from 'react'; import { CollapsibleSection, HealthCounts } from '@spinnaker/core'; import { IAmazonServerGroupDetailsSectionProps } from './IAmazonServerGroupDetailsSectionProps'; export class HealthDetailsSection extends React.Component<IAmazonServerGroupDetailsSectionProps> { public render(): JSX.Element { const { serverGroup } = this.props; if (serverGroup.instanceCounts.total > 0) { return ( <CollapsibleSection heading="Health" defaultExpanded={true}> <dl className="dl-horizontal dl-narrow"> <dt>Instances</dt> <dd> <HealthCounts container={serverGroup.instanceCounts} className="pull-left" /> </dd> </dl> </CollapsibleSection> ); } return null; } }
{ "pile_set_name": "Github" }
<?php /** * This script alters the session variable 'tree', expanding it * at the dn specified in the query string. * * Note: this script is equal and opposite to collapse.php * * @package phpLDAPadmin * @subpackage Tree * @see collapse.php */ /** */ require './common.php'; $dn = get_request('dn','GET',true); $tree = get_cached_item($app['server']->getIndex(),'tree'); $entry = $tree->getEntry($dn); $entry->open(); set_cached_item($app['server']->getIndex(),'tree','null',$tree); header(sprintf('Location:index.php?server_id=%s&junk=%s#%s', $app['server']->getIndex(),random_junk(),htmlid($app['server']->getIndex(),$dn))); die(); ?>
{ "pile_set_name": "Github" }
/*---------------------------------------------------------------------------*\ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | Website: https://openfoam.org \\ / A nd | Copyright (C) 2011-2020 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License This file is part of OpenFOAM. OpenFOAM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenFOAM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. \*---------------------------------------------------------------------------*/ #include "cyclicPolyPatch.H" #include "addToRunTimeSelectionTable.H" #include "polyBoundaryMesh.H" #include "polyMesh.H" #include "demandDrivenData.H" #include "OFstream.H" #include "matchPoints.H" #include "EdgeMap.H" #include "Time.H" #include "transformField.H" #include "SubField.H" #include "unitConversion.H" // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // namespace Foam { defineTypeNameAndDebug(cyclicPolyPatch, 0); addToRunTimeSelectionTable(polyPatch, cyclicPolyPatch, word); addToRunTimeSelectionTable(polyPatch, cyclicPolyPatch, dictionary); } // * * * * * * * * * * * * Protected Member Functions * * * * * * * * * * * // void Foam::cyclicPolyPatch::initCalcGeometry(PstreamBuffers& pBufs) { polyPatch::initCalcGeometry(pBufs); } void Foam::cyclicPolyPatch::initCalcGeometry ( const primitivePatch& referPatch, pointField& nbrCtrs, vectorField& nbrAreas, pointField& nbrCc ) {} void Foam::cyclicPolyPatch::calcGeometry(PstreamBuffers& pBufs) { static_cast<cyclicTransform&>(*this) = cyclicTransform ( name(), faceCentres(), faceAreas(), *this, nbrPatchName(), nbrPatch().faceCentres(), nbrPatch().faceAreas(), nbrPatch(), matchTolerance() ); } void Foam::cyclicPolyPatch::initMovePoints ( PstreamBuffers& pBufs, const pointField& p ) { polyPatch::initMovePoints(pBufs, p); } void Foam::cyclicPolyPatch::movePoints ( PstreamBuffers& pBufs, const pointField& p ) { polyPatch::movePoints(pBufs, p); } void Foam::cyclicPolyPatch::initUpdateMesh(PstreamBuffers& pBufs) { polyPatch::initUpdateMesh(pBufs); } void Foam::cyclicPolyPatch::updateMesh(PstreamBuffers& pBufs) { polyPatch::updateMesh(pBufs); deleteDemandDrivenData(coupledPointsPtr_); deleteDemandDrivenData(coupledEdgesPtr_); } // * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * * // Foam::cyclicPolyPatch::cyclicPolyPatch ( const word& name, const label size, const label start, const label index, const polyBoundaryMesh& bm, const word& patchType ) : coupledPolyPatch(name, size, start, index, bm, patchType), cyclicTransform(false), nbrPatchName_(word::null), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) { // Neighbour patch might not be valid yet so no transformation // calculation possible. } Foam::cyclicPolyPatch::cyclicPolyPatch ( const word& name, const label size, const label start, const label index, const polyBoundaryMesh& bm, const word& patchType, const word& nbrPatchName ) : coupledPolyPatch(name, size, start, index, bm, patchType), cyclicTransform(false), nbrPatchName_(nbrPatchName), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) { // Neighbour patch might not be valid yet so no transformation // calculation possible. } Foam::cyclicPolyPatch::cyclicPolyPatch ( const word& name, const dictionary& dict, const label index, const polyBoundaryMesh& bm, const word& patchType ) : coupledPolyPatch(name, dict, index, bm, patchType), cyclicTransform(dict, false), nbrPatchName_(dict.lookupOrDefault("neighbourPatch", word::null)), coupleGroup_(dict), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) { if (nbrPatchName_ == word::null && !coupleGroup_.valid()) { FatalIOErrorInFunction ( dict ) << "No \"neighbourPatch\" provided." << endl << exit(FatalIOError); } if (nbrPatchName_ == name) { FatalIOErrorInFunction(dict) << "Neighbour patch name " << nbrPatchName_ << " cannot be the same as this patch " << name << exit(FatalIOError); } // Neighbour patch might not be valid yet so no transformation // calculation possible. } Foam::cyclicPolyPatch::cyclicPolyPatch ( const cyclicPolyPatch& pp, const polyBoundaryMesh& bm ) : coupledPolyPatch(pp, bm), cyclicTransform(pp), nbrPatchName_(pp.nbrPatchName_), coupleGroup_(pp.coupleGroup_), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) { // Neighbour patch might not be valid yet so no transformation // calculation possible. } Foam::cyclicPolyPatch::cyclicPolyPatch ( const cyclicPolyPatch& pp, const polyBoundaryMesh& bm, const label index, const label newSize, const label newStart, const word& neiName ) : coupledPolyPatch(pp, bm, index, newSize, newStart), cyclicTransform(pp), nbrPatchName_(neiName), coupleGroup_(pp.coupleGroup_), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) { if (neiName == name()) { FatalErrorInFunction << "Neighbour patch name " << neiName << " cannot be the same as this patch " << name() << exit(FatalError); } // Neighbour patch might not be valid yet so no transformation // calculation possible. } Foam::cyclicPolyPatch::cyclicPolyPatch ( const cyclicPolyPatch& pp, const polyBoundaryMesh& bm, const label index, const labelUList& mapAddressing, const label newStart ) : coupledPolyPatch(pp, bm, index, mapAddressing, newStart), cyclicTransform(pp), nbrPatchName_(pp.nbrPatchName_), coupleGroup_(pp.coupleGroup_), nbrPatchID_(-1), coupledPointsPtr_(nullptr), coupledEdgesPtr_(nullptr), ownToNbrOrderDataPtr_(nullptr), ownToNbrCyclicOrderDataPtr_(nullptr), ownToNbrDebugOrderDataPtr_(nullptr) {} // * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * // Foam::cyclicPolyPatch::~cyclicPolyPatch() { deleteDemandDrivenData(coupledPointsPtr_); deleteDemandDrivenData(coupledEdgesPtr_); } // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // const Foam::word& Foam::cyclicPolyPatch::nbrPatchName() const { if (nbrPatchName_.empty()) { // Try and use patchGroup to find samplePatch and sampleRegion label patchID = coupleGroup_.findOtherPatchID(*this); nbrPatchName_ = boundaryMesh()[patchID].name(); } return nbrPatchName_; } Foam::label Foam::cyclicPolyPatch::nbrPatchID() const { if (nbrPatchID_ == -1) { nbrPatchID_ = this->boundaryMesh().findPatchID(nbrPatchName()); if (nbrPatchID_ == -1) { FatalErrorInFunction << "Illegal neighbourPatch name " << nbrPatchName() << endl << "Valid patch names are " << this->boundaryMesh().names() << exit(FatalError); } // Check that it is a cyclic const cyclicPolyPatch& nbrPatch = refCast<const cyclicPolyPatch> ( this->boundaryMesh()[nbrPatchID_] ); if (nbrPatch.nbrPatchName() != name()) { WarningInFunction << "Patch " << name() << " specifies neighbour patch " << nbrPatchName() << endl << " but that in return specifies " << nbrPatch.nbrPatchName() << endl; } } return nbrPatchID_; } const Foam::edgeList& Foam::cyclicPolyPatch::coupledPoints() const { if (!coupledPointsPtr_) { const faceList& nbrLocalFaces = nbrPatch().localFaces(); const labelList& nbrMeshPoints = nbrPatch().meshPoints(); // Now all we know is that relative face index in *this is same // as coupled face in nbrPatch and also that the 0th vertex // corresponds. // From local point to nbrPatch or -1. labelList coupledPoint(nPoints(), -1); forAll(*this, patchFacei) { const face& fA = localFaces()[patchFacei]; const face& fB = nbrLocalFaces[patchFacei]; forAll(fA, indexA) { label patchPointA = fA[indexA]; if (coupledPoint[patchPointA] == -1) { label indexB = (fB.size() - indexA) % fB.size(); // Filter out points on wedge axis if (meshPoints()[patchPointA] != nbrMeshPoints[fB[indexB]]) { coupledPoint[patchPointA] = fB[indexB]; } } } } coupledPointsPtr_ = new edgeList(nPoints()); edgeList& connected = *coupledPointsPtr_; // Extract coupled points. label connectedI = 0; forAll(coupledPoint, i) { if (coupledPoint[i] != -1) { connected[connectedI++] = edge(i, coupledPoint[i]); } } connected.setSize(connectedI); if (debug) { OFstream str ( boundaryMesh().mesh().time().path() /name() + "_coupledPoints.obj" ); label vertI = 0; Pout<< "Writing file " << str.name() << " with coordinates of " << "coupled points" << endl; forAll(connected, i) { const point& a = points()[meshPoints()[connected[i][0]]]; const point& b = points()[nbrMeshPoints[connected[i][1]]]; str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl; str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl; vertI += 2; str<< "l " << vertI-1 << ' ' << vertI << nl; } } } return *coupledPointsPtr_; } const Foam::edgeList& Foam::cyclicPolyPatch::coupledEdges() const { if (!coupledEdgesPtr_) { const edgeList& pointCouples = coupledPoints(); // Build map from points on *this (A) to points on neighbourpatch (B) Map<label> aToB(2*pointCouples.size()); forAll(pointCouples, i) { const edge& e = pointCouples[i]; aToB.insert(e[0], e[1]); } // Map from edge on A to points (in B indices) EdgeMap<label> edgeMap(nEdges()); forAll(*this, patchFacei) { const labelList& fEdges = faceEdges()[patchFacei]; forAll(fEdges, i) { label edgeI = fEdges[i]; const edge& e = edges()[edgeI]; // Convert edge end points to corresponding points on B side. Map<label>::const_iterator fnd0 = aToB.find(e[0]); if (fnd0 != aToB.end()) { Map<label>::const_iterator fnd1 = aToB.find(e[1]); if (fnd1 != aToB.end()) { edgeMap.insert(edge(fnd0(), fnd1()), edgeI); } } } } // Use the edgeMap to get the edges on the B side. const cyclicPolyPatch& nbrPatch = this->nbrPatch(); const labelList& nbrMp = nbrPatch.meshPoints(); const labelList& mp = meshPoints(); coupledEdgesPtr_ = new edgeList(edgeMap.size()); edgeList& coupledEdges = *coupledEdgesPtr_; label coupleI = 0; forAll(nbrPatch, patchFacei) { const labelList& fEdges = nbrPatch.faceEdges()[patchFacei]; forAll(fEdges, i) { label edgeI = fEdges[i]; const edge& e = nbrPatch.edges()[edgeI]; // Look up A edge from HashTable. EdgeMap<label>::iterator iter = edgeMap.find(e); if (iter != edgeMap.end()) { label edgeA = iter(); const edge& eA = edges()[edgeA]; // Store correspondence. Filter out edges on wedge axis. if ( edge(mp[eA[0]], mp[eA[1]]) != edge(nbrMp[e[0]], nbrMp[e[1]]) ) { coupledEdges[coupleI++] = edge(edgeA, edgeI); } // Remove so we build unique list edgeMap.erase(iter); } } } coupledEdges.setSize(coupleI); // Some checks forAll(coupledEdges, i) { const edge& e = coupledEdges[i]; if (e[0] < 0 || e[1] < 0) { FatalErrorInFunction << "Problem : at position " << i << " illegal couple:" << e << abort(FatalError); } } if (debug) { OFstream str ( boundaryMesh().mesh().time().path() /name() + "_coupledEdges.obj" ); label vertI = 0; Pout<< "Writing file " << str.name() << " with centres of " << "coupled edges" << endl; forAll(coupledEdges, i) { const edge& e = coupledEdges[i]; const point& a = edges()[e[0]].centre(localPoints()); const point& b = nbrPatch.edges()[e[1]].centre ( nbrPatch.localPoints() ); str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl; str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl; vertI += 2; str<< "l " << vertI-1 << ' ' << vertI << nl; } } } return *coupledEdgesPtr_; } void Foam::cyclicPolyPatch::initOrder ( PstreamBuffers&, const primitivePatch& pp ) const { if (pp.empty()) { return; } if (owner()) { ownToNbrOrderDataPtr_ = new ownToNbrOrderData(); if (coupledPolyPatch::debug) { ownToNbrDebugOrderDataPtr_ = new ownToNbrDebugOrderData(); } coupledPolyPatch::initOrder ( ownToNbrOrderDataPtr_(), ownToNbrDebugOrderDataPtr_, pp ); const scalarField magAreas(mag(pp.faceAreas())); ownToNbrCyclicOrderDataPtr_ = new ownToNbrCyclicOrderData(); ownToNbrCyclicOrderDataPtr_->ctr = sum(pp.faceCentres()*magAreas)/sum(magAreas); ownToNbrCyclicOrderDataPtr_->area = sum(pp.faceAreas()); } } bool Foam::cyclicPolyPatch::order ( PstreamBuffers& pBufs, const primitivePatch& pp, labelList& faceMap, labelList& rotation ) const { if (pp.empty()) { return false; } ownToNbrOrderData ownToNbr; autoPtr<ownToNbrDebugOrderData> ownToNbrDebugPtr(nullptr); if (!owner()) { ownToNbr = nbrPatch().ownToNbrOrderDataPtr_(); ownToNbrDebugPtr = nbrPatch().ownToNbrDebugOrderDataPtr_; cyclicTransform ct ( name(), pp.faceCentres(), pp.faceAreas(), *this, nbrPatchName(), pointField(1, nbrPatch().ownToNbrCyclicOrderDataPtr_->ctr), vectorField(1, nbrPatch().ownToNbrCyclicOrderDataPtr_->area), nbrPatch(), matchTolerance() ); ownToNbr.transform(ct.transform()); if (ownToNbrDebugPtr.valid()) { ownToNbrDebugPtr->transform(ct.transform()); } } return coupledPolyPatch::order ( ownToNbr, ownToNbrDebugPtr, pp, faceMap, rotation ); } void Foam::cyclicPolyPatch::write(Ostream& os) const { coupledPolyPatch::write(os); if (!nbrPatchName_.empty()) { writeEntry(os, "neighbourPatch", nbrPatchName_); } coupleGroup_.write(os); cyclicTransform::write(os); } // ************************************************************************* //
{ "pile_set_name": "Github" }
To customize config traders copy this CfgServerTrader folder to your mission. Then in description.ext replace this line: #include "\z\addons\dayz_code\Configs\CfgServerTrader\CfgServerTrader.hpp" with this: #include "CfgServerTrader\CfgServerTrader.hpp"
{ "pile_set_name": "Github" }
/* * Copyright (C) 2018 Orange. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy ofthe License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specificlanguage governing permissions and * limitations under the License. * */ /* Parsing OpenVswitch syntax for rules is inherently hard. The main reason * is the lack of clear lexical class for a few characters notably colon that * is both an important action token and a character in IPv6 and mac addresses. * * We have decided to follow a two phase approach. First we split the text in * main components so that filters and individual actions are recognized, * then we split the elementary filters and actions in simpler components. For * filters it only means checking if there is a mask. For actions, parsing may * be more involved. */ package jsonof import ( "bufio" "bytes" "encoding/json" "errors" "fmt" "io" "strconv" "strings" "github.com/skydive-project/skydive/graffiti/logging" ) // JSONRule is an openflow rule ready for JSON export type JSONRule struct { Cookie uint64 `json:"Cookie"` // cookie value of the rule Table int `json:"Table"` // table containing the rule Priority int `json:"Priority"` // priority of rule Meta []*Meta `json:"Meta,omitempty"` // anything that is not a filter. Filters []*Filter `json:"Filters"` // all the filter Actions []*Action `json:"Actions"` // all the actions UUID string `json:"-"` // UUID used by skydive RawFilter string `json:"-"` // Kept to be respawned } // JSONGroup is an openflow group ready for JSON export type JSONGroup struct { GroupID uint `json:"GroupId"` // id of the group Type string `json:"Type"` // group type Meta []*Meta `json:"Meta,omitempty"` // anything that is not a bucket Buckets []*Bucket `json:"Buckets"` // buckets UUID string `json:"-"` // UUID used by skydive } // Bucket is the representation of a bucket in an openflow group type Bucket struct { ID uint `json:"Id"` // id of bucket Meta []*Meta `json:"Meta,omitempty"` // anything that is not an action Actions []*Action `json:"Actions"` // action list } // Action represents an atomic action in an openflow rule type Action struct { Action string `json:"Function"` // Action name Arguments []*Action `json:"Arguments,omitempty"` // Arguments if it exists Key string `json:"Key,omitempty"` // Key for aguments such as k=v } // Filter is an elementary filter in an openflow rule type Filter struct { Key string `json:"Key"` // left hand side Value string `json:"Value"` // right hand side Mask string `json:"Mask,omitempty"` // mask if used } // Meta is anything not a filter or an action always as a pair key/value type Meta struct { Key string `json:"Key"` // key Value string `json:"Value"` // raw value } // Token is a lexical entity type Token int const ( // Token values as recognized by scan tNt Token = iota tEOF tText tSpace tComma tEqual tClosePar ) const ( kwActions = "actions" kwBucket = "bucket" kwBucketID = "bucket_id" kwCookie = "cookie" kwGroupID = "group_id" kwLoad = "load" kwMove = "move" kwPriority = "priority" kwSetField = "set_field" kwTable = "table" kwType = "type" ) // TokenNames is the array of printable names for Token. var TokenNames = []string{ "NT", "EOF", "TEXT", "SPACE", "COMMA", "EQUAL", "CPAR", } var eof = rune(0) // Stream represents a text buffer that can be scanned type Stream struct { r *bufio.Reader last rune token Token value string } // NewStream returns a new instance of Stream. func NewStream(r io.Reader) *Stream { return &Stream{r: bufio.NewReader(r), last: eof, token: tNt} } // isWhitespace check if the rune is a classical separator // (space of tab or eol) func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } // read reads the next rune from the bufferred reader. // Returns the rune(0) if an error occurs (or io.EOF is returned). func (s *Stream) read() rune { if s.last != eof { ch := s.last s.last = eof return ch } ch, _, err := s.r.ReadRune() if err != nil { return eof } return ch } // unread places the previously read rune back on the reader. func (s *Stream) unread(r rune) { s.last = r } // unscan puts back the previously read token. func (s *Stream) unscan(tok Token, lit string) { s.token = tok s.value = lit } // scan returns the next token and literal value. // nolint: gocyclo func (s *Stream) scan() (tok Token, lit string) { if s.token != tNt { tok := s.token s.token = tNt return tok, s.value } // Read the next rune. ch := s.read() // If we see whitespace then consume all contiguous whitespace. // If we see a letter then consume as an ident or reserved word. switch ch { case eof: return tEOF, "" case ' ', '\t', '\n': for { ch = s.read() if ch == eof { break } else if !isWhitespace(ch) { s.unread(ch) break } } return tSpace, "" case '=': return tEqual, "" case ',': return tComma, "" case ')': return tClosePar, "" default: var buf bytes.Buffer buf.WriteRune(ch) if ch == '(' { s.fill(&buf, 1) } else { s.fill(&buf, 0) } return tText, buf.String() } } func (s *Stream) fill(buf *bytes.Buffer, parLevel int) { fillLoop: for { ch := s.read() switch ch { case eof: break fillLoop case ' ', '\t', '\n', ',', '=': if parLevel == 0 { s.unread(ch) break fillLoop } case '(': parLevel = parLevel + 1 case ')': if parLevel == 0 { s.unread(ch) break fillLoop } parLevel = parLevel - 1 } _, err := buf.WriteRune(ch) if err != nil { logging.GetLogger().Errorf( "jsonof: fill cannot write into buffer: %s", err) } } } // ParseRule is the main entry point for the rule parser. func (s *Stream) ParseRule(result *JSONRule) error { tok, val := s.scan() if tok == tSpace { tok, val = s.scan() } if tok == tText { return s.ParseRuleEq(result, []*Meta{}, val) } return errors.New("expecting an ident") } func makeFilter(pair *Meta) *Filter { rhs := strings.Split(pair.Value, "/") if len(rhs) == 2 { return &Filter{Key: pair.Key, Value: rhs[0], Mask: rhs[1]} } return &Filter{Key: pair.Key, Value: pair.Value, Mask: ""} } // ParseRuleEq implements the state of the rule parser waiting for an equal // sign or a break signifying a next block (happens with filter abbreviations // like ip, tcp, etc.) func (s *Stream) ParseRuleEq(result *JSONRule, stack []*Meta, lhs string) error { tok, val := s.scan() if tok == tEqual { tok, val = s.scan() if tok == tText { return s.parseRulePair(result, stack, lhs, val) } return errors.New("expecting a right hand side") } if tok == tComma || tok == tSpace { pair := &Meta{Key: lhs} stack = append(stack, pair) switch lhs { case "reset_counts", "no_packet_counts", "no_byte_counts": result.Meta = append(result.Meta, stack...) return s.ParseRule(result) default: s.unscan(tok, val) return s.ParseRuleSep(result, stack) } } return errors.New("expecting = , or ''") } func (s *Stream) parseRulePair( result *JSONRule, stack []*Meta, lhs string, rhs string, ) error { switch lhs { case kwCookie: v, err := strconv.ParseUint(rhs, 0, 64) result.Cookie = v if err != nil { logging.GetLogger().Errorf( "Cannot parse cookie in openflow rule: %s", rhs) } case kwTable: v, err := strconv.ParseUint(rhs, 10, 32) result.Table = int(v) if err != nil { logging.GetLogger().Errorf( "Cannot parse table in openflow rule: %s", rhs) } case kwPriority: v, err := strconv.ParseUint(rhs, 10, 32) result.Priority = int(v) if err != nil { logging.GetLogger().Errorf( "Cannot parse priority in openflow rule: %s", rhs) } case kwActions: result.Actions = append(result.Actions, makeAction(rhs)) return s.ParseRuleAction(result) default: var pair = &Meta{Key: lhs, Value: rhs} stack = append(stack, pair) } return s.ParseRuleSep(result, stack) } // ParseRuleSep implements the state of the parser afer an x=y, a break // is expected but it may be either a func (s *Stream) ParseRuleSep(result *JSONRule, stack []*Meta) error { tok, _ := s.scan() if tok == tComma { tok2, val2 := s.scan() if tok2 == tSpace { result.Meta = append(result.Meta, stack...) return s.ParseRule(result) } if tok2 == tText { return s.ParseRuleEq(result, stack, val2) } return errors.New("expected text or space after comma") } if tok == tSpace { var raw bytes.Buffer for i, meta := range stack { if i > 0 { raw.WriteByte(',') } raw.WriteString(meta.Key) if meta.Value != "" { raw.WriteByte(':') raw.WriteString(meta.Value) } result.Filters = append(result.Filters, makeFilter(meta)) } result.RawFilter = raw.String() return s.ParseRule(result) } return errors.New("expecting a comma or a space") } func makeArg(raw string) *Action { braPos := strings.Index(raw, "[") if braPos == -1 { return &Action{Action: raw} } if raw[len(raw)-1] != ']' { logging.GetLogger().Errorf("Expecting a closing bracket in %s", raw) return nil } actRange := Action{Action: "range"} field := raw[0:braPos] actRange.Arguments = append(actRange.Arguments, &Action{Action: field}) if len(raw)-braPos > 2 { content := raw[braPos+1 : len(raw)-1] dotPos := strings.Index(content, "..") if dotPos == -1 { actRange.Arguments = append( actRange.Arguments, &Action{Action: content}) } else { start := content[0:dotPos] end := content[dotPos+2:] actRange.Arguments = append( actRange.Arguments, &Action{Action: start}, &Action{Action: end}) } } return &actRange } func makeFieldAssign(action *Action, rem string) { arrowStart := strings.Index(rem, "->") if arrowStart == -1 { logging.GetLogger().Errorf( "Expecting an arrow in action %s:%s", action.Action, rem) } else { arg1 := rem[0:arrowStart] arg2 := rem[arrowStart+2:] switch action.Action { case kwLoad: action.Arguments = append( action.Arguments, &Action{Action: arg1}, makeArg(arg2)) case kwMove: action.Arguments = append( action.Arguments, makeArg(arg1), makeArg(arg2)) case kwSetField: slashPos := strings.Index(arg1, "/") if slashPos == -1 { action.Arguments = append( action.Arguments, &Action{Action: arg1}, nil, makeArg(arg2)) } else { arg11 := arg1[0:slashPos] arg12 := arg1[slashPos+1:] action.Arguments = append( action.Arguments, &Action{Action: arg11}, &Action{Action: arg12}, makeArg(arg2)) } } } } func makeAction(raw string) *Action { actionSep := strings.IndexAny(raw, ":(") if actionSep == -1 { return makeArg(raw) } key := raw[0:actionSep] action := Action{Action: key} rem := raw[actionSep+1:] if raw[actionSep] == ':' { switch key { case kwSetField, kwLoad, kwMove: makeFieldAssign(&action, rem) case "enqueue": // This syntax is not consistent, poor choice of ovs // enqueue(port, queue) should be preferred. colonPos := strings.Index(rem, ":") if colonPos == -1 { // Probably should never happens action.Arguments = append(action.Arguments, makeArg(rem)) } else { port := rem[0:colonPos] queue := rem[colonPos+1:] action.Arguments = append( action.Arguments, &Action{Action: port}, &Action{Action: queue}) } default: action.Arguments = append(action.Arguments, makeArg(rem)) } } else { s := NewStream(strings.NewReader(rem)) err := s.ParseActionBody(&action, true) if key == "learn" { fixLearnArguments(&action) } if err != nil { logging.GetLogger().Errorf("Parsing arguments of %s: %s", raw, err) } } return &action } // Learn is a strange beast because some of the k=v arguments are not named // arguments but field assignment. We need to transform them back in real // actions and this can only be done if we have a dictionary of named // arguments for learn. func fixLearnArguments(action *Action) { for i, arg := range action.Arguments { switch arg.Key { case "", "idle_timeout", "hard_timeout", kwPriority, "cookie", "fin_idle_timeout", "fin_hard_timeout", kwTable, "limit", "result_dst": continue default: actAssign := Action{ Action: "=", Arguments: []*Action{makeArg(arg.Key), arg}, } arg.Key = "" action.Arguments[i] = &actAssign } } } // ParseActionBody reads the arguments of an action using parenthesis. func (s *Stream) ParseActionBody(act *Action, isFirst bool) error { tok, v := s.scan() if tok == tText { var a *Action tok1, v1 := s.scan() if tok1 != tEqual { s.unscan(tok1, v1) a = makeAction(v) } else { tok2, v2 := s.scan() if tok2 == tText { a = makeAction(v2) a.Key = v } else { return errors.New("expecting argument after equal") } } act.Arguments = append(act.Arguments, a) tok4, _ := s.scan() if tok4 == tComma { return s.ParseActionBody(act, false) } else if tok4 == tClosePar { return nil } else { return errors.New("expecting comma or closing par") } } else if tok == tComma { act.Arguments = append(act.Arguments, nil) return s.ParseActionBody(act, false) } else if tok == tClosePar { if !isFirst { act.Arguments = append(act.Arguments, nil) } return nil } return errors.New("expecting argument or argument separator") } // ParseRuleAction implements the state of the parser while reading an action // list. We only expect text separated by commas and ending of EOF. func (s *Stream) ParseRuleAction(result *JSONRule) error { tok, _ := s.scan() if tok == tEOF { return nil } else if tok == tComma { tok, val := s.scan() if tok == tText { result.Actions = append(result.Actions, makeAction(val)) return s.ParseRuleAction(result) } return errors.New("parseRuleAction: expecting an action after comma") } return errors.New("parseRuleAction: expecting a comma or eof") } // ParseGroup is the main entry point for the group parser. func (s *Stream) ParseGroup(result *JSONGroup) error { tok, lhs := s.scan() if tok == tSpace { tok, lhs = s.scan() } if tok == tText { return s.parseGroupEq(result, lhs) } if tok == tEOF && len(result.Buckets) > 0 { return nil } return fmt.Errorf("expecting id or eof, got %s", TokenNames[tok]) } func (s *Stream) parseGroupSep(result *JSONGroup) error { tok, _ := s.scan() if tok == tComma { tok2, lhs := s.scan() if tok2 == tText { return s.parseGroupEq(result, lhs) } return fmt.Errorf("expecting key after comma, got %s", TokenNames[tok]) } if tok == tEOF { return nil } return fmt.Errorf("expecting comma or eof, got %s", TokenNames[tok]) } func (s *Stream) parseGroupEq(result *JSONGroup, lhs string) error { tok, v := s.scan() var rhs string if tok != tEqual { s.unscan(tok, v) rhs = "" } else { if lhs == kwBucket { bucket := &Bucket{} if err := s.parseGroupBucket(bucket); err != nil { return err } result.Buckets = append(result.Buckets, bucket) return s.ParseGroup(result) } tok, rhs = s.scan() if tok != tText { return fmt.Errorf("expecting rhs of equal, got %s", TokenNames[tok]) } } switch lhs { case kwGroupID: v, err := strconv.ParseUint(rhs, 0, 32) result.GroupID = uint(v) if err != nil { logging.GetLogger().Errorf( "Cannot parse group_id in openflow group: %s", rhs) } case kwType: result.Type = rhs default: result.Meta = append(result.Meta, &Meta{Key: lhs, Value: rhs}) } return s.parseGroupSep(result) } func (s *Stream) parseGroupBucket(result *Bucket) error { for { tok, v := s.scan() if tok != tText { return fmt.Errorf("expecting id in pair, got %s", TokenNames[tok]) } switch v { case kwActions: tok, _ = s.scan() if tok != tEqual { return fmt.Errorf("Expecting =, got %s", TokenNames[tok]) } return s.parseGroupBucketActions(result) case kwBucket: s.unscan(tok, v) return nil default: if err := parseMetaBucket(result, v); err != nil { return err } } tok, _ = s.scan() if tok == tEOF { return nil } if tok != tComma { return fmt.Errorf( "cannot parse bucket, expecting comma or eof, got: %s", TokenNames[tok]) } } } func parseMetaBucket(result *Bucket, v string) error { colonPos := strings.Index(v, ":") if colonPos == -1 { result.Meta = append(result.Meta, &Meta{Key: v, Value: ""}) } else { lhs := v[0:colonPos] rhs := v[colonPos+1:] if lhs == kwBucketID { bID, err := strconv.ParseUint(rhs, 0, 32) if err != nil { return fmt.Errorf("Cannot parse bucket id: %s", rhs) } result.ID = uint(bID) } else { result.Meta = append(result.Meta, &Meta{Key: v, Value: ""}) } } return nil } func (s *Stream) parseGroupBucketActions(result *Bucket) error { for { tok, v := s.scan() if tok != tText { return fmt.Errorf("expecting id, got %s", TokenNames[tok]) } if v == kwBucket { s.unscan(tok, v) return nil } result.Actions = append(result.Actions, makeAction(v)) tok, _ = s.scan() if tok == tComma { continue } else if tok == tEOF { return nil } else { return fmt.Errorf("expecting comma or eof, got %s", TokenNames[tok]) } } } // ToAST transforms a string representing an openflow rule in an // abstract syntax tree of the rule func ToAST(rule string) (*JSONRule, error) { stream := NewStream(strings.NewReader(rule)) var result JSONRule if err := stream.ParseRule(&result); err != nil { return nil, err } return &result, nil } // ToASTGroup transforms a string representing an openflow rule in an // abstract syntax tree of the rule func ToASTGroup(group string) (*JSONGroup, error) { stream := NewStream(strings.NewReader(group)) var result JSONGroup if err := stream.ParseGroup(&result); err != nil { return nil, err } return &result, nil } // ToJSON transforms a string representing an openflow rule in a string // that is the encoding of the rule.got func ToJSON(rule string) (string, error) { ast, err := ToAST(rule) if err != nil { return "", err } jsBytes, err := json.Marshal(ast) if err != nil { return "", fmt.Errorf("cannot jsonify: %s", rule) } return string(jsBytes), nil } // ToJSONGroup transforms a string representing an openflow group in a string // that is the encoding of the group. func ToJSONGroup(group string) (string, error) { ast, err := ToASTGroup(group) if err != nil { return "", err } jsBytes, err := json.Marshal(ast) if err != nil { return "", fmt.Errorf("cannot jsonify: %s", group) } return string(jsBytes), nil } func writeAction(s *bytes.Buffer, a *Action) { if a == nil { return } if a.Key != "" { s.WriteString(a.Key) // nolint: gas s.Write([]byte("=")) // nolint: gas } if a.Action == "range" { writeAction(s, a.Arguments[0]) s.Write([]byte("[")) // nolint: gas switch len(a.Arguments) { case 2: writeAction(s, a.Arguments[1]) case 3: writeAction(s, a.Arguments[1]) s.Write([]byte("..")) // nolint: gas writeAction(s, a.Arguments[2]) } s.Write([]byte("]")) // nolint: gas return } if a.Action == "=" { writeAction(s, a.Arguments[0]) // Design decision not to choose = as ovs which is pretty confusing. s.Write([]byte(":=")) // nolint: gas writeAction(s, a.Arguments[1]) return } s.WriteString(a.Action) // nolint: gas if len(a.Arguments) > 0 { s.Write([]byte("(")) // nolint: gas for i, arg := range a.Arguments { if i > 0 { s.Write([]byte(",")) // nolint: gas } writeAction(s, arg) } s.Write([]byte(")")) // nolint: gas } } // PrettyAST gives back a string from a AST // // The syntax is close to the one used by OVS but without the quirks. // The most significant differences are: move, load, set_field, enqueue // (as regular actions) and fields in learn actions (using := instead of =). func PrettyAST(ast *JSONRule) string { // TODO: use string buffer when go minimal version bumps to 1.10 var s bytes.Buffer s.Write([]byte("cookie=0x")) s.WriteString(strconv.FormatUint(ast.Cookie, 16)) s.Write([]byte(", table=")) s.WriteString(strconv.Itoa(ast.Table)) s.Write([]byte(", ")) for _, meta := range ast.Meta { s.WriteString(meta.Key) if meta.Value != "" { s.Write([]byte("=")) s.WriteString(meta.Value) } s.Write([]byte(", ")) } s.Write([]byte("priority=")) s.WriteString(strconv.Itoa(ast.Priority)) for _, filter := range ast.Filters { s.Write([]byte(",")) s.WriteString(filter.Key) if filter.Value != "" { s.Write([]byte("=")) s.WriteString(filter.Value) if filter.Mask != "" { s.Write([]byte("/")) s.WriteString(filter.Mask) } } } s.Write([]byte(" actions=")) for i, action := range ast.Actions { if i > 0 { s.Write([]byte(",")) } writeAction(&s, action) } return s.String() } // PrettyASTGroup gives back a string from a AST // // The syntax is close to the one used by OVS but without the quirks. func PrettyASTGroup(ast *JSONGroup) string { // TODO: use string buffer when go minimal version bumps to 1.10 var s bytes.Buffer s.Write([]byte("group_id=")) s.WriteString(strconv.FormatUint(uint64(ast.GroupID), 10)) s.Write([]byte(", type=")) s.WriteString(ast.Type) for _, meta := range ast.Meta { s.Write([]byte(", ")) s.WriteString(meta.Key) if meta.Value != "" { s.Write([]byte("=")) s.WriteString(meta.Value) } } for _, bucket := range ast.Buckets { s.Write([]byte(", bucket=bucket_id:")) s.WriteString(strconv.FormatUint(uint64(bucket.ID), 10)) for _, meta := range bucket.Meta { s.Write([]byte(",")) s.WriteString(meta.Key) if meta.Value != "" { s.Write([]byte(":")) s.WriteString(meta.Value) } } s.Write([]byte(",actions=")) for i, action := range bucket.Actions { if i > 0 { s.Write([]byte(",")) } writeAction(&s, action) } } return s.String() }
{ "pile_set_name": "Github" }
package org.opencv.core; import java.util.Arrays; import java.util.List; public class MatOfDouble extends Mat { // 64FC(x) private static final int _depth = CvType.CV_64F; private static final int _channels = 1; public MatOfDouble() { super(); } protected MatOfDouble(long addr) { super(addr); if( !empty() && checkVector(_channels, _depth) < 0 ) throw new IllegalArgumentException("Incompatible Mat"); //FIXME: do we need release() here? } public static MatOfDouble fromNativeAddr(long addr) { return new MatOfDouble(addr); } public MatOfDouble(Mat m) { super(m, Range.all()); if( !empty() && checkVector(_channels, _depth) < 0 ) throw new IllegalArgumentException("Incompatible Mat"); //FIXME: do we need release() here? } public MatOfDouble(double...a) { super(); fromArray(a); } public void alloc(int elemNumber) { if(elemNumber>0) super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); } public void fromArray(double...a) { if(a==null || a.length==0) return; int num = a.length / _channels; alloc(num); put(0, 0, a); //TODO: check ret val! } public double[] toArray() { int num = checkVector(_channels, _depth); if(num < 0) throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); double[] a = new double[num * _channels]; if(num == 0) return a; get(0, 0, a); //TODO: check ret val! return a; } public void fromList(List<Double> lb) { if(lb==null || lb.size()==0) return; Double ab[] = lb.toArray(new Double[0]); double a[] = new double[ab.length]; for(int i=0; i<ab.length; i++) a[i] = ab[i]; fromArray(a); } public List<Double> toList() { double[] a = toArray(); Double ab[] = new Double[a.length]; for(int i=0; i<a.length; i++) ab[i] = a[i]; return Arrays.asList(ab); } }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <!-- # # %CopyrightBegin% # # Copyright Ericsson AB 2009-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # %CopyrightEnd% --> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:exsl="http://exslt.org/common" xmlns:func="http://exslt.org/functions" xmlns:erl="http://erlang.org" extension-element-prefixes="exsl func" xmlns:fn="http://www.w3.org/2005/02/xpath-functions"> <xsl:include href="db_html_params.xsl"/> <xsl:include href="db_funcs.xsl"/> <func:function name="erl:flip_first_char"> <xsl:param name="in"/> <xsl:variable name="uppercase" select="'ABCDEFGHIJKLMNOPQRSTUVWXYZ'"/> <xsl:variable name="lowercase" select="'abcdefghijklmnopqrstuvwxyz'"/> <xsl:variable name="first-char" select="substring($in, 1, 1)"/> <xsl:variable name="result"> <xsl:choose> <xsl:when test="contains($uppercase, $first-char)"> <xsl:value-of select="concat(translate($first-char, $uppercase, $lowercase), substring($in, 2))"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="concat(translate($first-char, $lowercase, $uppercase), substring($in, 2))"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <func:result select="$result"/> </func:function> <func:function name="erl:lower-case"> <xsl:param name="str"/> <xsl:variable name="uppercase" select="'ABCDEFGHIJKLMNOPQRSTUVWXYZ'"/> <xsl:variable name="lowercase" select="'abcdefghijklmnopqrstuvwxyz'"/> <xsl:variable name="result"> <xsl:value-of select="translate($str, $uppercase, $lowercase)"/> </xsl:variable> <func:result select="$result"/> </func:function> <func:function name="erl:to-link"> <xsl:param name="text"/> <func:result select="translate(erl:lower-case($text),'?: /()&quot;&#10;','--------')"/> </func:function> <!-- Used from template menu.funcs to sort a module's functions for the lefthand index list, from the module's .xml file. Returns a value on which to sort the entity in question (a <name> element). Some functions are listed with the name as an attribute, as in string.xml: <name name="join" arity="2"/> Others use the element value for the name, as in gen_server.xml: <name>start_link(Module, Args, Options) -> Result</name> Additionally, callbacks may be included, as in gen_server.xml: <name>Module:handle_call(Request, From, State) -> Result</name> For C reference pages the name tag has a substructure where the nametext tag is used in the sort, as in erl_nif.xml <name><ret>void *</ret><nametext>enif_alloc(size_t size)</nametext></name> So first, get the name from either the attribute or the element value. Then, reverse the case of the first character. This is because xsltproc, used for processing, orders uppercase before lowercase (even when the 'case-order="lower-first"' option is given). But we want the Module callback functions listed after a module's regular functions, as they are now. This doesn't affect the actual value used in the output, but just the value used as a sort key. To then ensure that uppercase is indeed sorted before lower, as we now want it to be, the 'case-order="upper-first"' option is used. This processing only affect the lefthand index list- the body of the doc page is not affected. --> <func:function name="erl:get_sort_field"> <xsl:param name="elem"/> <xsl:variable name="base"> <xsl:choose> <xsl:when test="ancestor::cref"> <xsl:value-of select="$elem/nametext"/> </xsl:when> <xsl:otherwise> <xsl:choose> <xsl:when test="string-length($elem/@name) > 0"> <xsl:value-of select="$elem/@name"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="substring-before($elem, '(')"/> </xsl:otherwise> </xsl:choose> </xsl:otherwise> </xsl:choose> </xsl:variable> <func:result select="erl:flip_first_char($base)"/> </func:function> <!-- Start of Dialyzer type/spec tags. See also the templates matching "name" and "seealso" as well as the template "menu.funcs" --> <xsl:param name="specs_file" select="''"/> <xsl:variable name="i" select="document($specs_file)"></xsl:variable> <xsl:param name="mod2app_file" select="''"/> <xsl:variable name="m2a" select="document($mod2app_file)"></xsl:variable> <xsl:key name="mod2app" match="module" use="@name"/> <xsl:key name="mfa" match="func/name[string-length(@arity) > 0 and ancestor::erlref]" use="concat(ancestor::erlref/module,':',@name, '/', @arity)"/> <xsl:template name="err"> <xsl:param name="f"/> <xsl:param name="m"/> <xsl:param name="n"/> <xsl:param name="a"/> <xsl:param name="s"/> <xsl:message terminate="yes"> Error <xsl:if test="$f != ''">in <xsl:value-of select ="$f"/>:</xsl:if> <xsl:if test="$m != ''"><xsl:value-of select ="$m"/>:</xsl:if> <xsl:value-of select="$n"/> <xsl:if test="$a != ''">/<xsl:value-of select ="$a"/></xsl:if>: <xsl:value-of select="$s"/> </xsl:message> </xsl:template> <xsl:template name="find_spec"> <xsl:variable name="curModule" select="ancestor::erlref/module"/> <xsl:variable name="mod" select="@mod"/> <xsl:variable name="name" select="@name"/> <xsl:variable name="arity" select="@arity"/> <xsl:variable name="clause_i" select="@clause_i"/> <xsl:variable name="spec0" select= "$i/specs/module[@name=$curModule]/spec [name=$name and arity=$arity and (string-length($mod) = 0 or module = $mod)]"/> <xsl:variable name="spec" select="$spec0[string-length($clause_i) = 0 or position() = $clause_i]"/> <xsl:if test="count($spec) != 1"> <xsl:variable name="why"> <xsl:choose> <xsl:when test="count($spec) > 1">ambiguous spec</xsl:when> <xsl:when test="count($spec) = 0">unknown spec</xsl:when> </xsl:choose> </xsl:variable> <xsl:call-template name="err"> <xsl:with-param name="f" select="$curModule"/> <xsl:with-param name="m" select="$mod"/> <xsl:with-param name="n" select="$name"/> <xsl:with-param name="a" select="$arity"/> <xsl:with-param name="s" select="$why"/> </xsl:call-template> </xsl:if> <xsl:copy-of select="$spec"/> </xsl:template> <xsl:template name="spec_name"> <xsl:variable name="name" select="@name"/> <xsl:variable name="arity" select="@arity"/> <xsl:variable name="anchor" select="@anchor"/> <xsl:variable name="since" select="@since"/> <xsl:variable name="spec0"> <xsl:call-template name="find_spec"/> </xsl:variable> <xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/> <xsl:choose> <xsl:when test="ancestor::cref"> <xsl:message terminate="yes"> Error: did not expect a 'name' tag with name/arity attributes here! </xsl:message> </xsl:when> <xsl:when test="ancestor::erlref"> <!-- Do not to use preceding since it is very slow! --> <xsl:variable name="curModule" select="ancestor::erlref/module"/> <xsl:variable name="mfas" select="key('mfa', concat($curModule,':',$name,'/',$arity))"/> <xsl:choose> <xsl:when test="generate-id($mfas[1]) != generate-id(.)"> <!-- Avoid duplicated anchors. See also menu.funcs. --> </xsl:when> <xsl:otherwise> <a name="{$name}-{$arity}"></a> </xsl:otherwise> </xsl:choose> <!-- Insert an anchor for "anchor" attribute --> <xsl:if test="string-length($anchor) > 0"> <a name="{$anchor}"></a> </xsl:if> <xsl:variable name="global_types" select="ancestor::erlref/datatypes"/> <xsl:variable name="local_types" select="../type[string-length(@name) > 0]"/> <xsl:apply-templates select="$spec/contract/clause/head"> <xsl:with-param name="ghlink" select="ancestor-or-self::*[@ghlink]/@ghlink"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> <xsl:with-param name="since" select="$since"/> </xsl:apply-templates> </xsl:when> </xsl:choose> </xsl:template> <xsl:template match="head"> <xsl:param name="ghlink"/> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <xsl:param name="since"/> <xsl:variable name="id" select="concat(concat(concat(concat(../../../name,'-'),../../../arity),'-'),generate-id(.))"/> <table class="func-table"> <tr class="func-tr"> <td class="func-td"> <div class="bold_code func-head" onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';" onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';"> <xsl:call-template name="ghlink"> <xsl:with-param name="ghlink" select="$ghlink"/> <xsl:with-param name="id" select="$id"/> </xsl:call-template> <xsl:apply-templates mode="local_type"> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:apply-templates> </div> </td> <td class="func-since-td"> <xsl:if test="string-length($since) > 0"> <span class="since"><xsl:value-of select="$since"/> </span> </xsl:if> </td> </tr> </table> </xsl:template> <!-- The *last* <name name="..." arity=".."/> --> <xsl:template match="name" mode="types"> <xsl:variable name="name" select="@name"/> <xsl:variable name="arity" select="@arity"/> <xsl:variable name="spec0"> <xsl:call-template name="find_spec"/> </xsl:variable> <xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/> <xsl:variable name="clause" select="$spec/contract/clause"/> <xsl:variable name="global_types" select="ancestor::erlref/datatypes"/> <xsl:variable name="type_desc" select="../type_desc"/> <!-- $type is data types to be presented as guards ("local types") --> <xsl:variable name="type" select="../type[string-length(@name) > 0 or string-length(@variable) > 0]"/> <xsl:variable name="type_variables" select ="$type[string-length(@variable) > 0]"/> <xsl:variable name="local_types" select ="$type[string-length(@name) > 0]"/> <xsl:variable name="output_subtypes" select="count($type_variables) = 0"/> <!-- It is assumed there is no support for overloaded specs (there is no spec with more than one clause) --> <xsl:if test="count($clause/guard) > 0 or count($type) > 0"> <div class="REFBODY fun-types"> <h3 class="func-types-title">Types</h3> <xsl:choose> <xsl:when test="$output_subtypes"> <xsl:call-template name="subtype"> <xsl:with-param name="subtype" select="$clause/guard/subtype"/> <xsl:with-param name="type_desc" select="$type_desc"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:call-template name="type_variables"> <xsl:with-param name="type_variables" select="$type_variables"/> <xsl:with-param name="type_desc" select="$type_desc"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> <xsl:with-param name="fname" select="$name"/> <xsl:with-param name="arity" select="$arity"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> <xsl:call-template name="local_type"> <xsl:with-param name="type_desc" select="$type_desc"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:call-template> </div> </xsl:if> </xsl:template> <!-- Handle <type variable="..." name_i="..."/> --> <xsl:template name="type_variables"> <xsl:param name="type_variables"/> <xsl:param name="type_desc"/> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <xsl:param name="fname"/> <xsl:param name="arity"/> <xsl:variable name="names" select="../name[string-length(@arity) > 0]"/> <xsl:for-each select="$type_variables"> <xsl:variable name="name_i"> <xsl:choose> <xsl:when test="string-length(@name_i) > 0"> <xsl:value-of select="@name_i"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="count($names)"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="spec0"> <xsl:for-each select="$names[position() = $name_i]"> <xsl:call-template name="find_spec"/> </xsl:for-each> </xsl:variable> <xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/> <xsl:variable name="clause" select="$spec/contract/clause"/> <xsl:variable name="variable" select="@variable"/> <xsl:variable name="subtype" select="$clause/guard/subtype[typename = $variable]"/> <xsl:if test="count($subtype) = 0"> <xsl:call-template name="err"> <xsl:with-param name="f" select="ancestor::erlref/module"/> <xsl:with-param name="n" select="$fname"/> <xsl:with-param name="a" select="$arity"/> <xsl:with-param name="s">unknown type variable <xsl:value-of select="$variable"/> </xsl:with-param> </xsl:call-template> </xsl:if> <xsl:call-template name="subtype"> <xsl:with-param name="subtype" select="$subtype"/> <xsl:with-param name="type_desc" select="$type_desc"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:call-template> </xsl:for-each> </xsl:template> <xsl:template name="subtype"> <xsl:param name="subtype"/> <xsl:param name="type_desc"/> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <xsl:for-each select="$subtype"> <xsl:variable name="tname" select="typename"/> <div class="REFTYPES rt-1"> <span class="bold_code bc-2"> <xsl:apply-templates select="string" mode="local_type"> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:apply-templates> </span> </div> <xsl:apply-templates select="$type_desc[@variable = $tname]"/> </xsl:for-each> </xsl:template> <xsl:template name="local_type"> <xsl:param name="type_desc"/> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <xsl:for-each select="$local_types"> <div class="REFTYPES rt-2"> <xsl:call-template name="type_name"> <xsl:with-param name="mode" select="'local_type'"/> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:call-template> </div> <xsl:variable name="tname" select="@name"/> <xsl:variable name="tnvars" select="@n_vars"/> <xsl:apply-templates select= "$type_desc[@name = $tname and (@n_vars = $tnvars or string-length(@n_vars) = 0 and string-length($tnvars) = 0)]"/> </xsl:for-each> </xsl:template> <!-- Note: <type_desc> has not been implemented for data types. --> <!-- Similar to <d> --> <xsl:template match="type_desc"> <div class="REFBODY rb-1"> <xsl:apply-templates/> </div> </xsl:template> <!-- This is for debugging. All modules! --> <xsl:template match="all_etypes"> <xsl:for-each select= "$i//type"> <pre> <span class="bold_code bc-3"> <xsl:apply-templates select="typedecl"/> </span><xsl:text> </xsl:text> </pre> </xsl:for-each> </xsl:template> <!-- Datatypes --> <xsl:template match="datatypes"> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Data Types</xsl:with-param> </xsl:call-template> <xsl:apply-templates/> </div> </xsl:template> <!-- Datatype Title, is the really needed? not used by anything --> <xsl:template match="datatype_title"> <xsl:variable name="title" select="."/> <h4> <xsl:call-template name="title_link"> <xsl:with-param name="title"><xsl:apply-templates/></xsl:with-param> <xsl:with-param name="link" select="$title"/> </xsl:call-template> </h4> </xsl:template> <!-- Datatype --> <xsl:template match="datatype"> <div class="data-types-body"> <xsl:choose> <xsl:when test="string-length(name/@name) > 0"> <xsl:variable name="id" select="concat('type-',name/@name)"/> <div class="data-type-name" onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';" onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';"> <xsl:call-template name="ghlink"> <xsl:with-param name="id" select="$id"/> </xsl:call-template> <xsl:apply-templates select="name"/> </div> </xsl:when> <xsl:otherwise> <div class="data-type-name"> <xsl:apply-templates select="name"/> </div> </xsl:otherwise> </xsl:choose> <div class="data-type-desc"><xsl:apply-templates select="desc"/></div> </div> </xsl:template> <!-- The "mode" attribute of apply has been used to separate the case when datatypes are copied into specifications' subtypes. A local type has no anchor. There are no links to local types from local types or guards/head of the same specification. --> <xsl:template name="type_name"> <xsl:param name="mode"/> <!-- '' if <datatype> --> <xsl:param name="local_types" select="/.."/> <xsl:param name="global_types" select="/.."/> <xsl:variable name="curModule" select="ancestor::erlref/module"/> <xsl:variable name="mod" select="@mod"/> <xsl:variable name="name" select="@name"/> <xsl:variable name="n_vars" select="@n_vars"/> <xsl:choose> <xsl:when test="string-length($name) > 0"> <xsl:variable name="type" select= "$i/specs/module[@name=$curModule]/type [name=$name and (string-length($n_vars) = 0 or n_vars = $n_vars) and (string-length($mod) = 0 or module = $mod)]"/> <xsl:if test="count($type) != 1"> <xsl:variable name="why"> <xsl:choose> <xsl:when test="count($type) > 1">ambiguous type</xsl:when> <xsl:when test="count($type) = 0">unknown type</xsl:when> </xsl:choose> </xsl:variable> <xsl:call-template name="err"> <xsl:with-param name="f" select="$curModule"/> <xsl:with-param name="m" select="$mod"/> <xsl:with-param name="n" select="$name"/> <xsl:with-param name="a" select="$n_vars"/> <xsl:with-param name="s" select="$why"/> </xsl:call-template> </xsl:if> <xsl:choose> <xsl:when test="$mode = ''"> <xsl:apply-templates select="$type/typedecl"/> </xsl:when> <xsl:when test="$mode = 'local_type'"> <xsl:apply-templates select="$type/typedecl" mode="local_type"> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:apply-templates> </xsl:when> </xsl:choose> </xsl:when> <xsl:otherwise> <!-- <datatype> with <name> --> <xsl:call-template name="name"/> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template match="typehead"> <span class="bold_code bc-4"> <xsl:apply-templates/> </span><br/> </xsl:template> <xsl:template match="typehead" mode="local_type"> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <span class="bold_code bc-5"> <xsl:apply-templates mode="local_type"> <xsl:with-param name="local_types" select="$local_types"/> <xsl:with-param name="global_types" select="$global_types"/> </xsl:apply-templates> </span><br/> </xsl:template> <!-- Not used right now --> <!-- local_defs --> <xsl:template match="local_defs"> <div class="REFBODY rb-2"> <xsl:apply-templates> </xsl:apply-templates> </div> </xsl:template> <!-- Not used right now --> <xsl:template match="local_def"> <div class="REFTYPES rt-3"> <span class="bold_code bc-6"> <xsl:apply-templates/> </span> </div> </xsl:template> <!-- Used both in <datatype> and in <func>! --> <xsl:template match="anno"> <xsl:variable name="curModule" select="ancestor::erlref/module"/> <xsl:variable name="anno" select="normalize-space(text())"/> <xsl:variable name="namespec" select="ancestor::type_desc/preceding-sibling::name | ancestor::desc/preceding-sibling::name"/> <xsl:if test="count($namespec) = 0 and string-length($specs_file) > 0"> <xsl:call-template name="err"> <xsl:with-param name="f" select="$curModule"/> <xsl:with-param name="s">cannot find tag 'name' (anno <xsl:value-of select="$anno"/>) </xsl:with-param> </xsl:call-template> </xsl:if> <!-- Search "local types" as well --> <xsl:variable name="local_types" select="ancestor::desc/preceding-sibling::type [string-length(@name) > 0] | ancestor::type_desc/preceding-sibling::type [string-length(@name) > 0]"/> <xsl:variable name="has_anno_in_local_type"> <xsl:for-each select="$local_types"> <xsl:call-template name="anno_name"> <xsl:with-param name="curModule" select="$curModule"/> <xsl:with-param name="anno" select="$anno"/> </xsl:call-template> </xsl:for-each> </xsl:variable> <xsl:variable name="has_anno"> <xsl:for-each select="$namespec"> <xsl:call-template name="anno_name"> <xsl:with-param name="curModule" select="$curModule"/> <xsl:with-param name="anno" select="$anno"/> </xsl:call-template> </xsl:for-each> </xsl:variable> <xsl:if test="$has_anno = '' and $has_anno_in_local_type = ''"> <xsl:call-template name="err"> <xsl:with-param name="f" select="$curModule"/> <xsl:with-param name="m" select="$namespec/@mod"/> <xsl:with-param name="n" select="$namespec/@name"/> <xsl:with-param name="a" select="'-'"/> <xsl:with-param name="s">unknown annotation <xsl:value-of select="$anno"/> </xsl:with-param> </xsl:call-template> </xsl:if> <xsl:value-of select="$anno"/> </xsl:template> <xsl:template name="anno_name"> <xsl:param name="curModule"/> <xsl:param name="anno"/> <xsl:variable name="mod" select="@mod"/> <xsl:variable name="name" select="@name"/> <xsl:variable name="arity" select="@arity"/> <xsl:variable name="n_vars" select="@n_vars"/> <xsl:variable name="clause_i" select="@clause_i"/> <xsl:variable name="spec0" select= "$i/specs/module[@name=$curModule]/spec [name=$name and arity=$arity and (string-length($mod) = 0 or module = $mod)]"/> <xsl:variable name="spec_annos" select= "$spec0[string-length($clause_i) = 0 or position() = $clause_i]/anno[.=$anno]"/> <xsl:variable name="type_annos" select= "$i/specs/module[@name=$curModule]/type [name=$name and (string-length($n_vars) = 0 or n_vars=$n_vars) and (string-length($mod) = 0 or module = $mod)]/anno[.=$anno]"/> <xsl:if test="count($spec_annos) != 0 or count($type_annos) != 0 or string-length($specs_file) = 0"> <xsl:value-of select="true()"/> </xsl:if> </xsl:template> <!-- Used for indentation of formatted types and specs --> <xsl:template match="nbsp"> <xsl:text>&#160;</xsl:text> </xsl:template> <xsl:template match="nbsp" mode="local_type"> <xsl:apply-templates select="."/> </xsl:template> <xsl:template match="br" mode="local_type"> <xsl:apply-templates select="."/> </xsl:template> <xsl:template match="marker" mode="local_type"> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <!-- Craete no anchor --> <!-- It would be possible to create a link to the global type (if there is one), but that would mean even more code... --> <xsl:apply-templates/> </xsl:template> <!-- Does not look at @n_vars --> <xsl:template match="node()[starts-with(name(), 'see')]" mode="local_type"> <xsl:param name="local_types"/> <xsl:param name="global_types"/> <xsl:variable name="filepart"><xsl:value-of select="substring-before(@marker, '#')"/></xsl:variable> <xsl:variable name="linkpart"><xsl:value-of select="translate(substring-after(@marker, '#'), '/', '-')"/></xsl:variable> <xsl:choose> <xsl:when test="string-length($filepart) > 0"> <xsl:call-template name="seealso"/> </xsl:when> <xsl:when test="count($local_types[@name = $linkpart]) = 0"> <xsl:call-template name="seealso"/> </xsl:when> <xsl:when test="count($global_types/datatype/name[@name = $linkpart]) > 0"> <!-- The type is both local and global; link to the global type --> <xsl:call-template name="seealso"/> </xsl:when> <xsl:otherwise> <!-- No link to local type --> <xsl:apply-templates/> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- End of Dialyzer type/spec tags --> <!-- Cache for each module all the elements used for navigation. --> <xsl:variable name="erlref.nav" select="exsl:node-set($erlref.nav_rtf)"/> <xsl:variable name="erlref.nav_rtf"> <xsl:for-each select="//erlref"> <xsl:variable name="cval" select="module"/> <xsl:variable name="link_cval"><xsl:value-of select="translate($cval, '&#173;', '')"/></xsl:variable> <module name="{$cval}"> <xsl:call-template name="menu.funcs"> <xsl:with-param name="entries" select="funcs/func/name"/> <xsl:with-param name="cval" select="$cval"/> <xsl:with-param name="basename" select="$link_cval"/> </xsl:call-template> </module> </xsl:for-each> </xsl:variable> <!-- Page layout --> <xsl:template name="pagelayout"> <xsl:param name="chapnum"/> <xsl:param name="curModule"/> <html> <head> <xsl:choose> <xsl:when test="string-length($stylesheet) > 0"> <link rel="stylesheet" href="{$topdocdir}/{$stylesheet}" type="text/css"/> </xsl:when> <xsl:otherwise> <link rel="stylesheet" href="{$topdocdir}/otp_doc.css" type="text/css"/> </xsl:otherwise> </xsl:choose> <xsl:choose> <xsl:when test="string-length($winprefix) > 0"> <title><xsl:value-of select="$winprefix"/> -- <xsl:value-of select="header/title"/></title> </xsl:when> <xsl:otherwise> <title>Erlang -- <xsl:value-of select="header/title"/></title> </xsl:otherwise> </xsl:choose> </head> <body> <div id="container"> <script id="js" type="text/javascript" language="JavaScript" src="{$topdocdir}/js/flipmenu/flipmenu.js"/> <script id="js2" type="text/javascript" src="{$topdocdir}/js/erlresolvelinks.js"></script> <script language="JavaScript" type="text/javascript"> <xsl:text disable-output-escaping="yes"><![CDATA[ <!-- function getWinHeight() { var myHeight = 0; if( typeof( window.innerHeight ) == 'number' ) { //Non-IE myHeight = window.innerHeight; } else if( document.documentElement && ( document.documentElement.clientWidth || document.documentElement.clientHeight ) ) { //IE 6+ in 'standards compliant mode' myHeight = document.documentElement.clientHeight; } else if( document.body && ( document.body.clientWidth || document.body.clientHeight ) ) { //IE 4 compatible myHeight = document.body.clientHeight; } return myHeight; } function setscrollpos() { var objf=document.getElementById('loadscrollpos'); document.getElementById("leftnav").scrollTop = objf.offsetTop - getWinHeight()/2; } function addEvent(obj, evType, fn){ if (obj.addEventListener){ obj.addEventListener(evType, fn, true); return true; } else if (obj.attachEvent){ var r = obj.attachEvent("on"+evType, fn); return r; } else { return false; } } addEvent(window, 'load', setscrollpos); //-->]]></xsl:text> </script> <!-- Generate menu --> <xsl:call-template name="menu"> <xsl:with-param name="chapnum" select="$chapnum"/> <xsl:with-param name="curModule" select="$curModule"/> </xsl:call-template> <div id="content"> <!-- Insert the node-specific content --> <xsl:call-template name="content"> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> <div class="footer"> <hr/> <p> <xsl:value-of select="$copyright"/> <xsl:value-of select="/book/header/copyright/year[1]"/> <xsl:text>-</xsl:text> <xsl:value-of select="substring-after(normalize-space(substring-after($gendate, ' ')), ' ')"/> <xsl:text> </xsl:text> <xsl:value-of select="/book/header/copyright/holder"/> </p> </div> </div> </div> <script type="text/javascript"><xsl:text>window.__otpTopDocDir = '</xsl:text><xsl:value-of select="$topdocdir"/><xsl:text>/js/';</xsl:text></script> <script type="text/javascript" src="{$topdocdir}/js/highlight.js"/> </body> </html> </xsl:template> <!-- Content --> <xsl:template name="content"> <xsl:param name="chapnum"/> <xsl:variable name="lname"><xsl:value-of select="local-name()"/></xsl:variable> <div class="innertube"> <xsl:if test="$lname = 'releasenotes'"> <!-- .../part --> <xsl:call-template name="releasenotes.content" /> </xsl:if> <xsl:if test="$lname = 'part'"> <!-- .../part --> <xsl:call-template name="part.content" /> </xsl:if> <xsl:if test="$lname = 'internal'"> <!-- .../internals --> <xsl:call-template name="internal.content" /> </xsl:if> <xsl:if test="$lname = 'chapter'"> <!-- .../part/chapter --> <xsl:call-template name="chapter.content"> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </xsl:if> <xsl:if test="$lname = 'application'"> <!-- .../application --> <xsl:call-template name="app.content" /> </xsl:if> </div> <xsl:if test="$lname = 'erlref' or $lname = 'cref' or $lname= 'comref' or $lname= 'fileref' or $lname= 'appref'"> <!-- .../application/*ref --> <xsl:comment> refpage </xsl:comment> <xsl:call-template name="ref.content" /> </xsl:if> </xsl:template> <!-- Menu --> <xsl:template name="menu"> <xsl:param name="chapnum"/> <xsl:param name="curModule"/> <xsl:if test="(local-name() = 'part') or ((local-name() = 'chapter') and ancestor::part)"> <!-- .../part or .../part/chapter --> <xsl:call-template name="menu.ug"> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </xsl:if> <xsl:if test="(local-name() = 'internal' and descendant::chapter) or ((local-name() = 'chapter') and ancestor::internal)"> <!-- .../internal or .../internal/chapter --> <xsl:call-template name="menu.internal.ug"> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </xsl:if> <xsl:if test="(local-name() = 'internal' and descendant::erlref) or (((local-name() = 'erlref') or (local-name() = 'comref') or (local-name() = 'cref') or (local-name() = 'fileref') or (local-name() = 'appref')) and ancestor::internal)"> <!-- .../internal,.../internal/erlref, .../internal/comref or .../internal/cref or .../internal/fileref or .../internal/appref --> <xsl:call-template name="menu.internal.ref"> <xsl:with-param name="curModule" select="$curModule"/> </xsl:call-template> </xsl:if> <xsl:if test="(local-name() = 'application') or (((local-name() = 'erlref') or (local-name() = 'comref') or (local-name() = 'cref') or (local-name() = 'fileref') or (local-name() = 'appref')) and ancestor::application)"> <!-- .../application,.../application/erlref, .../application/comref or .../application/cref or .../application/fileref or .../application/appref --> <xsl:call-template name="menu.ref"> <xsl:with-param name="curModule" select="$curModule"/> </xsl:call-template> </xsl:if> <xsl:if test="(local-name() = 'releasenotes') or ((local-name() = 'chapter') and ancestor::releasenotes)"> <!-- releasenotes --> <xsl:call-template name="menu.rn"> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </xsl:if> </xsl:template> <xsl:template name="erlang_logo"> <xsl:choose> <xsl:when test="string-length($logo) > 0"> <div class="erlang-logo-wrapper"> <a href="{$topdocdir}/index.html"><img alt="Erlang Logo" src="{$topdocdir}/{$logo}" class="erlang-logo"/></a> </div> </xsl:when> <xsl:otherwise> <div class="erlang-logo-wrapper"> <a href="{$topdocdir}/index.html"><img alt="Erlang Logo" src="{$topdocdir}/erlang-logo.png" class="erlang-logo"/></a> </div> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template name="menu_top"> <ul class="panel-sections"> <xsl:if test="boolean(/book/parts/part)"> <li><a href="users_guide.html">User's Guide</a></li> </xsl:if> <xsl:if test="boolean(/book/applications)"> <li><a href="index.html">Reference Manual</a></li> </xsl:if> <xsl:if test="boolean(/book/internals)"> <li><a href="internal_docs.html">Internal Documentation</a></li> </xsl:if> <xsl:if test="boolean(/book/releasenotes)"> <li><a href="release_notes.html">Release Notes</a></li> </xsl:if> <xsl:choose> <xsl:when test="string-length($pdfname) > 0"> <li><a href="{$pdfdir}/{$pdfname}.pdf">PDF</a></li> </xsl:when> <xsl:otherwise> <li><a href="{$pdfdir}/{$appname}-{$appver}.pdf">PDF</a></li> </xsl:otherwise> </xsl:choose> <li><a href="{$topdocdir}/index.html">Top</a></li> </ul> </xsl:template> <xsl:template name="menu_middle"> <!-- small> <xsl:choose> <xsl:when test="ancestor::parts"> <a href="users_guide_bibliography.html">Bibliography</a><br/> <a href="users_guide_glossary.html">Glossary</a><br/> </xsl:when> <xsl:when test="ancestor::applications"> <a href="ref_man_bibliography.html">Bibliography</a><br/> <a href="ref_man_glossary.html">Glossary</a><br/> </xsl:when> </xsl:choose> </small --> <ul class="expand-collapse-items"> <li><a href="javascript:openAllFlips()">Expand All</a></li> <li><a href="javascript:closeAllFlips()">Contract All</a></li> </ul> </xsl:template> <!-- Book --> <xsl:template match="/book"> <xsl:apply-templates select="parts"/> <xsl:apply-templates select="applications"/> <xsl:apply-templates select="internals"/> <xsl:apply-templates select="releasenotes"/> </xsl:template> <!-- Parts --> <xsl:template match="parts"> <xsl:apply-templates select="part"/> </xsl:template> <!-- Applications --> <xsl:template match="applications"> <xsl:apply-templates select="application"/> </xsl:template> <!-- Internals --> <xsl:template match="internals"> <xsl:apply-templates select="internal"/> </xsl:template> <!-- Header --> <xsl:template match="header"/> <!-- Section/Title --> <xsl:template match="section/title|fsdescription/title"/> <xsl:template match="pagetext"/> <!-- Chapter/Section, subsection level 1--> <xsl:template match="chapter/section"> <xsl:param name="chapnum"/> <h3> <xsl:for-each select="marker"> <xsl:call-template name="marker-before-title"/> </xsl:for-each> <xsl:call-template name="title_link"> <xsl:with-param name="title"> <xsl:value-of select="$chapnum"/>.<xsl:number/>&#160; <xsl:value-of select="title"/> </xsl:with-param> </xsl:call-template> </h3> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> <xsl:with-param name="sectnum"><xsl:number/></xsl:with-param> </xsl:apply-templates> </xsl:template> <!-- Subsections lvl 2 --> <xsl:template match="section/section"> <xsl:param name="chapnum"/> <xsl:param name="sectnum"/> <h4> <xsl:for-each select="marker"> <xsl:call-template name="marker-before-title"/> </xsl:for-each> <!-- xsl:value-of select="$partnum"/>.<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$sectnum"/>.<xsl:number/ --> <xsl:call-template name="title_link"> <xsl:with-param name="title" select="title"/> </xsl:call-template> </h4> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </xsl:template> <!-- Subsections lvl 3 and ... --> <xsl:template match="section/section/section"> <xsl:param name="chapnum"/> <xsl:param name="sectnum"/> <h5> <xsl:for-each select="marker"> <xsl:call-template name="marker-before-title"/> </xsl:for-each> <!-- xsl:value-of select="$partnum"/>.<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$sectnum"/>.<xsl:number/ --> <xsl:value-of select="title"/> </h5> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </xsl:template> <!-- *ref/Section --> <xsl:template match="erlref/section|cref/section|comref/section|fileref/section|appref/section|funcs/fsdescription"> <xsl:param name="chapnum"/> <div class="innertube"> <h3> <xsl:for-each select="marker"> <xsl:call-template name="marker-before-title"/> </xsl:for-each> <xsl:call-template name="title_link"> <xsl:with-param name="title" select="title"/> </xsl:call-template> </h3> <div class="REFBODY rb-3"> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- *ref/Subsection --> <xsl:template match="erlref/section/section|cref/section/section|comref/section/section|fileref/section/section|appref/section/section"> <xsl:param name="chapnum"/> <xsl:param name="sectnum"/> <h4> <xsl:value-of select="title"/> </h4> <div class="REFBODY rb-4"> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </div> </xsl:template> <!-- Lists --> <xsl:template match="list"> <xsl:param name="chapnum"/> <ul> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </ul> </xsl:template> <xsl:template match="list/item"> <xsl:param name="chapnum"/> <li> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </li> </xsl:template> <xsl:template match="taglist"> <xsl:param name="chapnum"/> <dl> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </dl> </xsl:template> <xsl:template match="taglist/tag"> <xsl:param name="chapnum"/> <dt> <strong> <xsl:apply-templates/> </strong> </dt> </xsl:template> <xsl:template match="taglist/item"> <xsl:param name="chapnum"/> <dd> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </dd> </xsl:template> <!-- Note --> <xsl:template match="note"> <xsl:param name="chapnum"/> <div class="note"> <div class="label">Note</div> <div class="content"> <p> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </p> </div> </div> </xsl:template> <!-- Warning --> <xsl:template match="warning"> <xsl:param name="chapnum"/> <div class="warning"> <div class="label">Warning</div> <div class="content"> <p> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </p> </div> </div> </xsl:template> <!-- Do --> <xsl:template match="do"> <xsl:param name="chapnum"/> <div class="do"> <div class="label">Do</div> <div class="content"> <p> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </p> </div> </div> </xsl:template> <!-- Dont --> <xsl:template match="dont"> <xsl:param name="chapnum"/> <div class="dont"> <div class="label">Don't</div> <div class="content"> <p> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </p> </div> </div> </xsl:template> <!-- Quote --> <xsl:template match="quote"> <xsl:param name="chapnum"/> <div class="quote"> <p> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </p> </div> </xsl:template> <!-- Paragraph --> <xsl:template match="p"> <p> <xsl:apply-templates/> </p> </xsl:template> <!-- Inline elements --> <xsl:template match="i"> <i><xsl:apply-templates/></i> </xsl:template> <xsl:template match="br"> <br/> </xsl:template> <xsl:template match="c"> <span class="code"><xsl:apply-templates/></span> </xsl:template> <xsl:template match="em"> <strong><xsl:apply-templates/></strong> </xsl:template> <xsl:template match="strong"> <strong><xsl:apply-templates/></strong> </xsl:template> <!-- Code --> <xsl:template match="code"> <xsl:param name="chapnum"/> <xsl:variable name="type" select="@type"/> <xsl:variable name="codenum"> <xsl:number level="any" from="chapter" count="code"/> </xsl:variable> <xsl:choose> <xsl:when test="not(descendant::anno)"> <div class="example example-{$type}"><pre><xsl:value-of select="erl:code_trim(text())"/></pre></div> </xsl:when> <xsl:otherwise> <div class="example example-{$type}"><pre><xsl:apply-templates/></pre></div> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- Pre --> <xsl:template match="pre"> <xsl:param name="chapnum"/> <xsl:variable name="codenum"> <xsl:number level="any" from="chapter" count="code"/> </xsl:variable> <div class="example"><pre><xsl:apply-templates/></pre></div> </xsl:template> <!-- Table --> <xsl:template match="table"> <xsl:param name="chapnum"/> <xsl:variable name="tabnum"> <xsl:number level="any" from="chapter" count="table"/> </xsl:variable> <div class="doc-table-wrapper"> <table class="doc-table"> <!-- tbody--> <xsl:apply-templates select="row"> <xsl:with-param name="chapnum" select="$chapnum"/> <xsl:with-param name="tabnum" select="$tabnum"/> </xsl:apply-templates> <!-- /tbody--> </table> <xsl:apply-templates select="tcaption"> <xsl:with-param name="chapnum" select="$chapnum"/> <xsl:with-param name="tabnum" select="$tabnum"/> </xsl:apply-templates> </div> </xsl:template> <xsl:template match="row"> <tr> <xsl:apply-templates/> </tr> </xsl:template> <xsl:template match="cell"> <td align="left" valign="middle"> <xsl:apply-templates/> </td> </xsl:template> <xsl:template match="tcaption"> <xsl:param name="chapnum"/> <xsl:param name="tabnum"/> <p class="doc-table-caption">Table <xsl:value-of select="$chapnum"/>.<xsl:value-of select="$tabnum"/>: &#160; <xsl:apply-templates/> </p> </xsl:template> <!-- Image --> <xsl:template match="image"> <xsl:param name="chapnum"/> <xsl:variable name="fignum"> <xsl:number level="any" from="chapter" count="image"/> </xsl:variable> <div class="doc-image-wrapper"> <xsl:choose> <xsl:when test="substring(@file, (string-length(@file) - string-length('.svg')) + 1) = '.svg'"> <object alt="IMAGE MISSING" data="{@file}" class="doc-svg doc-image"> </object> </xsl:when> <xsl:when test="@width"> <img alt="IMAGE MISSING" width="{@width}" src="{@file}" class="doc-image"/> </xsl:when> <xsl:otherwise> <img alt="IMAGE MISSING" src="{@file}" class="doc-image"/> </xsl:otherwise> </xsl:choose> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> <xsl:with-param name="fignum" select="$fignum"/> </xsl:apply-templates> </div> </xsl:template> <xsl:template match="icaption"> <xsl:param name="chapnum"/> <xsl:param name="fignum"/> <p class="doc-image-caption">Figure <xsl:value-of select="$chapnum"/>.<xsl:value-of select="$fignum"/>: &#160; <xsl:apply-templates/> </p> </xsl:template> <!-- Internal Docs --> <!-- Part --> <xsl:template match="internal"> <xsl:document href="{$outdir}/internal_docs.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"/> </xsl:document> </xsl:template> <!-- Part content--> <xsl:template name="internal.content"> <div class="frontpage"/> <center><h1><xsl:value-of select="/book/header/title"/> Internal Docs</h1></center> <center><h4>Version <xsl:value-of select="$appver"/></h4></center> <center><h4><xsl:value-of select="$gendate"/></h4></center> <div class="extrafrontpageinfo"> <center><xsl:value-of select="$extra_front_page_info"/></center> </div> <xsl:apply-templates select="chapter|erlref"/> </xsl:template> <!-- Menu.internal.chapter --> <xsl:template name="menu.internal.ug"> <xsl:param name="chapnum"/> <div id="leftnav"> <div class="leftnav-tube"> <xsl:call-template name="erlang_logo"/> <p class="section-title"><xsl:value-of select="/book/header/title"/></p> <p class="section-subtitle">Internal Documentation</p> <p class="section-version">Version <xsl:value-of select="$appver"/></p> <xsl:call-template name="menu_top"/> <xsl:call-template name="menu_middle"/> <h3>Chapters</h3> <ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu"> <xsl:call-template name="menu.chapter"> <xsl:with-param name="entries" select="/book/internals/internal/chapter[header/title]"/> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </ul> </div> </div> </xsl:template> <!-- Menu.internal.ref --> <xsl:template name="menu.internal.ref"> <xsl:param name="curModule"/> <div id="leftnav"> <div class="leftnav-tube"> <xsl:call-template name="erlang_logo"/> <p class="section-title"><xsl:value-of select="/book/header/title"/></p> <p class="section-subtitle">Reference Manual</p> <p class="section-version">Version <xsl:value-of select="$appver"/></p> <xsl:call-template name="menu_top"/> <xsl:call-template name="menu_middle"/> <h3>Table of Contents</h3> <ul class="flipMenu"> <xsl:call-template name="menu.ref2"> <xsl:with-param name="entries" select="/book/internals/internal/erlref[module]|/book/internals/internal/cref[lib]|/book/internals/internal/comref[com]|/book/internals/internal/fileref[file]|/book/internals/internal/appref[app]"/> <!--xsl:with-param name="genFuncMenu" select="true"/--> <xsl:with-param name="curModule" select="$curModule"/> </xsl:call-template> </ul> </div> </div> </xsl:template> <!--Users Guide --> <!-- Part --> <xsl:template match="part"> <!-- Generate Glossary for Users Guide --> <!--xsl:call-template name="glossary"> <xsl:with-param name="type">users_guide</xsl:with-param> </xsl:call-template--> <!-- Generate Bibliography for Users Guide --> <!--xsl:call-template name="bibliography"> <xsl:with-param name="type">users_guide</xsl:with-param> </xsl:call-template--> <xsl:document href="{$outdir}/users_guide.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"/> </xsl:document> </xsl:template> <!-- Part content--> <xsl:template name="part.content"> <div class="frontpage"/> <center><h1><xsl:value-of select="/book/header/title"/> User's Guide</h1></center> <center><h4>Version <xsl:value-of select="$appver"/></h4></center> <center><h4><xsl:value-of select="$gendate"/></h4></center> <div class="extrafrontpageinfo"> <center><xsl:value-of select="$extra_front_page_info"/></center> </div> <xsl:apply-templates select="chapter"/> </xsl:template> <!-- Menu.ug --> <xsl:template name="menu.ug"> <xsl:param name="chapnum"/> <div id="leftnav"> <div class="leftnav-tube"> <xsl:call-template name="erlang_logo"/> <p class="section-title"><xsl:value-of select="/book/header/title"/></p> <p class="section-subtitle">User's Guide</p> <p class="section-version">Version <xsl:value-of select="$appver"/></p> <xsl:call-template name="menu_top"/> <xsl:call-template name="menu_middle"/> <h3>Chapters</h3> <ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu"> <xsl:call-template name="menu.chapter"> <xsl:with-param name="entries" select="/book/parts/part/chapter[header/title]"/> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </ul> </div> </div> </xsl:template> <xsl:template name="menu.chapter"> <xsl:param name="entries"/> <xsl:param name="chapnum"/> <xsl:for-each select="$entries"> <xsl:variable name="chapter_file"> <xsl:value-of select='substring-before(header/file, ".xml")'/> </xsl:variable> <xsl:variable name="curchapnum"><xsl:number/></xsl:variable> <xsl:variable name="expanded"> <xsl:choose> <xsl:when test="$chapnum = $curchapnum">true</xsl:when> <xsl:otherwise>false</xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="loadscrollpos"> <xsl:choose> <xsl:when test="$chapnum = $curchapnum">loadscrollpos</xsl:when> <xsl:otherwise>no</xsl:otherwise> </xsl:choose> </xsl:variable> <li id="{$loadscrollpos}" title="{header/title}" expanded="{$expanded}"> <xsl:value-of select="header/title"/> <ul> <li> <a href="{$chapter_file}.html"> Top of chapter </a> </li> <xsl:call-template name="menu.section"> <xsl:with-param name="entries" select="section[title]"/> <xsl:with-param name="chapter_file"><xsl:value-of select="$chapter_file"/></xsl:with-param> </xsl:call-template> </ul> </li> </xsl:for-each> </xsl:template> <xsl:template name="menu.section"> <xsl:param name="entries"/> <xsl:param name="chapter_file"/> <xsl:for-each select="$entries"> <li title="{title}"> <a href="{$chapter_file}.html#{erl:to-link(title)}"> <xsl:value-of select="title"/> </a> </li> </xsl:for-each> </xsl:template> <!-- Chapter (if top tag)--> <xsl:template match="/chapter"> <xsl:document href="{substring-before(header/file, '.xml')}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="chapnum"><xsl:number/></xsl:with-param> </xsl:call-template> </xsl:document> </xsl:template> <!-- Chapter --> <xsl:template match="chapter"> <xsl:document href="{substring-before(header/file, '.xml')}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="chapnum"><xsl:number/></xsl:with-param> </xsl:call-template> </xsl:document> </xsl:template> <!-- Chapter content--> <xsl:template name="chapter.content"> <xsl:param name="chapnum"/> <!-- center--> <h1> <xsl:value-of select="$chapnum"/>&#160;<xsl:value-of select="header/title"/> </h1> <!-- /center--> <xsl:apply-templates> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:apply-templates> </xsl:template> <!-- Reference Manual --> <!-- Application --> <xsl:template match="application"> <!-- Generate Glossary for Ref. Manual --> <!--xsl:call-template name="glossary"> <xsl:with-param name="type">ref_man</xsl:with-param> </xsl:call-template--> <!-- Generate Bibliography for Ref. Manual --> <!--xsl:call-template name="bibliography"> <xsl:with-param name="type">ref_man</xsl:with-param> </xsl:call-template--> <xsl:document href="{$outdir}/index.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"/> </xsl:document> </xsl:template> <!-- Application content--> <xsl:template name="app.content"> <div class="frontpage"/> <center><h1><xsl:value-of select="/book/header/title"/> Reference Manual</h1></center> <center><h4>Version <xsl:value-of select="$appver"/></h4></center> <center><h4><xsl:value-of select="$gendate"/></h4></center> <div class="extrafrontpageinfo"> <center><xsl:value-of select="$extra_front_page_info"/></center> </div> <xsl:apply-templates select="erlref|cref|comref|fileref|appref"/> </xsl:template> <!-- Menu.ref --> <xsl:template name="menu.ref"> <xsl:param name="curModule"/> <div id="leftnav"> <div class="leftnav-tube"> <xsl:call-template name="erlang_logo"/> <p class="section-title"><xsl:value-of select="/book/header/title"/></p> <p class="section-subtitle">Reference Manual</p> <p class="section-version">Version <xsl:value-of select="$appver"/></p> <xsl:call-template name="menu_top"/> <xsl:call-template name="menu_middle"/> <h3>Table of Contents</h3> <ul class="flipMenu"> <xsl:call-template name="menu.ref2"> <xsl:with-param name="entries" select="/book/applications/application/erlref[module]|/book/applications/application/cref[lib]|/book/applications/application/comref[com]|/book/applications/application/fileref[file]|/book/applications/application/appref[app]"/> <!--xsl:with-param name="genFuncMenu" select="true"/--> <xsl:with-param name="curModule" select="$curModule"/> </xsl:call-template> </ul> </div> </div> </xsl:template> <xsl:template name="menu.ref2"> <xsl:param name="entries"/> <!--xsl:param name="genFuncMenu"/--> <xsl:param name="curModule"/> <xsl:for-each select="$entries"> <xsl:variable name="cval"> <xsl:choose> <xsl:when test="local-name() = 'erlref'"> <xsl:value-of select="module"/> </xsl:when> <xsl:when test="local-name() = 'cref'"> <xsl:value-of select="lib"/> </xsl:when> <xsl:when test="local-name() = 'comref'"> <xsl:value-of select="com"/> </xsl:when> <xsl:when test="local-name() = 'fileref'"> <xsl:value-of select="file"/> </xsl:when> <xsl:when test="local-name() = 'appref'"> <xsl:value-of select="app"/> </xsl:when> </xsl:choose> </xsl:variable> <xsl:variable name="genFuncMenu"> <xsl:choose> <xsl:when test="local-name() = 'comref'">false</xsl:when> <xsl:when test="local-name() = 'appref'">false</xsl:when> <xsl:when test="local-name() = 'fileref'">false</xsl:when> <xsl:when test="descendant::funcs">true</xsl:when> <xsl:otherwise>false</xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="expanded"> <xsl:choose> <xsl:when test="$curModule = $cval">true</xsl:when> <xsl:otherwise>false</xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="loadscrollpos"> <xsl:choose> <xsl:when test="$curModule = $cval">loadscrollpos</xsl:when> <xsl:otherwise>no</xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="link_cval"><xsl:value-of select="translate($cval, '&#173;', '')"/></xsl:variable> <xsl:choose> <xsl:when test="$genFuncMenu = 'true'"> <li id="{$loadscrollpos}" title="{$cval} " expanded="{$expanded}"> <xsl:value-of select="$cval"/> <ul> <li> <a href="{$link_cval}.html"> Top of manual page </a> </li> <xsl:call-template name="nl"/> <xsl:choose> <xsl:when test="local-name() = 'erlref'"> <!-- Use the cached value in order to save time. value-of a string node is _much_ faster than copy-of a rtf --> <xsl:value-of disable-output-escaping="yes" select="$erlref.nav/module[@name = $cval]"/> </xsl:when> <xsl:otherwise> <xsl:call-template name="menu.funcs"> <xsl:with-param name="entries" select="funcs/func/name"/> <xsl:with-param name="basename"><xsl:value-of select="$link_cval"/></xsl:with-param> <xsl:with-param name="cval" select="$cval"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> </ul> </li> </xsl:when> <xsl:otherwise> <xsl:choose> <xsl:when test="local-name() = 'appref'"> <li title="{$cval} (App)"> <a href="{$link_cval}_app.html"> <xsl:value-of select="$cval"/> (App) </a> </li> </xsl:when> <xsl:otherwise> <li title="{$cval}"> <a href="{$link_cval}.html"> <xsl:value-of select="$cval"/> </a> </li> </xsl:otherwise> </xsl:choose> </xsl:otherwise> </xsl:choose> </xsl:for-each> </xsl:template> <xsl:template name="menu.funcs"> <xsl:param name="entries"/> <xsl:param name="basename"/> <xsl:param name="cval"/> <xsl:for-each select="$entries"> <!-- Sort on function name, so the index list in lefthand frame is ordered. --> <xsl:sort select="erl:get_sort_field(.)" data-type="text" case-order="upper-first"/> <xsl:choose> <xsl:when test="ancestor::cref"> <xsl:variable name="fname"><xsl:value-of select="substring-before(nametext, '(')"/></xsl:variable> <xsl:choose> <xsl:when test="string-length($fname) > 0"> <li title="{$fname}"> <a href="{$basename}.html#{$fname}"> <xsl:value-of select="$fname"/>() </a> </li> </xsl:when> <xsl:otherwise> <li title="{name/nametext}"> <a href="{$basename}.html#{name/nametext}"> <xsl:value-of select="nametext"/>() </a> </li> </xsl:otherwise> </xsl:choose> </xsl:when> <xsl:when test="ancestor::erlref"> <xsl:variable name="tmpstring"> <xsl:value-of select="substring-before(substring-after(., '('), '->')"/> </xsl:variable> <xsl:variable name="ustring"> <xsl:choose> <xsl:when test="string-length($tmpstring) > 0"> <xsl:call-template name="remove-paren"> <xsl:with-param name="string" select="$tmpstring"/> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:call-template name="remove-paren"> <xsl:with-param name="string" select="substring-after(., '(')"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="arity"> <xsl:choose> <xsl:when test="string-length(@arity) > 0"> <!-- Dialyzer spec --> <xsl:value-of select="@arity"/> </xsl:when> <xsl:otherwise> <xsl:call-template name="calc-arity"> <xsl:with-param name="string" select="substring-before($ustring, ')')"/> <xsl:with-param name="no-of-pars" select="0"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="fname"> <xsl:choose> <xsl:when test="string-length(@name) > 0"> <!-- Dialyzer spec --> <xsl:value-of select="@name"/> </xsl:when> <xsl:otherwise> <xsl:variable name="fname1"> <xsl:value-of select="substring-before(., '(')"/> </xsl:variable> <xsl:variable name="fname2"> <xsl:value-of select="substring-after($fname1, 'erlang:')"/> </xsl:variable> <xsl:choose> <xsl:when test="string-length($fname2) > 0"> <xsl:value-of select="$fname2"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="$fname1"/> </xsl:otherwise> </xsl:choose> </xsl:otherwise> </xsl:choose> </xsl:variable> <!-- Avoid duplicated entries. See also template "spec_name" --> <!-- Do not to use preceding since it is very slow! --> <xsl:variable name="mfas" select="key('mfa', concat($cval,':',$fname,'/',$arity))"/> <xsl:choose> <xsl:when test="string-length(@name) > 0 and generate-id($mfas[1]) != generate-id(.)"> <!-- Skip. Only works for Dialyzer specs. --> </xsl:when> <xsl:otherwise> <!-- <li title="{$fname}-{$arity}"> <a href="{$basename}.html#{$fname}-{$arity}"> <xsl:value-of select="$fname"/>/<xsl:value-of select="$arity"/> </a> </li> --> <!-- Generate a text node --> <xsl:text>&lt;li title="</xsl:text> <xsl:value-of select="$fname"/> <xsl:text>-</xsl:text> <xsl:value-of select="$arity"/> <xsl:text>">&lt;a href="</xsl:text> <xsl:value-of select="$basename"/> <xsl:text>.html#</xsl:text> <xsl:value-of select="$fname"/> <xsl:text>-</xsl:text> <xsl:value-of select="$arity"/> <xsl:text>"></xsl:text> <xsl:value-of select="$fname"/> <xsl:text>/</xsl:text> <xsl:value-of select="$arity"/> <xsl:text>&lt;/a>&lt;/li></xsl:text> <xsl:call-template name="nl"/> </xsl:otherwise> </xsl:choose> </xsl:when> </xsl:choose> </xsl:for-each> </xsl:template> <!-- Erlref --> <xsl:template match="erlref"> <xsl:variable name="filename"><xsl:value-of select="translate(module, '&#173;', '')"/></xsl:variable> <xsl:document href="{$filename}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="curModule" select="module"/> </xsl:call-template> </xsl:document> </xsl:template> <!-- Cref --> <xsl:template match="cref"> <xsl:document href="{lib}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="curModule" select="lib"/> </xsl:call-template> </xsl:document> </xsl:template> <!-- Comref --> <xsl:template match="comref"> <xsl:document href="{com}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="curModule" select="com"/> </xsl:call-template> </xsl:document> </xsl:template> <!-- Fileref --> <xsl:template match="fileref"> <xsl:document href="{file}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="curModule" select="file"/> </xsl:call-template> </xsl:document> </xsl:template> <!-- Appref --> <xsl:template match="appref"> <xsl:document href="{app}_app.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"> <xsl:with-param name="curModule" select="app"/> </xsl:call-template> </xsl:document> </xsl:template> <!-- *ref content--> <xsl:template name="ref.content"> <xsl:param name="partnum"/> <div class="innertube"> <center> <h1> <xsl:choose> <xsl:when test="local-name() = 'erlref'"> <xsl:value-of select="module"/> </xsl:when> <xsl:when test="local-name() = 'cref'"> <xsl:value-of select="lib"/> </xsl:when> <xsl:when test="local-name() = 'comref'"> <xsl:value-of select="com"/> </xsl:when> <xsl:when test="local-name() = 'fileref'"> <xsl:value-of select="file"/> </xsl:when> <xsl:when test="local-name() = 'appref'"> <xsl:value-of select="app"/> </xsl:when> </xsl:choose> </h1> </center> </div> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </xsl:template> <!-- Module --> <xsl:template match="module"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Module</xsl:with-param> </xsl:call-template> <div class="REFBODY module-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Modulesummary --> <xsl:template match="modulesummary"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Module Summary</xsl:with-param> </xsl:call-template> <div class="REFBODY module-summary-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> <!-- Since --> <xsl:if test="string-length(../module/@since) > 0"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Since</xsl:with-param> </xsl:call-template> <div class="REFBODY module-since"> Module <xsl:value-of select="../module"/> was introduced in <xsl:value-of select="../module/@since"/>. </div> </xsl:if> </div> </xsl:template> <!-- Lib --> <xsl:template match="lib"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">C Library</xsl:with-param> </xsl:call-template> <div class="REFBODY c-library-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Libsummary --> <xsl:template match="libsummary"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Library Summary</xsl:with-param> </xsl:call-template> <div class="REFBODY library-summary-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Com --> <xsl:template match="com"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Command</xsl:with-param> </xsl:call-template> <div class="REFBODY command-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Comsummary --> <xsl:template match="comsummary"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Command Summary</xsl:with-param> </xsl:call-template> <div class="REFBODY command-summary-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- File --> <xsl:template match="file"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">File</xsl:with-param> </xsl:call-template> <div class="REFBODY file-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Filesummary --> <xsl:template match="filesummary"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">File Summary</xsl:with-param> </xsl:call-template> <div class="REFBODY file-summary-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- App --> <xsl:template match="app"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Application</xsl:with-param> </xsl:call-template> <div class="REFBODY application-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Appsummary --> <xsl:template match="appsummary"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Application Summary</xsl:with-param> </xsl:call-template> <div class="REFBODY application-summary-body"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </div> </xsl:template> <!-- Description --> <xsl:template match="description"> <xsl:param name="partnum"/> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Description</xsl:with-param> </xsl:call-template> <div class="REFBODY description-body"> <p> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </p> </div> </div> </xsl:template> <!-- Funcs --> <xsl:template match="funcs"> <xsl:param name="partnum"/> <xsl:apply-templates select="fsdescription"> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> <div class="innertube"> <xsl:call-template name="h3_title_link"> <xsl:with-param name="title">Exports</xsl:with-param> </xsl:call-template> </div> <div class="exports-body"> <xsl:apply-templates select="func"> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </xsl:template> <!-- Func --> <xsl:template match="func"> <xsl:param name="partnum"/> <xsl:apply-templates select="name"/> <xsl:apply-templates select="name[string-length(@arity) > 0 and position()=last()]" mode="types"/> <div class="exports-tube"> <xsl:apply-templates select="fsummary|type|desc"> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </xsl:template> <xsl:template match="name"> <xsl:choose> <!-- @arity is mandatory when referring to a specification --> <xsl:when test="string-length(@arity) > 0"> <xsl:call-template name="spec_name"/> </xsl:when> <xsl:when test="ancestor::datatype"> <xsl:call-template name="type_name"/> </xsl:when> <xsl:when test="string-length(text()) = 0 and ancestor::erlref"> <xsl:message terminate="yes"> Error <xsl:value-of select="@name"/>: arity is mandatory when referring to specifications! </xsl:message> </xsl:when> <xsl:otherwise> <xsl:call-template name="name"/> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- Used both in <datatype> and in <func>! --> <xsl:template name="name"> <xsl:variable name="tmpstring"> <xsl:value-of select="substring-before(substring-after(., '('), '->')"/> </xsl:variable> <xsl:variable name="ustring"> <xsl:choose> <xsl:when test="string-length($tmpstring) > 0"> <xsl:call-template name="remove-paren"> <xsl:with-param name="string" select="$tmpstring"/> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:call-template name="remove-paren"> <xsl:with-param name="string" select="substring-after(., '(')"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="arity"> <xsl:call-template name="calc-arity"> <xsl:with-param name="string" select="substring-before($ustring, ')')"/> <xsl:with-param name="no-of-pars" select="0"/> </xsl:call-template> </xsl:variable> <xsl:choose> <xsl:when test="ancestor::cref"> <table class="func-table"> <tr class="func-tr"> <td class="cfunc-td"> <span class="bold_code bc-7"> <xsl:call-template name="title_link"> <xsl:with-param name="link" select="substring-before(nametext, '(')"/> </xsl:call-template> </span> </td> <td class="func-since-td"> <xsl:if test="string-length(@since) > 0"> <span class="since"><xsl:value-of select="@since"/></span> </xsl:if> </td> </tr> </table> </xsl:when> <xsl:when test="ancestor::erlref"> <xsl:variable name="fname"> <xsl:variable name="fname1"> <xsl:value-of select="substring-before(., '(')"/> </xsl:variable> <xsl:variable name="fname2"> <xsl:value-of select="substring-after($fname1, 'erlang:')"/> </xsl:variable> <xsl:choose> <xsl:when test="string-length($fname2) > 0"> <xsl:value-of select="normalize-space($fname2)"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="normalize-space($fname1)"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:choose> <xsl:when test="ancestor::datatype"> <div class="bold_code bc-8"> <xsl:call-template name="title_link"> <xsl:with-param name="link" select="concat('type-',$fname)"/> <xsl:with-param name="title"> <xsl:apply-templates/> </xsl:with-param> </xsl:call-template> </div> </xsl:when> <xsl:otherwise> <table class="func-table"> <tr class="func-tr"> <td class="func-td"> <div class="bold_code fun-type"> <xsl:call-template name="title_link"> <xsl:with-param name="link" select="concat(concat($fname,'-'),$arity)"/> <xsl:with-param name="title"> <xsl:apply-templates/> </xsl:with-param> </xsl:call-template> </div> </td> <td class="func-since-td"> <xsl:if test="string-length(@since) > 0"> <span class="since"><xsl:value-of select="@since"/></span> </xsl:if> </td> </tr> </table> </xsl:otherwise> </xsl:choose> </xsl:when> <xsl:otherwise> <div class="bold_code bc-10"><xsl:value-of select="."/></div> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- Type --> <xsl:template match="type"> <xsl:param name="partnum"/> <!-- The case where @name != 0 is taken care of in "type_name" --> <xsl:if test="string-length(@name) = 0 and string-length(@variable) = 0"> <div class="REFBODY rb-5"> <h3 class="func-types-title">Types</h3> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </xsl:if> </xsl:template> <!-- V --> <xsl:template match="v"> <xsl:param name="partnum"/> <div class="REFTYPES rt-4"> <span class="bold_code fun-param-type"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </span><br/> </div> </xsl:template> <!-- D --> <xsl:template match="d"> <xsl:param name="partnum"/> <div class="REFBODY rb-6"> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </div> </xsl:template> <xsl:template name="h3_title_link"> <xsl:param name="title"/> <h3> <xsl:call-template name="title_link"> <xsl:with-param name="title" select="$title"/> <xsl:with-param name="link" select="erl:to-link($title)"/> </xsl:call-template> </h3> </xsl:template> <xsl:template name="title_link"> <xsl:param name="title" select="'APPLY'"/> <xsl:param name="link" select="erl:to-link(title)"/> <xsl:param name="ghlink" select="ancestor-or-self::*[@ghlink][position() = 1]/@ghlink"/> <xsl:variable name="id" select="concat(concat($link,'-'), generate-id(.))"/> <span onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';" onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';"> <xsl:call-template name="ghlink"> <xsl:with-param name="id" select="$id"/> <xsl:with-param name="ghlink" select="$ghlink"/> </xsl:call-template> <a class="title_link" name="{$link}" href="#{$link}"> <xsl:choose> <xsl:when test="$title = 'APPLY'"> <xsl:apply-templates/> <!-- like <ret> and <nametext> --> </xsl:when> <xsl:otherwise> <xsl:value-of select="$title"/> </xsl:otherwise> </xsl:choose> </a> </span> </xsl:template> <xsl:template name="ghlink"> <xsl:param name="id"/> <xsl:param name="ghlink" select="ancestor-or-self::*[@ghlink][position() = 1]/@ghlink"/> <xsl:choose> <xsl:when test="string-length($ghlink) > 0"> <span id="ghlink-{$id}" class="ghlink"> <a href="https://github.com/erlang/otp/edit/{$ghlink}" title="Found an issue with the documentation? Fix it by clicking here!"> <span class="pencil"/> </a> </span> </xsl:when> <xsl:otherwise> <span id="ghlink-{$id}"/> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- Desc --> <xsl:template match="desc"> <xsl:param name="partnum"/> <div class="REFBODY rb-7"> <p> <xsl:apply-templates> <xsl:with-param name="partnum" select="$partnum"/> </xsl:apply-templates> </p> </div> </xsl:template> <!-- Fsummary --> <xsl:template match="fsummary"> <!-- This tag is skipped for now. --> </xsl:template> <xsl:template match="input"> <span class="bold_code bc-12"><xsl:apply-templates/></span> </xsl:template> <xsl:template match="node()[starts-with(name(), 'see')]"> <xsl:call-template name="seealso"/> </xsl:template> <xsl:template name="seealso"> <xsl:variable name="app_part"> <xsl:variable name="base"> <xsl:value-of select="substring-before(substring-before(concat(@marker,'#'), '#'),':')"/> </xsl:variable> <xsl:choose> <xsl:when test="starts-with($base,'system/')"> <xsl:text>doc/</xsl:text> <xsl:value-of select="substring-after($base,'/')"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="$base"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="mod_part"> <xsl:variable name="filepart"> <!-- Get everything before the first #. We concat a # so that if there is no # we will get the entire string --> <xsl:value-of select="substring-before(concat(@marker,'#'), '#')"/> </xsl:variable> <xsl:variable name="base"> <!-- Remove the app part of there is any --> <xsl:choose> <xsl:when test="string-length($app_part) > 0"> <xsl:value-of select="substring-after($filepart, ':')"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="$filepart"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:choose> <!-- If this is a <seeguide> and name is index then we change it to users_guide --> <xsl:when test="node()[starts-with(name(parent::*), 'seeguide')] and $base = 'index'"> <xsl:text>users_guide</xsl:text> </xsl:when> <xsl:otherwise> <xsl:value-of select="$base"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="linkpart"> <xsl:variable name="base"> <xsl:value-of select="substring-after(@marker, '#')"/> </xsl:variable> <xsl:choose> <!-- If this is a <seetype> we prepend type- to the anchor --> <xsl:when test="node()[starts-with(name(parent::*), 'seetype')]"> <xsl:text>type-</xsl:text><xsl:value-of select="$base"/> </xsl:when> <xsl:when test="node()[starts-with(name(parent::*), 'seemfa')]"> <xsl:value-of select="translate($base, '/', '-')"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="$base"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="extension"> <xsl:choose> <xsl:when test="substring($mod_part, (string-length($mod_part) - string-length('.svg')) + 1) = '.svg'"> <xsl:text></xsl:text> </xsl:when> <xsl:otherwise> <xsl:text>.html</xsl:text> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:choose> <xsl:when test="starts-with(@marker,'#')"> <!-- "#Linkpart" --> <span class="bold_code bc-17"><a href="#{$linkpart}"><xsl:apply-templates/></a></span> </xsl:when> <xsl:when test="contains(@marker,'#')"> <!-- "Filepart#Linkpart" (or "Filepart#") --> <xsl:choose> <xsl:when test="string-length($app_part) > 0"> <!-- "AppPart:ModPart#Linkpart" --> <span class="bold_code bc-13"><a href="javascript:erlhref('{$topdocdir}/../','{$app_part}','{$mod_part}{$extension}#{$linkpart}');"><xsl:apply-templates/></a></span> </xsl:when> <xsl:otherwise> <!-- "Filepart#Linkpart (there is no ':' in Filepart) --> <xsl:variable name="minus_prefix" select="substring-before($linkpart, '-')"/> <xsl:choose> <xsl:when test="$minus_prefix = 'type' and string-length($specs_file) > 0 and count($i/specs/module[@name=$mod_part]) = 0"> <!-- Dialyzer seealso (the application is unknown) --> <!-- Following code deemed too slow; use key() instead <xsl:variable name="app" select="$m2a/mod2app/module[@name=$filepart]"/> --> <xsl:variable name="this" select="."/> <xsl:for-each select="$m2a"> <xsl:variable name="app" select="key('mod2app', $mod_part)"/> <xsl:choose> <xsl:when test="string-length($app) > 0"> <span class="bold_code bc-14"><a href="javascript:erlhref('{$topdocdir}/../','{$app}','{$mod_part}{$extension}#{$linkpart}');"><xsl:value-of select="$this"/></a></span> </xsl:when> <xsl:otherwise> <!-- Unknown application --> <xsl:message terminate="yes"> Error <xsl:value-of select="$mod_part"/>: cannot find module exporting type <xsl:value-of select="$app_part"/> - <xsl:value-of select="$linkpart"/> </xsl:message> </xsl:otherwise> </xsl:choose> </xsl:for-each> </xsl:when> <xsl:when test="string-length($linkpart) > 0"> <!-- Still Filepart#Linkpart (there is no ':' in Filepart) --> <span class="bold_code bc-15"><a href="{$mod_part}{$extension}#{$linkpart}"><xsl:apply-templates/></a></span> </xsl:when> <xsl:otherwise> <!-- "Filepart#" (there is no ':' in Filepart) --> <span class="bold_code bc-16"><a href="{$mod_part}{$extension}"><xsl:apply-templates/></a></span> </xsl:otherwise> </xsl:choose> </xsl:otherwise> </xsl:choose> </xsl:when> <xsl:otherwise> <!-- "AppPart:Mod" or "Mod" (there is no '#') --> <xsl:choose> <xsl:when test="string-length($app_part) > 0"> <!-- "App:Mod" --> <span class="bold_code bc-18"><a href="javascript:erlhref('{$topdocdir}/../','{$app_part}','{$mod_part}{$extension}');"><xsl:apply-templates/></a></span> </xsl:when> <xsl:otherwise> <!-- "Mod" --> <span class="bold_code bc-19"><a href="{$mod_part}{$extension}"><xsl:apply-templates/></a></span> </xsl:otherwise> </xsl:choose> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template match="url"> <span class="bold_code bc-20"><a href="{@href}"><xsl:apply-templates/></a></span> </xsl:template> <xsl:template match="marker"> <xsl:choose> <xsl:when test="not(parent::section and following-sibling::title)"> <a name="{@id}"><xsl:apply-templates/></a> </xsl:when> </xsl:choose> </xsl:template> <xsl:template name="marker-before-title"> <xsl:choose> <xsl:when test="self::marker and parent::section and following-sibling::title"> <a name="{@id}"><xsl:apply-templates/></a> </xsl:when> </xsl:choose> </xsl:template> <!-- Release Notes --> <xsl:template match="releasenotes"> <xsl:document href="{$outdir}/release_notes.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"> <xsl:call-template name="pagelayout"/> </xsl:document> </xsl:template> <!-- Rel notes content--> <xsl:template name="releasenotes.content"> <div class="frontpage"/> <center><h1><xsl:value-of select="/book/header/title"/> Release Notes</h1></center> <center><h4>Version <xsl:value-of select="$appver"/></h4></center> <center><h4><xsl:value-of select="$gendate"/></h4></center> <div class="extrafrontpageinfo"> <center><xsl:value-of select="$extra_front_page_info"/></center> </div> <xsl:apply-templates select="chapter"/> </xsl:template> <!-- Menu.rn --> <xsl:template name="menu.rn"> <xsl:param name="chapnum"/> <div id="leftnav"> <div class="leftnav-tube"> <xsl:call-template name="erlang_logo"/> <p class="section-title"><xsl:value-of select="/book/header/title"/></p> <p class="section-subtitle">Release Notes</p> <p class="section-version">Version <xsl:value-of select="$appver"/></p> <xsl:call-template name="menu_top"/> <xsl:call-template name="menu_middle"/> <h3>Chapters</h3> <ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu"> <xsl:call-template name="menu.chapter"> <xsl:with-param name="entries" select="/book/releasenotes/chapter[header/title]"/> <xsl:with-param name="chapnum" select="$chapnum"/> </xsl:call-template> </ul> </div> </div> </xsl:template> <!-- Special templates to calculate the arity of functions --> <xsl:template name="calc-arity"> <xsl:param name="string"/> <xsl:param name="no-of-pars"/> <xsl:variable name="length"> <xsl:value-of select="string-length($string)"/> </xsl:variable> <xsl:choose> <xsl:when test="$length > 0"> <xsl:call-template name="calc-arity"> <xsl:with-param name="string" select="substring-after($string, ',')"/> <xsl:with-param name="no-of-pars" select="$no-of-pars+1"/> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:value-of select="$no-of-pars"/> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template name="remove-paren"> <xsl:param name="string"/> <xsl:variable name="str1"> <xsl:call-template name="remove-paren-1"> <xsl:with-param name="string" select="$string"/> <xsl:with-param name="start">(</xsl:with-param> <xsl:with-param name="end">)</xsl:with-param> </xsl:call-template> </xsl:variable> <xsl:variable name="str2"> <xsl:call-template name="remove-paren-1"> <xsl:with-param name="string" select="$str1"/> <xsl:with-param name="start">{</xsl:with-param> <xsl:with-param name="end">}</xsl:with-param> </xsl:call-template> </xsl:variable> <xsl:variable name="str3"> <xsl:call-template name="remove-paren-1"> <xsl:with-param name="string" select="$str2"/> <xsl:with-param name="start">[</xsl:with-param> <xsl:with-param name="end">]</xsl:with-param> </xsl:call-template> </xsl:variable> <xsl:value-of select="$str3"/> </xsl:template> <xsl:template name="remove-paren-1"> <xsl:param name="string"/> <xsl:param name="start"/> <xsl:param name="end"/> <xsl:variable name="tmp1"> <xsl:value-of select="substring-before($string, $start)"/> </xsl:variable> <xsl:choose> <xsl:when test="string-length($tmp1) > 0 or starts-with($string, $start)"> <xsl:variable name="tmp2"> <xsl:value-of select="substring-after(substring-after($string, $start), $end)"/> </xsl:variable> <xsl:variable name="retstring"> <xsl:call-template name="remove-paren"> <xsl:with-param name="string" select="$tmp2"/> </xsl:call-template> </xsl:variable> <xsl:value-of select="concat(concat($tmp1, 'x'), $retstring)"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="$string"/> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template name="nl"> <xsl:text> </xsl:text> </xsl:template> <xsl:template match="node()[starts-with(name(), 'see')]//text()"> <xsl:value-of select="normalize-space(.)"/> </xsl:template> <xsl:template match="ret"> <xsl:value-of select="."/> <xsl:variable name="last_char" select="substring(., string-length(.), 1)"/> <xsl:if test="$last_char != '*'"> <xsl:text> </xsl:text> </xsl:if> </xsl:template> <xsl:template match="nametext"> <xsl:value-of select="substring-before(.,'(')"/> <xsl:text>(</xsl:text> <xsl:variable name="arglist" select="substring-after(.,'(')"/> <xsl:choose> <xsl:when test="$arglist = ')' or $arglist = 'void)'"> <xsl:value-of select="$arglist"/> </xsl:when> <xsl:otherwise> <br/> <xsl:call-template name="cfunc-arglist"> <xsl:with-param name="text" select="$arglist"/> </xsl:call-template> </xsl:otherwise> </xsl:choose> </xsl:template> <!-- Format C function argument list with <br> after comma --> <xsl:template name="cfunc-arglist"> <xsl:param name="text"/> <xsl:variable name="line" select="normalize-space($text)"/> <xsl:choose> <xsl:when test="contains($line,',')"> <xsl:value-of select="substring-before($line,',')"/>,<br/> <xsl:call-template name="cfunc-arglist"> <xsl:with-param name="text" select="substring-after($line,',')"/> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:value-of select="$line"/> </xsl:otherwise> </xsl:choose> </xsl:template> </xsl:stylesheet>
{ "pile_set_name": "Github" }
package org.jboss.resteasy.test.resource.param.resource; import org.jboss.resteasy.test.resource.param.MatrixParamAsPrimitiveTest; import org.junit.Assert; import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; import javax.ws.rs.MatrixParam; import javax.ws.rs.Path; import javax.ws.rs.Produces; @Path("/wrappers/default/override") public class MatrixParamAsPrimitiveWrappersDefaultOverride { @GET @Produces("application/boolean") public String doGet(@MatrixParam("boolean") @DefaultValue("false") Boolean v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, true, v.booleanValue()); return "content"; } @GET @Produces("application/byte") public String doGet(@MatrixParam("byte") @DefaultValue("1") Byte v) { Assert.assertTrue(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, (byte) 127 == v.byteValue()); return "content"; } @GET @Produces("application/short") public String doGet(@MatrixParam("short") @DefaultValue("1") Short v) { Assert.assertTrue(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, (short) 32767 == v.shortValue()); return "content"; } @GET @Produces("application/int") public String doGet(@MatrixParam("int") @DefaultValue("1") Integer v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, 2147483647, v.intValue()); return "content"; } @GET @Produces("application/long") public String doGet(@MatrixParam("long") @DefaultValue("1") Long v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, 9223372036854775807L, v.longValue()); return "content"; } @GET @Produces("application/float") public String doGet(@MatrixParam("float") @DefaultValue("0.0") Float v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, 3.14159265f, v.floatValue(), 0.0f); return "content"; } @GET @Produces("application/double") public String doGet(@MatrixParam("double") @DefaultValue("0.0") Double v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, 3.14159265358979d, v.doubleValue(), 0.0); return "content"; } @GET @Produces("application/char") public String doGet(@MatrixParam("char") @DefaultValue("b") Character v) { Assert.assertEquals(MatrixParamAsPrimitiveTest.ERROR_MESSAGE, 'a', v.charValue()); return "content"; } }
{ "pile_set_name": "Github" }
// // ViewController.m // LMSideBarControllerDemo // // Created by LMinh on 10/11/15. // Copyright © 2015 LMinh. All rights reserved. // #import "SideBarController.h" #import "LeftMenuViewController.h" #import "RightMenuViewController.h" #import "MainNavigationController.h" #import "LMSideBarDepthStyle.h" @implementation SideBarController - (void)awakeFromNib { [super awakeFromNib]; // Init side bar styles LMSideBarDepthStyle *sideBarDepthStyle = [LMSideBarDepthStyle new]; sideBarDepthStyle.menuWidth = 220; // Init view controllers LeftMenuViewController *leftMenuViewController = [self.storyboard instantiateViewControllerWithIdentifier:@"leftMenuViewController"]; RightMenuViewController *rightMenuViewController = [self.storyboard instantiateViewControllerWithIdentifier:@"rightMenuViewController"]; MainNavigationController *navigationController = [self.storyboard instantiateViewControllerWithIdentifier:@"mainNavigationController"]; // Setup side bar controller [self setPanGestureEnabled:YES]; [self setDelegate:self]; [self setMenuViewController:leftMenuViewController forDirection:LMSideBarControllerDirectionLeft]; [self setMenuViewController:rightMenuViewController forDirection:LMSideBarControllerDirectionRight]; [self setSideBarStyle:sideBarDepthStyle forDirection:LMSideBarControllerDirectionLeft]; [self setSideBarStyle:sideBarDepthStyle forDirection:LMSideBarControllerDirectionRight]; [self setContentViewController:navigationController]; } #pragma mark - SIDE BAR DELEGATE - (void)sideBarController:(LMSideBarController *)sideBarController willShowMenuViewController:(UIViewController *)menuViewController { } - (void)sideBarController:(LMSideBarController *)sideBarController didShowMenuViewController:(UIViewController *)menuViewController { } - (void)sideBarController:(LMSideBarController *)sideBarController willHideMenuViewController:(UIViewController *)menuViewController { } - (void)sideBarController:(LMSideBarController *)sideBarController didHideMenuViewController:(UIViewController *)menuViewController { } @end
{ "pile_set_name": "Github" }
const { PeopleResolver } = require('../../people'); const { FIELD_TYPES } = require('../../people/constants'); const { OBJECT_TYPE } = require('../constants'); const { findByEmail } = require('../index'); const denormalizer = require('./denormalizer'); const PRIORITY = 100; module.exports = new PeopleResolver(OBJECT_TYPE, resolver, denormalizer, PRIORITY); function resolver({ fieldType, value, context }) { if (fieldType === FIELD_TYPES.EMAIL_ADDRESS) { return new Promise((resolve, reject) => { findByEmail(value, { domainId: context.domain._id}, (err, user) => { if (err) return reject(err); resolve(user); }); }); } return Promise.resolve(); }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <MediaContainer allowSync="1" identifier="com.plexapp.plugins.library" librarySectionID="4" librarySectionUUID="efbdd7b618d67cdae2fd42cf68a8427f9526d7fa" mediaTagPrefix="/system/bundle/media/flags/" mediaTagVersion="1393422068" size="1" > <Video addedAt="1365260822" art="/library/metadata/572/art/1379374156" contentRating="R" duration="10506750" guid="com.plexapp.agents.imdb://tt0187393?lang=en" key="/library/metadata/572" lastViewedAt="1388768131" originallyAvailableAt="2000-06-28" rating="7.5999999046325701" ratingKey="572" studio="Centropolis Entertainment" summary="After proving himself on the field of battle in the French and Indian War, Benjamin Martin wants nothing more to do with such things, preferring the simple life of a farmer. But when his son Gabriel enlists in the army to defend their new nation, America, against the British, Benjamin reluctantly returns to his old life to protect his son." tagline="Some things are worth fighting for." thumb="/library/metadata/572/thumb/1379374156" title="The Patriot" titleSort="Patriot" type="movie" updatedAt="1379374156" viewCount="92" viewOffset="389055" year="2000" > <Media id="557" aspectRatio="2.35" audioChannels="2" audioCodec="aac" bitrate="1704" container="mp4" duration="10506750" has64bitOffsets="0" height="800" optimizedForStreaming="1" videoCodec="h264" videoFrameRate="24p" videoResolution="1080" width="1920" > <Part id="589" container="mp4" duration="10506750" file="/var/lib/plexmediaserver/Movies/The Patriot Extended Cut.mp4" has64bitOffsets="0" key="/library/parts/589/file.mp4" optimizedForStreaming="1" size="2238591499" > <Stream id="2591" codec="srt" format="srt" key="/library/streams/2591" language="English" languageCode="eng" streamType="3" /> <Stream id="3023" bitDepth="8" bitrate="1640" cabac="1" chromaSubsampling="4:2:0" codec="h264" codecID="avc1" colorSpace="yuv" duration="10506412" frameRate="23.976" frameRateMode="cfr" hasScalingMatrix="0" height="800" index="0" level="40" profile="high" refFrames="5" scanType="progressive" streamType="1" width="1920" /> <Stream id="3024" bitrate="64" bitrateMode="vbr" channels="2" codec="aac" codecID="40" duration="10506750" index="1" profile="he-aac / lc" samplingRate="22050" selected="1" streamType="2" /> </Part> </Media> <Genre id="19" tag="Action" /> <Genre id="104" tag="Drama" /> <Genre id="509" tag="History" /> <Genre id="510" tag="War" /> <Writer id="1889" tag="Robert Rodat" /> <Director id="108" tag="Roland Emmerich" /> <Producer id="3779" tag="Dean Devlin" /> <Producer id="3780" tag="Mark Gordon" /> <Producer id="3781" tag="Gary Levinsohn" /> <Country id="24" tag="USA" /> <Role id="3573" role="Benjamin Martin" tag="Mel Gibson" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/8s48F6yFPOOcUfGKH1dNvztiHZz.jpg" /> <Role id="3574" role="Gabriel Martin" tag="Heath Ledger" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/47b8wJySE9r6gWMcTGSa0EuiDV.jpg" /> <Role id="3575" role="Charlotte Selton" tag="Joely Richardson" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/eIp0OGjOBmdd7FuprJYKDWYvZxH.jpg" /> <Role id="543" role="Col. William Tavington" tag="Jason Isaacs" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/xUlka8zpREdB0xIAiolOSC1t4CB.jpg" /> <Role id="1622" role="Jean Villeneuve" tag="Tchéky Karyo" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/kh5Gb5luqfAOxYZvquXueXHXXa.jpg" /> <Role id="1887" role="Col. Harry Burwell" tag="Chris Cooper" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/idYN2ItTwPbGL55cBZO4y8rdcGu.jpg" /> <Role id="3576" role="Anne Howard" tag="Lisa Brenner" /> <Role id="3577" role="Gen. Cornwallis" tag="Tom Wilkinson" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/ammDpnQRD1JVY0aMnt2HBJfucgZ.jpg" /> <Role id="3578" role="John Billings" tag="Leon Rippy" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/o0mQPjR67UJzzKbm3nVdOj7fdVm.jpg" /> <Role id="1721" role="Dan Scott" tag="Donal Logue" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/vwSHbtsOPCJDBeSCANFlWgsgN6A.jpg" /> <Role id="2303" role="Capt. Wilkins" tag="Adam Baldwin" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/w76VLhGjRELFkETeFF0iPMJO9eJ.jpg" /> <Role id="3579" role="Occam" tag="Jay Arlen Jones" /> <Role id="3580" role="Peter Howard" tag="Joey D. Vieira" /> <Role id="3581" role="Thomas Martin" tag="Gregory Smith" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/2piTSujC9LSHop6dWUEUNf6C6Ek.jpg" /> <Role id="3582" role="Susan Martin" tag="Skye McCole Bartusiak" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/b4ywo8sX0KbVgaj84kmjvM4o23g.jpg" /> <Role id="3583" role="Nathan Martin" tag="Trevor Morgan" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/87yB4E6ugM97PVWwwEUbFLNc6Y9.jpg" /> <Role id="3584" role="Samuel Martin" tag="Bryan Chafin" /> <Role id="3585" role="Charles O&apos;Hara" tag="Peter Woodward" thumb="http://d3gtl9l2a4fn1j.cloudfront.net/t/p/original/fMybhOr9I4K6fC1Bg2i6jXLV0Dg.jpg" /> </Video> </MediaContainer>
{ "pile_set_name": "Github" }
import csv from itertools import count from pathlib import Path from typing import Any, Dict, List from rotkehlchen.assets.asset import Asset from rotkehlchen.constants.assets import A_USD from rotkehlchen.constants.misc import ZERO from rotkehlchen.db.dbhandler import DBHandler from rotkehlchen.errors import DeserializationError, UnknownAsset from rotkehlchen.exchanges.data_structures import AssetMovement, AssetMovementCategory, Trade from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, ) from rotkehlchen.typing import AssetAmount, Fee, Location, Price, TradePair, TradeType def remap_header(fieldnames: List[str]) -> List[str]: cur_count = count(1) mapping = {1: 'Buy', 2: 'Sell', 3: 'Fee'} return [f'Cur.{mapping[next(cur_count)]}' if f.startswith('Cur.') else f for f in fieldnames] class UnsupportedCointrackingEntry(Exception): """Thrown for Cointracking CSV export entries we can't support to import""" class UnsupportedCryptocomEntry(Exception): """Thrown for Cryptocom CSV export entries we can't support to import""" def exchange_row_to_location(entry: str) -> Location: """Takes the exchange row entry of Cointracking exported trades list and returns a location""" if entry == 'no exchange': return Location.EXTERNAL elif entry == 'Kraken': return Location.KRAKEN elif entry == 'Poloniex': return Location.POLONIEX elif entry == 'Bittrex': return Location.BITTREX elif entry == 'Binance': return Location.BINANCE elif entry == 'Bitmex': return Location.BITMEX elif entry == 'Coinbase': return Location.COINBASE # TODO: Check if this is the correct string for CoinbasePro from cointracking elif entry == 'CoinbasePro': return Location.COINBASEPRO # TODO: Check if this is the correct string for Gemini from cointracking elif entry == 'Gemini': return Location.GEMINI elif entry == 'ETH Transaction': raise UnsupportedCointrackingEntry( 'Not importing ETH Transactions from Cointracking. Cointracking does not ' 'export enough data for them. Simply enter your ethereum accounts and all ' 'your transactions will be auto imported directly from the chain', ) elif entry == 'BTC Transaction': raise UnsupportedCointrackingEntry( 'Not importing BTC Transactions from Cointracking. Cointracking does not ' 'export enough data for them. Simply enter your BTC accounts and all ' 'your transactions will be auto imported directly from the chain', ) raise UnsupportedCointrackingEntry( f'Unknown Exchange "{entry}" encountered during a cointracking import. Ignoring it', ) class DataImporter(): def __init__(self, db: DBHandler) -> None: self.db = db def _consume_cointracking_entry(self, csv_row: Dict[str, Any]) -> None: """Consumes a cointracking entry row from the CSV and adds it into the database Can raise: - DeserializationError if something is wrong with the format of the expected values - UnsupportedCointrackingEntry if importing of this entry is not supported. - IndexError if the CSV file is corrupt - KeyError if the an expected CSV key is missing - UnknownAsset if one of the assets founds in the entry are not supported """ row_type = csv_row['Type'] timestamp = deserialize_timestamp_from_date( date=csv_row['Date'], formatstr='%d.%m.%Y %H:%M:%S', location='cointracking.info', ) notes = csv_row['Comment'] location = exchange_row_to_location(csv_row['Exchange']) fee = Fee(ZERO) fee_currency = A_USD # whatever (used only if there is no fee) if csv_row['Fee'] != '': fee = deserialize_fee(csv_row['Fee']) fee_currency = Asset(csv_row['Cur.Fee']) if row_type in ('Gift/Tip', 'Trade', 'Income'): base_asset = Asset(csv_row['Cur.Buy']) quote_asset = None if csv_row['Cur.Sell'] == '' else Asset(csv_row['Cur.Sell']) if quote_asset is None and row_type not in ('Gift/Tip', 'Income'): raise DeserializationError('Got a trade entry with an empty quote asset') if quote_asset is None: # Really makes no difference as this is just a gift and the amount is zero quote_asset = A_USD pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}') base_amount_bought = deserialize_asset_amount(csv_row['Buy']) if csv_row['Sell'] != '-': quote_amount_sold = deserialize_asset_amount(csv_row['Sell']) else: quote_amount_sold = AssetAmount(ZERO) rate = Price(quote_amount_sold / base_amount_bought) trade = Trade( timestamp=timestamp, location=location, pair=pair, trade_type=TradeType.BUY, # It's always a buy during cointracking import amount=base_amount_bought, rate=rate, fee=fee, fee_currency=fee_currency, link='', notes=notes, ) self.db.add_trades([trade]) elif row_type == 'Deposit' or row_type == 'Withdrawal': category = deserialize_asset_movement_category(row_type.lower()) if category == AssetMovementCategory.DEPOSIT: amount = deserialize_asset_amount(csv_row['Buy']) asset = Asset(csv_row['Cur.Buy']) else: amount = deserialize_asset_amount_force_positive(csv_row['Sell']) asset = Asset(csv_row['Cur.Sell']) asset_movement = AssetMovement( location=location, category=category, address=None, transaction_id=None, timestamp=timestamp, asset=asset, amount=amount, fee=fee, fee_asset=fee_currency, link='', ) self.db.add_asset_movements([asset_movement]) else: raise UnsupportedCointrackingEntry( f'Unknown entrype type "{row_type}" encountered during cointracking ' f'data import. Ignoring entry', ) def import_cointracking_csv(self, filepath: Path) -> None: with open(filepath, 'r', encoding='utf-8-sig') as csvfile: data = csv.reader(csvfile, delimiter=',', quotechar='"') header = remap_header(next(data)) for row in data: try: self._consume_cointracking_entry(dict(zip(header, row))) except UnknownAsset as e: self.db.msg_aggregator.add_warning( f'During cointracking CSV import found action with unknown ' f'asset {e.asset_name}. Ignoring entry', ) continue except IndexError: self.db.msg_aggregator.add_warning( 'During cointracking CSV import found entry with ' 'unexpected number of columns', ) continue except DeserializationError as e: self.db.msg_aggregator.add_warning( f'Error during cointracking CSV import deserialization. ' f'Error was {str(e)}. Ignoring entry', ) continue except UnsupportedCointrackingEntry as e: self.db.msg_aggregator.add_warning(str(e)) continue return None def _consume_cryptocom_entry(self, csv_row: Dict[str, Any]) -> None: """Consumes a cryptocom entry row from the CSV and adds it into the database Can raise: - DeserializationError if something is wrong with the format of the expected values - UnsupportedCryptocomEntry if importing of this entry is not supported. - KeyError if the an expected CSV key is missing - UnknownAsset if one of the assets founds in the entry are not supported """ row_type = csv_row['Transaction Kind'] timestamp = deserialize_timestamp_from_date( date=csv_row['Timestamp (UTC)'], formatstr='%Y-%m-%d %H:%M:%S', location='crypto.com', ) description = csv_row['Transaction Description'] notes = f'{description}\nSource: crypto.com (CSV import)' # No fees info for now (Aug 2020) on crypto.com, so we put 0 fees fee = Fee(ZERO) fee_currency = A_USD # whatever (used only if there is no fee) if row_type in ( 'crypto_purchase', 'crypto_exchange', 'referral_gift', 'crypto_earn_interest_paid', ): # variable mapping to raw data currency = csv_row['Currency'] to_currency = csv_row['To Currency'] native_currency = csv_row['Native Currency'] amount = csv_row['Amount'] to_amount = csv_row['To Amount'] native_amount = csv_row['Native Amount'] trade_type = TradeType.BUY if to_currency != native_currency else TradeType.SELL if row_type == 'crypto_exchange': # trades crypto to crypto base_asset = Asset(to_currency) quote_asset = Asset(currency) if quote_asset is None: raise DeserializationError('Got a trade entry with an empty quote asset') base_amount_bought = deserialize_asset_amount(to_amount) quote_amount_sold = deserialize_asset_amount(amount) else: base_asset = Asset(currency) quote_asset = Asset(native_currency) base_amount_bought = deserialize_asset_amount(amount) quote_amount_sold = deserialize_asset_amount(native_amount) rate = Price(abs(quote_amount_sold / base_amount_bought)) pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}') trade = Trade( timestamp=timestamp, location=Location.CRYPTOCOM, pair=pair, trade_type=trade_type, amount=base_amount_bought, rate=rate, fee=fee, fee_currency=fee_currency, link='', notes=notes, ) self.db.add_trades([trade]) elif row_type in ( 'crypto_earn_program_created', 'lockup_lock', 'lockup_unlock', 'dynamic_coin_swap_bonus_exchange_deposit', 'crypto_wallet_swap_debited', 'crypto_wallet_swap_credited', 'lockup_swap_debited', 'lockup_swap_credited', 'dynamic_coin_swap_debited', 'dynamic_coin_swap_credited', 'dynamic_coin_swap_bonus_exchange_deposit', ): # those types are ignored because it doesn't affect the wallet balance # or are not handled here return else: raise UnsupportedCryptocomEntry( f'Unknown entrype type "{row_type}" encountered during ' f'cryptocom data import. Ignoring entry', ) def _import_cryptocom_swap(self, data: Any) -> None: """Look for swapping events and handle them as trades. Notice: Crypto.com csv export gathers all swapping entries (`lockup_swap_*`, `crypto_wallet_swap_*`, ...) into one entry named `dynamic_coin_swap_*`. This method looks for `dynamic_coin_swap_debited` and `dynamic_coin_swap_credited` entries using the same timestamp to handle them as one trade. """ swapping_rows: Dict[Any, Dict[str, Any]] = {} debited_row = None credited_row = None for row in data: if row['Transaction Kind'] == 'dynamic_coin_swap_debited': timestamp = deserialize_timestamp_from_date( date=row['Timestamp (UTC)'], formatstr='%Y-%m-%d %H:%M:%S', location='crypto.com', ) if timestamp not in swapping_rows: swapping_rows[timestamp] = {} swapping_rows[timestamp]['debited'] = row elif row['Transaction Kind'] == 'dynamic_coin_swap_credited': timestamp = deserialize_timestamp_from_date( date=row['Timestamp (UTC)'], formatstr='%Y-%m-%d %H:%M:%S', location='crypto.com', ) if timestamp not in swapping_rows: swapping_rows[timestamp] = {} swapping_rows[timestamp]['credited'] = row for timestamp in swapping_rows: credited_row = swapping_rows[timestamp]['credited'] debited_row = swapping_rows[timestamp]['debited'] if credited_row is not None and debited_row is not None: notes = 'Coin Swap\nSource: crypto.com (CSV import)' # No fees here since it's coin swapping fee = Fee(ZERO) fee_currency = A_USD base_asset = Asset(credited_row['Currency']) quote_asset = Asset(debited_row['Currency']) pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}') base_amount_bought = deserialize_asset_amount(credited_row['Amount']) quote_amount_sold = deserialize_asset_amount(debited_row['Amount']) rate = Price(abs(base_amount_bought / quote_amount_sold)) trade = Trade( timestamp=timestamp, location=Location.CRYPTOCOM, pair=pair, trade_type=TradeType.BUY, amount=base_amount_bought, rate=rate, fee=fee, fee_currency=fee_currency, link='', notes=notes, ) self.db.add_trades([trade]) def import_cryptocom_csv(self, filepath: Path) -> None: with open(filepath, 'r', encoding='utf-8-sig') as csvfile: data = csv.DictReader(csvfile) self._import_cryptocom_swap(data) # reset the iterator csvfile.seek(0) # pass the header since seek(0) make the first row to be the header next(data) for row in data: try: self._consume_cryptocom_entry(row) except UnknownAsset as e: self.db.msg_aggregator.add_warning( f'During cryptocom CSV import found action with unknown ' f'asset {e.asset_name}. Ignoring entry', ) continue except DeserializationError as e: self.db.msg_aggregator.add_warning( f'Error during cryptocom CSV import deserialization. ' f'Error was {str(e)}. Ignoring entry', ) continue except UnsupportedCryptocomEntry as e: self.db.msg_aggregator.add_warning(str(e)) continue return None
{ "pile_set_name": "Github" }
/** * Marlin 3D Printer Firmware * Copyright (C) 2019 MarlinFirmware [https://github.com/MarlinFirmware/Marlin] * * Based on Sprinter and grbl. * Copyright (C) 2011 Camiel Gubbels / Erik van der Zalm * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #ifndef HAL_PINSDEBUG_TEENSY_H #define NUMBER_PINS_TOTAL NUM_DIGITAL_PINS #define MULTI_NAME_PAD 16 // space needed to be pretty if not first name assigned to a pin #define FTM0_CH0_PIN 22 #define FTM0_CH1_PIN 23 #define FTM0_CH2_PIN 9 #define FTM0_CH3_PIN 10 #define FTM0_CH4_PIN 6 #define FTM0_CH5_PIN 20 #define FTM0_CH6_PIN 21 #define FTM0_CH7_PIN 5 #define FTM1_CH0_PIN 3 #define FTM1_CH1_PIN 4 #define FTM2_CH0_PIN 29 #define FTM2_CH1_PIN 30 #define FTM3_CH0_PIN 2 #define FTM3_CH1_PIN 14 #define FTM3_CH2_PIN 7 #define FTM3_CH3_PIN 8 #define FTM3_CH4_PIN 35 #define FTM3_CH5_PIN 36 #define FTM3_CH6_PIN 37 #define FTM3_CH7_PIN 38 #ifdef __MK66FX1M0__ // Teensy3.6 #define TPM1_CH0_PIN 16 #define TPM1_CH1_PIN 17 #endif #define IS_ANALOG(P) ((P) >= analogInputToDigitalPin(0) && (P) <= analogInputToDigitalPin(9)) || ((P) >= analogInputToDigitalPin(12) && (P) <= analogInputToDigitalPin(20)) void HAL_print_analog_pin(char buffer[], int8_t pin) { if (pin <= 23) sprintf_P(buffer, PSTR("(A%2d) "), int(pin - 14)); else if (pin <= 39) sprintf_P(buffer, PSTR("(A%2d) "), int(pin - 19)); } void HAL_analog_pin_state(char buffer[], int8_t pin) { if (pin <= 23) sprintf_P(buffer, PSTR("Analog in =% 5d"), analogRead(pin - 14)); else if (pin <= 39) sprintf_P(buffer, PSTR("Analog in =% 5d"), analogRead(pin - 19)); } #define PWM_PRINT(V) do{ sprintf_P(buffer, PSTR("PWM: %4d"), 22); SERIAL_ECHO(buffer); }while(0) #define FTM_CASE(N,Z) \ case FTM##N##_CH##Z##_PIN: \ if (FTM##N##_C##Z##V) { \ PWM_PRINT(FTM##N##_C##Z##V); \ return true; \ } else return false /** * Print a pin's PWM status. * Return true if it's currently a PWM pin. */ bool HAL_pwm_status(int8_t pin) { char buffer[20]; // for the sprintf statements switch (pin) { FTM_CASE(0,0); FTM_CASE(0,1); FTM_CASE(0,2); FTM_CASE(0,3); FTM_CASE(0,4); FTM_CASE(0,5); FTM_CASE(0,6); FTM_CASE(0,7); FTM_CASE(1,0); FTM_CASE(1,1); FTM_CASE(2,0); FTM_CASE(2,1); FTM_CASE(3,0); FTM_CASE(3,1); FTM_CASE(3,2); FTM_CASE(3,3); FTM_CASE(3,4); FTM_CASE(3,5); FTM_CASE(3,6); FTM_CASE(3,7); case NOT_ON_TIMER: default: return false; } SERIAL_ECHOPGM(" "); } static void HAL_pwm_details(uint8_t pin) { /* TODO */ } #endif
{ "pile_set_name": "Github" }
{% extends "socialaccount/base.html" %} {% load i18n recaptcha2 %} {% block head_title %}{% trans "Register" %}{% endblock %} {% block head %} {% recaptcha_init request.LANGUAGE_CODE %} {% endblock %} {% block content %} <div class="central-form"> <h1>{% trans "Register" %}</h1> <p>{% blocktrans with provider_name=account.get_provider.name site_name=site.name %}You are about to use your {{provider_name}} account to login to {{site_name}}. As a final step, please complete the following form:{% endblocktrans %}</p> <form class="signup" id="signup_form" method="post" action="{% url 'socialaccount_signup' %}"> {% csrf_token %} {% include 'form-fields.html' %} <button class="btn btn-primary" type="submit">{% trans "Register" %} &raquo;</button> </form> </div> {% endblock %}
{ "pile_set_name": "Github" }
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ var gTestfile = 'regress-465483.js'; //----------------------------------------------------------------------------- var BUGNUMBER = 465483; var summary = 'Type instability leads to undefined being added as a string instead of as a number'; var actual = ''; var expect = ''; //----------------------------------------------------------------------------- test(); //----------------------------------------------------------------------------- function test() { enterFunc ('test'); printBugNumber(BUGNUMBER); printStatus (summary); expect = 'NaN'; jit(true); for each (i in [4, 'a', 'b', (void 0)]) print(actual = '' + (i + i)); jit(false); reportCompare(expect, actual, summary); exitFunc ('test'); }
{ "pile_set_name": "Github" }
# # Copyright (c) 2010-2020. Axon Framework # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # name=AxonTestConfiguration appenders = console appender.console.type = Console appender.console.name = STDOUT appender.console.layout.type = PatternLayout appender.console.layout.pattern = %d [%t] %-5p %-30.30c{1} %x - %m%n rootLogger.level = info rootLogger.appenderRefs = stdout rootLogger.appenderRef.stdout.ref = STDOUT logger.axon.name = org.axonframework logger.axon.level = info logger.axon.additivity = false logger.axon.appenderRefs = stdout logger.axon.appenderRef.stdout.ref = STDOUT
{ "pile_set_name": "Github" }
<!doctype HTML> <html> <meta charset="utf8"> <title>Content Visibility: navigating to a text fragment.</title> <link rel="author" title="Vladimir Levin" href="mailto:[email protected]"> <link rel="help" href="https://drafts.csswg.org/css-contain/#content-visibility"> <meta name="timeout" content="long"> <meta name="assert" content="content-visibility: auto subtrees are 'searchable' by text fragment links"> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <script src="/resources/testdriver.js"></script> <script src="/resources/testdriver-vendor.js"></script> <script src="/common/utils.js"></script> <script src="/scroll-to-text-fragment/stash.js"></script> <script> promise_test(t => new Promise((resolve, reject) => { const fragment = '#:~:text=hiddentext'; const key = token(); test_driver.bless("Open a URL with a text fragment directive", () => { window.open(`resources/text-fragment-target-auto.html?key=${key}${fragment}`, '_blank', 'noopener'); }); fetchResults(key, resolve, reject); }).then(data => { assert_equals(data.scrollPosition, "text"); assert_equals(data.target, "text"); }), "Fragment navigation with content-visibility; single text"); promise_test(t => new Promise((resolve, reject) => { const fragment = '#:~:text=start,end'; const key = token(); test_driver.bless("Open a URL with a text fragment directive", () => { window.open(`resources/text-fragment-target-auto.html?key=${key}${fragment}`, '_blank', 'noopener'); }); fetchResults(key, resolve, reject); }).then(data => { assert_equals(data.scrollPosition, "text2"); assert_equals(data.target, "text2and3ancestor"); }), "Fragment navigation with content-visibility; range across blocks"); </script>
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en-US"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <!-- Begin Jekyll SEO tag v2.5.0 --> <title>Using the Demo Generator | Azure DevOps Demo Generator</title> <meta name="generator" content="Jekyll v3.7.4" /> <meta property="og:title" content="Using the Demo Generator" /> <meta property="og:locale" content="en_US" /> <meta name="description" content="Azure DevOps Demo Generator" /> <meta property="og:description" content="Azure DevOps Demo Generator" /> <link rel="canonical" href="http://localhost:4000/using.html" /> <meta property="og:url" content="http://localhost:4000/using.html" /> <meta property="og:site_name" content="Azure DevOps Demo Generator" /> <script type="application/ld+json"> {"@type":"WebPage","url":"http://localhost:4000/using.html","headline":"Using the Demo Generator","description":"Azure DevOps Demo Generator","@context":"http://schema.org"}</script> <!-- End Jekyll SEO tag --> <link rel="stylesheet" href="/assets/css/style.css?v=71de869d35c4585adaf0b064bf89fd3929f3aa6a"> </head> <body> <div class="container-lg px-3 my-5 markdown-body"> <h1 id="using-the-azure-devops-demo-generator">Using the Azure DevOps Demo Generator</h1> <hr /> <ol> <li> <p>Browse to the <a href="https://azuredevopsdemogenerator.azurewebsites.net/">Azure DevOps Demo Generator site</a> by click the link, or copy <code class="highlighter-rouge">https://azuredevopsdemogenerator.azurewebsites.net/</code> into your browser’s URL field.</p> </li> <li> <p>Click <strong>Sign In</strong> and provide the Microsoft or Azure AD account credentials associated with an organization in Azure DevOps Services. If you don’t have an organization, click on <strong>Get Started for Free</strong> to create one and then log in with your credentials.</p> </li> </ol> <p><img src="/About-Azure-DevOps-Demo-Generator/images/homepage.png" alt="Image of VSTS Demo Generator V2 login" /></p> <ol> <li> <p>After you sign in, select <strong>Accept</strong> to grant the Demo Generator permissions to access your Azure DevOps account.</p> </li> <li> <p>Select the organization you will use to host the project created by the Azure DevOps Demo Generator. (You may have multiple accounts of which you are a member, and which are associated with your login, so choose carefully.) Provide a name for your project (such as “MyProjectDemo” ) that you and other contributors can use to identify it as a demo project.</p> </li> </ol> <p><img src="/About-Azure-DevOps-Demo-Generator/images/mainpage.png" alt="Image of the generator main page" /></p> <p>Lastly, select the demo project template you want to provision by clicking <strong>…</strong> (Browse) button.</p> <p><img src="/About-Azure-DevOps-Demo-Generator/images/templateselection.png" alt="Image of VSTS Demo Generator template selection screen" /></p> <blockquote> <p>The default template is <strong>SmartHotel360</strong>, which contains complete ASP.NET 2 web mobile and desktop business apps for a hotel, and can be deployed using Docker containers. Other templates include <strong>MyHealthClinic</strong>, which defines a team project for an ASP.NET Core app that deploys to Azure App Service; <strong>PartsUnlimited</strong>, which defines an ASP.NET app with customized CI/CD pipelines; and <strong>MyShuttle</strong>, which defines a Java app and Azure App service deployment.</p> </blockquote> <blockquote> <p>All four templates provide fictional Azure DevOps users and pre-populated Agile planning and tracking work items and data, along with source code in an Azure Repos Git repo, as well as access to Azure Pipelines.</p> </blockquote> <ol> <li> <p>Some templates may require additional extensions to be installed to your organization. The demo generation process checks to see if these extensions are already installed. If the extension is already installed, a green check will be displayed in front of the extension name. If the extension is <strong>not</strong> installed, select the empty check boxes to install the extension(s) to your account. When ready, click on <strong>Create Project</strong> button.</p> <blockquote> <p>If you want to manually install the extensions, click on the provided link for a specific extension, which takes you to the extension’s page on Azure DevOps Marketplace. From there, you can install the extension.</p> </blockquote> </li> <li> <p>Your project may take a couple of minutes for the Demo Generator to provision. When it completes, you will be provided with a link to the demo project.</p> </li> </ol> <p><img src="_img/projectcreated.png" alt="Image of Azure DevOps Demo Generator project created screen" /></p> <ol> <li>Select the link to go to the new demo Azure DevOps Services project and confirm it was successfully provisioned.</li> </ol> <p><img src="_img/projecthomepage.png" alt="Image of Azure DevOps Demo Generator provision confirmation screen" /></p> <blockquote> <p>You must provide your own information such as URLs, logins, password, and others for the configuration of demo endpoints that use Azure resources.</p> </blockquote> <hr /> <p>Next: <a href="/About-Azure-DevOps-Demo-Generator/Build-your-own-template">Building your own template</a></p> </div> <script src="https://cdnjs.cloudflare.com/ajax/libs/anchor-js/4.1.0/anchor.min.js" integrity="sha256-lZaRhKri35AyJSypXXs4o6OPFTbTmUoltBbDCbdzegg=" crossorigin="anonymous"></script> <script>anchors.add();</script> </body> </html>
{ "pile_set_name": "Github" }
/* This file is a part of libcds - Concurrent Data Structures library (C) Copyright Maxim Khizhinsky ([email protected]) 2006-2016 Source code repo: http://github.com/khizmax/libcds/ Download: http://sourceforge.net/projects/libcds/files/ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H #define CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H #include <cds/container/details/base.h> #include <cds/intrusive/details/lazy_list_base.h> #include <cds/urcu/options.h> namespace cds { namespace container { /// LazyList ordered list related definitions /** @ingroup cds_nonintrusive_helper */ namespace lazy_list { /// LazyList traits /** Either \p compare or \p less or both must be specified. */ struct traits { /// allocator used to allocate new node typedef CDS_DEFAULT_ALLOCATOR allocator; /// Key comparing functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key comparing /** Default is \p std::less<T>. */ typedef opt::none less; /// Specifies binary functor used for comparing keys for equality /** No default functor is provided. If \p equal_to option is not spcified, \p compare is used, if \p compare is not specified, \p less is used. */ typedef opt::none equal_to; /// Specifies list ordering policy. /** If \p sort is \p true, than list maintains items in sorted order, otherwise items are unordered. Default is \p true. Note that if \p sort is \p false then lookup operations scan entire list. */ static const bool sort = true; /// Lock type used to lock modifying items /** Default is cds::sync::spin */ typedef cds::sync::spin lock_type; /// back-off strategy used typedef cds::backoff::Default back_off; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") /** List of available options see \p opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; //@cond // LazyKVList: supporting for split-ordered list // key accessor (opt::none = internal key type is equal to user key type) typedef opt::none key_accessor; //@endcond }; /// Metafunction converting option list to \p lazy_list::traits /** \p Options are: - \p opt::lock_type - lock type for node-level locking. Default \p is cds::sync::spin. Note that <b>each</b> node of the list has member of type \p lock_type, therefore, heavy-weighted locking primitive is not acceptable as candidate for \p lock_type. - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key compare. Default is \p std::less<T>. - \p opt::equal_to - specifies binary functor for comparing keys for equality. This option is applicable only for unordered list. No default is provided. If \p equal_to is not specified, \p compare is used, if \p compare is not specified, \p less is used. - \p opt::sort - specifies ordering policy. Default value is \p true, i.e. the list is ordered. Note: unordering feature is not fully supported yet. - \p opt::back_off - back-off strategy used. If the option is not specified, \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter. - \p opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; #endif }; } // namespace lazy_list // Forward declarations template <typename GC, typename T, typename Traits=lazy_list::traits> class LazyList; template <typename GC, typename Key, typename Value, typename Traits=lazy_list::traits> class LazyKVList; // Tag for selecting lazy list implementation /** This struct is empty and it is used only as a tag for selecting LazyList as ordered list implementation in declaration of some classes. See \p split_list::traits::ordered_list as an example. */ struct lazy_list_tag {}; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H
{ "pile_set_name": "Github" }
####################################################################### ## ## Corresponding documentation: ## ## https://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAccesslog ## server.modules += ( "mod_accesslog" ) ## ## Default access log. ## accesslog.filename = log_root + "/lighttpd-access.log" ## ## The default format produces CLF compatible output. ## For available parameters see access.txt ## #accesslog.format = "%h %l %u %t \"%r\" %b %>s \"%{User-Agent}i\" \"%{Referer}i\"" ## ## If you want to log to syslog you have to unset the ## accesslog.use-syslog setting and uncomment the next line. ## #accesslog.use-syslog = "enable" # #######################################################################
{ "pile_set_name": "Github" }
// // ReleaseLeopardOrLater.xcconfig // // Xcode configuration file for building a Release configuration of a project // on Leopard or later. // // This is a _Configuration_ Xcode config file for use in the "Based on" popup // of the project configuration editor. Do _not_ use this as the config base // and individual Xcode target, there are other configuration files for that // purpose. // // Copyright 2006-2008 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy // of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations under // the License. // // This file will be going away, please migrate off it. Instead Apple wants // you to use the "current" SDK, use ReleaseMacOSX.xcconfig and set your min // supported OS version in your project file. // Pull in the general settings #include "../subconfig/General.xcconfig" // Leopard or later #include "../subconfig/LeopardOrLater.xcconfig" // Release settings #include "../subconfig/Release.xcconfig" // Merge settings #include "../subconfig/GTMMerge.xcconfig"
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <resources> <style name="ActionUI" parent="@android:style/Theme.Translucent.NoTitleBar"> </style> </resources>
{ "pile_set_name": "Github" }
![Game Developer Roadmap - 2020](./img/title.png) > Roadmap to becoming a game developer in 2020, inspired by [web-developer-roadmap](https://github.com/kamranahmedse/developer-roadmap). Below you find a set of charts demonstrating the paths that you can take and the technologies that you would want to adopt in order to become a frontend, backend or a devops. I made these charts for an old professor of mine who wanted something to share with his college students to give them a perspective; sharing them here to help the community. > Check out my [Github](https://github.com/utilForever) and say "hi" on [Twitter](https://twitter.com/utilForever). *** <h3 align="center"><strong>Purpose of these Roadmaps</strong></h3> > The purpose of these roadmaps is to give you an idea about the landscape and to guide you if you are confused about what to learn next and not to encourage you to pick what is hip and trendy. You should grow some understanding of why one tool would be better suited for some cases than the other and remember hip and trendy never means best suited for the job. <h3 align="center"><strong>Note to Beginners</strong></h3> > These roadmaps cover everything that is there to learn for the paths listed below. Don't feel overwhelmed, you don't need to learn it all in the beginning if you are just getting started. We are working on the beginner versions of these and will release it soon after we are done with the 2020 release of roadmaps. *** If you think that these can be improved in any way, please do suggest. ## Introduction ![Game Developr Roadmap Introduction](./img/intro.png) ## Client Roadmap ![Client Roadmap](./img/client.png) ## Server Roadmap ![Server Roadmap](./img/server.png) ## QA Roadmap ![QA Roadmap](./img/qa.png) ## 🚦 Wrap Up If you think any of the roadmaps can be improved, please do open a PR with any updates and submit any issues. Also, I will continue to improve this, so you might want to watch/star this repository to revisit. ## 🙌 Contribution The roadmaps are built using [Balsamiq](https://balsamiq.com/products/mockups/). Project file can be found at `/project-files` directory. To modify any of the roadmaps, open Balsamiq, click **Project > Import > Mockup JSON**, it will open the roadmap for you, update it, upload and update the images in readme and create a PR. - Open pull request with improvements - Discuss ideas in issues - Spread the word - Reach out to me directly at [email protected] or [![Twitter URL](https://img.shields.io/twitter/url/https/twitter.com/utilForever.svg?style=social&label=Follow%20%40utilForever)](https://twitter.com/utilForever) ## License <img align="right" src="http://opensource.org/trademarks/opensource/OSI-Approved-License-100x137.png"> The class is licensed under the [MIT License](http://opensource.org/licenses/MIT): Copyright &copy; 2020 [Chris Ohk](http://www.github.com/utilForever). Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
{ "pile_set_name": "Github" }
// Copyright 2015 The Neugram Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package stmt defines data structures representing Neugram statements. package stmt import ( "neugram.io/ng/syntax/expr" "neugram.io/ng/syntax/src" "neugram.io/ng/syntax/tipe" "neugram.io/ng/syntax/token" ) type Stmt interface { stmt() Pos() src.Pos // implements syntax.Node } type Import struct { Position src.Pos Name string Path string } type ImportSet struct { Position src.Pos Imports []*Import } type TypeDecl struct { Position src.Pos Name string Type *tipe.Named } type TypeDeclSet struct { Position src.Pos TypeDecls []*TypeDecl } type MethodikDecl struct { Position src.Pos Name string Type *tipe.Named Methods []*expr.FuncLiteral } // TODO InterfaceLiteral struct { Name string, MethodNames []string, Methods []*tipe.Func } type Const struct { Position src.Pos NameList []string Type tipe.Type Values []expr.Expr } type ConstSet struct { Position src.Pos Consts []*Const } type VarSet struct { Position src.Pos Vars []*Var } type Var struct { Position src.Pos NameList []string Type tipe.Type Values []expr.Expr } type Assign struct { Position src.Pos Decl bool Left []expr.Expr Right []expr.Expr // TODO: give up on multiple rhs values for now. } type Block struct { Position src.Pos Stmts []Stmt } type If struct { Position src.Pos Init Stmt Cond expr.Expr Body Stmt // always *BlockStmt Else Stmt } type For struct { Position src.Pos Init Stmt Cond expr.Expr Post Stmt Body Stmt // always *BlockStmt } type Switch struct { Position src.Pos Init Stmt Cond expr.Expr Cases []SwitchCase } type SwitchCase struct { Position src.Pos Conds []expr.Expr Default bool Body *Block } type TypeSwitch struct { Position src.Pos Init Stmt // initialization statement; or nil Assign Stmt // x := y.(type) or y.(type) Cases []TypeSwitchCase } type TypeSwitchCase struct { Position src.Pos Default bool Types []tipe.Type Body *Block } type Go struct { Position src.Pos Call *expr.Call } type Range struct { Position src.Pos Decl bool Key expr.Expr Val expr.Expr Expr expr.Expr Body Stmt // always *BlockStmt } type Return struct { Position src.Pos Exprs []expr.Expr } type Defer struct { Position src.Pos Expr expr.Expr } type Simple struct { Position src.Pos Expr expr.Expr } // Send is channel send statement, "a <- b". type Send struct { Position src.Pos Chan expr.Expr Value expr.Expr } type Branch struct { Position src.Pos Type token.Token // Continue, Break, Goto, or Fallthrough Label string } type Labeled struct { Position src.Pos Label string Stmt Stmt } type Select struct { Position src.Pos Cases []SelectCase } type SelectCase struct { Position src.Pos Default bool Stmt Stmt // a recv- or send-stmt Body *Block } type Bad struct { Position src.Pos Error error } func (s *Import) stmt() {} func (s *ImportSet) stmt() {} func (s *TypeDecl) stmt() {} func (s *TypeDeclSet) stmt() {} func (s *MethodikDecl) stmt() {} func (s *Const) stmt() {} func (s *ConstSet) stmt() {} func (s *Var) stmt() {} func (s *VarSet) stmt() {} func (s *Assign) stmt() {} func (s *Block) stmt() {} func (s *If) stmt() {} func (s *For) stmt() {} func (s *Switch) stmt() {} func (s *SwitchCase) stmt() {} func (s *TypeSwitch) stmt() {} func (s *TypeSwitchCase) stmt() {} func (s *Go) stmt() {} func (s *Range) stmt() {} func (s *Return) stmt() {} func (s *Defer) stmt() {} func (s *Simple) stmt() {} func (s *Send) stmt() {} func (s *Branch) stmt() {} func (s *Labeled) stmt() {} func (s *Select) stmt() {} func (s *Bad) stmt() {} func (s *Import) Pos() src.Pos { return s.Position } func (s *ImportSet) Pos() src.Pos { return s.Position } func (s *TypeDecl) Pos() src.Pos { return s.Position } func (s *TypeDeclSet) Pos() src.Pos { return s.Position } func (s *MethodikDecl) Pos() src.Pos { return s.Position } func (s *Const) Pos() src.Pos { return s.Position } func (s *ConstSet) Pos() src.Pos { return s.Position } func (s *Var) Pos() src.Pos { return s.Position } func (s *VarSet) Pos() src.Pos { return s.Position } func (s *Assign) Pos() src.Pos { return s.Position } func (s *Block) Pos() src.Pos { return s.Position } func (s *If) Pos() src.Pos { return s.Position } func (s *For) Pos() src.Pos { return s.Position } func (s *Switch) Pos() src.Pos { return s.Position } func (s SwitchCase) Pos() src.Pos { return s.Position } func (s *TypeSwitch) Pos() src.Pos { return s.Position } func (s TypeSwitchCase) Pos() src.Pos { return s.Position } func (s *Go) Pos() src.Pos { return s.Position } func (s *Range) Pos() src.Pos { return s.Position } func (s *Return) Pos() src.Pos { return s.Position } func (s *Defer) Pos() src.Pos { return s.Position } func (s *Simple) Pos() src.Pos { return s.Position } func (s *Send) Pos() src.Pos { return s.Position } func (s *Branch) Pos() src.Pos { return s.Position } func (s *Labeled) Pos() src.Pos { return s.Position } func (s *Select) Pos() src.Pos { return s.Position } func (s SelectCase) Pos() src.Pos { return s.Position } func (s *Bad) Pos() src.Pos { return s.Position }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE log4j:configuration SYSTEM "log4j.dtd"> <log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false"> <appender name="dbexample" class="org.apache.log4j.RollingFileAppender"> <param name="File" value="${catalina.home}/logs/dbexample.log"/> <param name="Append" value="true" /> <param name="ImmediateFlush" value="true" /> <param name="MaxFileSize" value="20MB" /> <param name="MaxBackupIndex" value="10" /> <layout class="org.apache.log4j.PatternLayout"> <param name="ConversionPattern" value="%-4r [%t] %-5p %c %x - %m%n" /> </layout> </appender> <logger name="com.journaldev" additivity="false"> <level value="DEBUG" /> <appender-ref ref="dbexample"/> </logger> <root> <level value="debug" /> <appender-ref ref="dbexample" /> </root> </log4j:configuration>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!-- The contents of this file are subject to the terms of the Common Development and Distribution License (the License). You may not use this file except in compliance with the License. You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the specific language governing permission and limitations under the License. When distributing Covered Software, include this CDDL Header Notice in each file and include the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL Header, with the fields enclosed by brackets [] replaced by your own identifying information: "Portions copyright [year] [name of copyright owner]". Copyright 2014 ForgeRock AS. --> <!DOCTYPE ModuleProperties PUBLIC "=//iPlanet//Authentication Module Properties XML Interface 1.0 DTD//EN" "jar://com/sun/identity/authentication/Auth_Module_Properties.dtd"> <ModuleProperties moduleName="Scripted" version="1.0" > <Callbacks length="0" order="1" timeout="600" header="#WILL NOT BE SHOWN#" /> <Callbacks length="2" order="2" timeout="120" header="Sign in to OpenAM" > <HiddenValueCallback> <Id>clientScriptOutputData</Id> </HiddenValueCallback> <TextOutputCallback messageType="script">PLACEHOLDER</TextOutputCallback> </Callbacks> </ModuleProperties>
{ "pile_set_name": "Github" }
/* * Copyright 2017, GeoSolutions Sas. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ var expect = require('expect'); var { SET_SEARCH_CONFIG_PROP, RESET_SEARCH_CONFIG, UPDATE_SERVICE, setSearchConfigProp, restServiceConfig, updateService } = require('../searchconfig'); describe('Test correctness of the searchconfig actions', () => { it('resetServiceConfig', () => { const testPage = 1; var retval = restServiceConfig(testPage); expect(retval).toExist(); expect(retval.type).toBe(RESET_SEARCH_CONFIG); expect(retval.page).toBe(testPage); }); it('setSearchConfigProp', () => { const testProperty = 'prop'; const testValue = 'val'; var retval = setSearchConfigProp(testProperty, testValue); expect(retval).toExist(); expect(retval.type).toBe(SET_SEARCH_CONFIG_PROP); expect(retval.property).toBe(testProperty); expect(retval.value).toBe(testValue); }); it('updateService', () => { const testService = "service"; const testIdx = 1; var retval = updateService(testService, testIdx); expect(retval).toExist(); expect(retval.type).toBe(UPDATE_SERVICE); expect(retval.service).toBe(testService); expect(retval.idx).toBe(testIdx); }); });
{ "pile_set_name": "Github" }
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2020, John McNamara, [email protected] # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('chart_gradient06.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'column'}) chart.axis_ids = [61363328, 61364864] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) worksheet.write_column('C1', data[2]) chart.add_series({ 'values': '=Sheet1!$A$1:$A$5', 'gradient': { 'colors': ['#DDEBCF', '#9CB86E', '#156B13'], 'type': 'path' } }) chart.add_series({'values': '=Sheet1!$B$1:$B$5'}) chart.add_series({'values': '=Sheet1!$C$1:$C$5'}) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
{ "pile_set_name": "Github" }
""" author: shreyansh kushwaha made on : 08.09.2020 """ board = {'1' : ' ','2' : ' ','3' : ' ','4' : ' ','5' : ' ','6' : ' ','7' : ' ','8' : ' ','9' : ' '} import random, os, check, time p1 = input("Enter your name player 1 (symbol X):\n") p2 = input("Enter your name player 2 (symbol O):\n") score1, score2, score_tie = [], [], [] total_moves =0 player = random.randint(1, 2) time.sleep(2.6) os.system("cls") if player == 1: print(f" {p1} won the toss.") else: print(f" {p2} won the toss") time.sleep(3) print(" Let us begin") time.sleep(2) def toggaleplayer(player): if player == 1: player = 2 elif player == 2: player = 1 def playagain(): inp = input("Do you want to play again??(Y/N)\n") if inp.upper() == "Y": a = toggaleplayer(player) restart(a) elif inp.upper() == "N": os.system("cls") print("Thanks for playing") print(f"Number of times {p1} won : {len(score1)}.") print(f"Number of times {p2} won : {len(score2)}.") print(f"Number of ties: {len(score_tie)}.") abc = input() quit() else: print("Invalid input") quit() def restart(a): total_moves, board =0, {'1' : ' ','2' : ' ','3' : ' ','4' : ' ','5' : ' ','6' : ' ','7' : ' ','8' : ' ','9' : ' '} while True: os.system("cls") print(board['1'] + '|' + board['2'] + '|' + board['3'] ) print('-+-+-') print(board['4'] + '|' + board['5'] + '|' + board['6'] ) print('-+-+-') print(board['7'] + '|' + board['8'] + '|' + board['9'] ) check.check(total_moves,score1, score2, score_tie, playagain, board, p1, p2) while True: if a == 1: p1_input = input(f"Its {p1}'s chance..\nwhere do you want to place your move:") if p1_input.upper() in board and board[p1_input.upper()] == " ": board[p1_input.upper()] = 'X' a = 2 break else: # on wrong input print("Invalid input \n Enter again. ") continue else: p2_input = input(f"Its {p2}'s chance..\nwhere do you want to place your move:") if p2_input.upper() in board and board[p2_input.upper()] == " ": board[p2_input.upper()] = 'O' a = 1 break else: print("Invalid Input") continue total_moves += 1 while True: os.system("cls") print(board['1'] + '|' + board['2'] + '|' + board['3'] ) print('-+-+-') print(board['4'] + '|' + board['5'] + '|' + board['6'] ) print('-+-+-') print(board['7'] + '|' + board['8'] + '|' + board['9'] ) check.check(total_moves,score1, score2, score_tie, playagain, board, p1, p2) while True: if player == 1: p1_input = input(f"Its {p1}'s chance..\nwhere do you want to place your move:") if p1_input.upper() in board and board[p1_input.upper()] == " ": board[p1_input.upper()] = 'X' player = 2 break else: # on wrong input print("Invalid input ") continue else: p2_input = input(f"Its {p2}'s chance..\nwhere do you want to place your move:") if p2_input.upper() in board and board[p2_input.upper()] == " ": board[p2_input.upper()] = 'O' player = 1 break else: print("Invalid Input") continue total_moves += 1
{ "pile_set_name": "Github" }
// // Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard. // @class NSData; @protocol SETransceiver - (NSData *)transceive:(NSData *)arg1 error:(id *)arg2; @end
{ "pile_set_name": "Github" }
// Base jqxgrid options and utils var base_jqxgrid_options = { altrows: true, columnsheight: 40, rowsheight: 40, filterrowheight: 40, width: '100%', editable: false, columnsresize: true, groupable: false, sortable: true, filterable: true, showstatusbar: false, statusbarheight: 50, showaggregates: true, pageable: true, pagermode: 'default', autoheight: true, autorowheight: false, // auto? enabletooltips: true, // another solution: http://www.jqwidgets.com/community/topic/grid-tooltip/ pagesize: 10, pagesizeoptions: ['10', '15', '20', '30', '40', '50', '100'], virtualmode: false }; var draw_grid = function(selector, datafields, columns, server_pagination, custom_source_data, custom_grid_options) { var source = { url: window.location.href, datafields: datafields, datatype: "json", id: 'pk', root: 'data', data: { enable_pagination: false }, beforeprocessing: function (data) { // for server-side pagination source.totalrecords = data.total_records; render_error(data); }, loadError: function (xhr, status, error){ if(xhr.responseText=='logout'){ window.location.href = login_url } } }; if (custom_source_data) { if (custom_source_data.badgeClass){ custom_source_data.loadComplete = function(data){ $('.'+custom_source_data.badgeClass).text(data.total_records || 0) } } source = $.extend({}, source, custom_source_data); } var dataAdapter = new $.jqx.dataAdapter(source); var current_jqxgrid_options = { source: dataAdapter, columns: columns, // for server-side pagination rendergridrows: function () { return dataAdapter.records; }, updatefilterconditions: function (type, defaultconditions) { var stringcomparisonoperators = ['EMPTY', 'NOT_EMPTY', 'CONTAINS', 'CONTAINS_CASE_SENSITIVE', 'FULL_TEXT_SEARCH', 'DOES_NOT_CONTAIN', 'DOES_NOT_CONTAIN_CASE_SENSITIVE', 'STARTS_WITH', 'STARTS_WITH_CASE_SENSITIVE', 'ENDS_WITH', 'ENDS_WITH_CASE_SENSITIVE', 'EQUAL', 'EQUAL_CASE_SENSITIVE', 'NULL', 'NOT_NULL']; var numericcomparisonoperators = ['EQUAL', 'NOT_EQUAL', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'GREATER_THAN', 'GREATER_THAN_OR_EQUAL', 'NULL', 'NOT_NULL']; var datecomparisonoperators = ['EQUAL', 'NOT_EQUAL', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'GREATER_THAN', 'GREATER_THAN_OR_EQUAL', 'NULL', 'NOT_NULL']; var booleancomparisonoperators = ['EQUAL', 'NOT_EQUAL']; switch (type) { case 'stringfilter': return stringcomparisonoperators; case 'numericfilter': return numericcomparisonoperators; case 'datefilter': return datecomparisonoperators; case 'booleanfilter': return booleancomparisonoperators; } } }; if (custom_grid_options) { current_jqxgrid_options = $.extend({}, current_jqxgrid_options, custom_grid_options); } if (typeof server_pagination !== 'undefined' && server_pagination) { current_jqxgrid_options.virtualmode = true; current_jqxgrid_options.autoheight = true; current_jqxgrid_options.source._source.data.enable_pagination = server_pagination; current_jqxgrid_options.source._source.sort = function () { // update the grid and send a request to the server. $(selector).jqxGrid('updatebounddata'); }; current_jqxgrid_options.source._source.filter = function () { // update the grid and send a request to the server. $(selector).jqxGrid('updatebounddata', 'filter'); }; } var opts = $.extend({}, base_jqxgrid_options, current_jqxgrid_options); // needed to add custom filters (full text search) $(selector).bind('bindingcomplete', function (a, b) { var localizationObject = {}; filterstringcomparisonoperators = ['empty', 'not empty', 'contains', 'contains(match case)', 'full text search(or contains)', 'does not contain', 'does not contain(match case)', 'starts with', 'starts with(match case)', 'ends with', 'ends with(match case)', 'equal', 'equal(match case)', 'null', 'not null']; filternumericcomparisonoperators = ['equal', 'not equal', 'less than', 'less than or equal', 'greater than', 'greater than or equal', 'null', 'not null']; filterdatecomparisonoperators = ['equal', 'not equal', 'less than', 'less than or equal', 'greater than', 'greater than or equal', 'null', 'not null']; filterbooleancomparisonoperators = ['equal', 'not equal']; localizationObject.filterstringcomparisonoperators = filterstringcomparisonoperators; localizationObject.filternumericcomparisonoperators = filternumericcomparisonoperators; localizationObject.filterdatecomparisonoperators = filterdatecomparisonoperators; localizationObject.filterbooleancomparisonoperators = filterbooleancomparisonoperators; // change default message for empty data set if user_projects_selected is empty if (!window.user_projects_selected.length){ localizationObject.emptydatastring = "Select Project(s) Above for Viewing Data"; $(".project_selection label") .fadeOut(300) .fadeIn(300) .fadeOut(300) .fadeIn(300) } else if (window.table_warning) { localizationObject.emptydatastring = window.table_warning; } else { localizationObject.emptydatastring = "No data to display"; } $(selector).jqxGrid('localizestrings', localizationObject); }); $(selector).jqxGrid(opts) }; // custom cell renderer for jqxgrid function renderCell(defaulthtml, new_value) { var el = $(defaulthtml); el.html(new_value); return el.clone().wrap('<div>').parent().html(); } function linkFormatter(defaulthtml, url, val, new_window, button_options) { if (new_window || window.name == 'new'){ var window_width = $(window).width()*0.8; var window_height = $(window).height()*0.8; var new_value = '<a href="' + url + '" onclick="window.open(this.href, \'new\', \'width=' + window_width + ', height=' + window_height + ',scrollbars\'); return false;">' + val + '</a>'; } else { new_value = '<a href="' + url + '">' + val + '</a>'; } if (button_options) { new_value = $(new_value) .addClass('btn') .addClass(button_options.klass) .css({margin: 0, height: '100%', width: '100%'}); if (button_options.css){ new_value.css(button_options.css) } defaulthtml = $(defaulthtml); defaulthtml.css({margin: 0, padding: '2px', height: '100%'}) } return renderCell(defaulthtml, new_value); } function defaultLinkFormatter(index, columnfield, value, defaulthtml, columnproperties, row) { return linkFormatter(defaulthtml, row.url, value); } function baseLinkFormatter(url, value, defaulthtml, columnproperties) { if (columnproperties.cellsformat != '') { if ($.jqx.dataFormat) { if ($.jqx.dataFormat.isDate(value)) { value = $.jqx.dataFormat.formatdate(value, columnproperties.cellsformat); } else if ($.jqx.dataFormat.isNumber(value)) { value = $.jqx.dataFormat.formatnumber(value, columnproperties.cellsformat); } } } return linkFormatter(defaulthtml, url, value); } function updateLinkFormatter(index, columnfield, value, defaulthtml, columnproperties, row) { var main = window.location.pathname.split('/')[1]; var url = ['', main, row.slug || row.pk, ''].join('/'); return baseLinkFormatter(url, value, defaulthtml, columnproperties); } function detailLinkFormatter(index, columnfield, value, defaulthtml, columnproperties, row) { var main = window.location.pathname.split('/')[1]; var url = ['', main, row.slug || row.pk, 'detail', ''].join('/'); return baseLinkFormatter(url, value, defaulthtml, columnproperties); } function bool_renderer(index, columnfield, value, defaulthtml, columnproperties, row) { var fa_cls = value ? 'fa-check-square-o' : 'fa-square-o'; value = '<i class="fa ' + fa_cls + '"></i>'; return renderCell(defaulthtml, value) } function render_in_table_knob(val, row, columnfield) { var $knob = $('<div><input type="text" class="in-table-knob"></div>'); $knob .find('input') .attr('value', (val || 0) + '%') .knob({ readOnly: true, width: 50, height: 50, skin: "tron", fgColor: "#85AED2", inputColor: "#666", draw: function () { $(this.i).css({'font-size': '10px', 'margin-top': '18px'}); } }); var $canvas = $knob.find('canvas'); var dataURL = $canvas[0].toDataURL(); $canvas.remove(); $knob.prepend($('<img src="' + dataURL + '" width="50px" height="50px">')); // TODO: update if (columnfield=='progress') { $knob.attr('title', 'Total Documents: ' + row.total_documents_count + '\nCompleted Documents: ' + row.completed_documents_count) } return $knob; } function knob_cellsrenderer(index, columnfield, value, defaulthtml, columnproperties, row) { var knobbed = render_in_table_knob(value, row, columnfield); var $defaulthtml = $(defaulthtml).css('margin-top', '4px'); return renderCell($defaulthtml, knobbed); } function inactive_cellsclassrenderer(row, columnfield, value, data) { if (!data.is_active){ return 'inactive' } } function note_renderer(index, columnfield, value, defaulthtml, columnproperties, row) { // this throws error on simple text like ",.'p" // var $defaulthtml = $(defaulthtml).attr('title', $(value).text()); var $defaulthtml = $(defaulthtml); return renderCell($defaulthtml, value) } // helpers for jquery-confirm popups function reviewers_renderer(index, columnfield, value, defaulthtml, columnproperties, row){ var $defaulthtml = $(defaulthtml).prop('title', row.reviewers_usernames); return renderCell($defaulthtml, value); } var cancel_button_action = { text: 'Cancel', btnClass: 'btn-u btn-sm btn-l', action: function(){} }; var ajax_error_handler = function(xhr){ console.log(xhr); SEMICOLON.widget.notifications($('<span class="notification" data-notify-type="danger" data-notify-msg="Error"></span>')); }; var ajax_success_handler = function(response){ var status = response.level || response.status; SEMICOLON.widget.notifications($('<span class="notification" data-notify-type="' + status + '" data-notify-msg="' + response.message.replace(/(['"])/g, "") + '"></span>')) }; // remove item on "Yes" button click - send ajax request, refresh table function show_remove_popup(url, grid){ var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'orange', icon: 'fa fa-warning text-warning', title: 'Delete this object?', backgroundDismiss: true, content: 'url:' + url, buttons: { remove: { text: 'Remove', btnClass: 'btn-u btn-sm btn-w', action: function(){ $.ajax({ method: 'POST', url: url, data: { csrfmiddlewaretoken: token }, success: function(response){ grid.jqxGrid('updatebounddata'); ajax_success_handler(response) }, error: ajax_error_handler }) } }, cancel: cancel_button_action } }); } // tag text unit popup function tag_popup(target, pk, grid, tag_pk, tag) { if (typeof tag_pk === "undefined"){ tag_pk = null; tag = '' } if (target == 'document') { var url = doc_tag_url; var title = 'Tag Document' } else if (target == 'text_unit') { url = tu_tag_url; title = 'Tag Text Unit' } else if (target == 'cluster_documents') { url = cl_doc_tag_url; title = "Tag cluster's documents" } var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'blue', icon: 'fa fa-tags', title: title, backgroundDismiss: true, content: '<input type="text" class="form-control" name="tag" id="tag" placeholder="Tag" value="' + tag + '">', buttons: { tag: { text: 'Save', btnClass: 'btn-u btn-sm', action: function(){ var tag = this.$content.find('input').val(); if (tag) { $.ajax({ method: 'POST', url: url, data: { csrfmiddlewaretoken: token, owner_id: pk, tag_pk: tag_pk, tag: tag }, success: function(response){ if (grid){ $(grid).jqxGrid('updatebounddata'); } ajax_success_handler(response) }, error: ajax_error_handler }) } } }, cancel: cancel_button_action } }) } // classify text unit popup function classify_text_unit_popup(pk, grid) { var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'blue', icon: 'fa fa-gavel', title: 'Classify Text Unit', backgroundDismiss: true, buttons: { classify: { text: 'Save', btnClass: 'btn-u btn-sm', action: function(){ var class_name = this.$content.find('#text_unit_classify_class_name').val(); var class_value = this.$content.find('#text_unit_classify_class_value').val(); if (class_name && class_value) { $.ajax({ method: 'POST', url: tuc_submit_url, data: { csrfmiddlewaretoken: token, owner_id: pk, class_name: class_name, class_value: class_value }, success: function(response){ if (grid){ $(grid).jqxGrid('updatebounddata'); } ajax_success_handler(response) }, error: ajax_error_handler }) } } }, cancel: cancel_button_action }, content: '<input type="text" class="form-control" id="text_unit_classify_class_name" name="class_name" placeholder="Class Name">' + '<input type="text" class="form-control" id="text_unit_classify_class_value" name="class_value" placeholder="Class Value">', onContentReady: function () { this.$content.css('overflow-y', 'scroll'); addTypeahead(tuc_type_name_url, '#text_unit_classify_class_name', 'text_unit_classify_class_name'); addTypeahead(tuc_type_value_url, '#text_unit_classify_class_value', 'text_unit_classify_class_value'); } }) } // mark Document completed OR reopen function mark_document_completed(text, url, grid) { var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'orange', icon: 'fa fa-gavel text-warning', title: text, content: '', backgroundDismiss: true, buttons: { action: { text: 'Go', btnClass: 'btn-u btn-sm btn-w', action: function(){ $.ajax({ method: 'POST', url: url, data: { csrfmiddlewaretoken: token }, success: function(response){ var is_nested = grid.hasClass('sub-grid'); var task_queue_grid = is_nested ? grid.parents('.jqxgrid') : grid; task_queue_grid.jqxGrid('updatebounddata'); if (is_nested) { // for Task Queue list view task_queue_grid.on('bindingcomplete', function(){ task_queue_grid.jqxGrid('showrowdetails', grid.attr('id').replace('grid', '')) }); } ajax_success_handler(response) }, error: ajax_error_handler }) } }, cancel: cancel_button_action } }); } // remove Document from Task Queue function remove_document_from_task_queue(text, url, grid) { var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'orange', icon: 'fa fa-remove text-warning', title: text, content: '', useBootstrap: false, boxWidth: "500px", backgroundDismiss: true, buttons: { action: { text: 'Confirm', btnClass: 'btn-u btn-sm btn-w', action: function(){ $.ajax({ method: 'POST', url: url, data: { csrfmiddlewaretoken: token }, success: function(response){ var is_nested = grid.hasClass('sub-grid'); var task_queue_grid = is_nested ? grid.parents('.jqxgrid') : grid; task_queue_grid.jqxGrid('updatebounddata'); if (is_nested) { // for Task Queue list view task_queue_grid.on('bindingcomplete', function(){ task_queue_grid.jqxGrid('showrowdetails', grid.attr('id').replace('grid', '')) }); } else { // TODO!!! append removed task queue name/url in dropdown "Add to Task Queue } ajax_success_handler(response) }, error: ajax_error_handler }) } }, cancel: cancel_button_action } }); } // purge task function purge_task_popup(pk, url, grid) { var token = jQuery("[name=csrfmiddlewaretoken]").val(); $.confirm({ type: 'orange', icon: 'fa fa-eraser text-warning', title: 'Purge this task?', content: '', backgroundDismiss: true, buttons: { action: { text: 'Confirm', btnClass: 'btn-u btn-sm btn-w', action: function(){ $.ajax({ method: 'POST', url: url, data: { task_pk: pk, csrfmiddlewaretoken: token }, success: function(response){ grid.jqxGrid('updatebounddata'); ajax_success_handler(response) }, error: ajax_error_handler }) } }, cancel: cancel_button_action } }); } // open popup window on menu button click in jqxgrid var show_menu = function (menu_data, grid, pk, width) { var default_link_data = { url: '#', cls: '', icon: '', onclick: '', text: '', target: '_parent' }; var ul = $('<ul class="popup-menu"></ul>'); $.each(menu_data, function(index, item){ item = $.extend({}, default_link_data, item); var li = $('<li><a href="' + item.url + '" class="' + item.cls + '" target"' + item.target + '" onclick="' + item.onclick + '" data-pk="' + pk + '"><i class="' + item.icon + '"></i>' + item.text + '</a></li>'); ul.append(li) }); width = typeof width === "undefined" ? 200 : width; var jc = $.dialog({ title: null, content: ul, type: 'blue', animation: 'right', closeAnimation: 'right', backgroundDismiss: true, useBootstrap: false, boxWidth: width + "px", closeIconClass: 'fa fa-remove', onContentReady: function () { $('.popup-menu a').click(function() { jc.close(); }); $('.popup-menu a.remove').click(function(event) { event.preventDefault(); show_remove_popup($(this).attr('href'), grid) }); $('.popup-menu a.retrain-classifier').click(function(event) { event.preventDefault(); alert('Not implemented') }); $('.popup-menu a.tag-text-unit').click(function(event) { event.preventDefault(); tag_popup('text_unit', pk) }); $('.popup-menu a.classify-text-unit').click(function(event) { event.preventDefault(); classify_text_unit_popup(pk) }); $('.popup-menu a.purge-task').click(function(event) { event.preventDefault(); purge_task_popup(pk, $(this).attr('href'), grid) }); $('.popup-menu a.mark-document-completed').click(function(event) { event.preventDefault(); mark_document_completed($(this).text(), $(this).attr('href'), grid) }); $('.popup-menu a.remove-from-task-queue').click(function(event) { event.preventDefault(); remove_document_from_task_queue($(this).text(), $(this).attr('href'), grid) }); } }); return true }; function expand_row(event, grid, row_number){ event.preventDefault(); $(grid).jqxGrid('showrowdetails', row_number) } function apply_filter(grid, column_name, sentence, tabs_id, tab_no){ // grid and tabs_id - html elements, i.e. ".jqxgrid" and "#tab1" var filtergroup = new $.jqx.filter(); var filter = filtergroup.createfilter('stringfilter', sentence, 'contains'); filtergroup.addfilter(1, filter); $(grid).jqxGrid('addfilter', column_name, filtergroup); $(grid).jqxGrid('applyfilters'); $(tabs_id).tabs('option', 'active', tab_no) } // resize table if window or container resized $( window ).resize(function() { if ($('.jqxgrid')) { $('.jqxgrid').jqxGrid('refresh'); } }); $('.dev-page-sidebar-collapse, .dev-page-sidebar-minimize').click(function() { setTimeout(function(){ $('.jqxgrid').jqxGrid('refresh') }, 200) }); // init Update button $(".update-jqxgrid-data").click(function () { $(this).parent().siblings().find('.jqxgrid').jqxGrid('updatebounddata'); }); // init "Selection mode" button $('.jqxgrid-select-mode').click(function(){ $(this).toggleClass('active'); var mode = $(this).hasClass('active') ? 'singlecell' : 'singlerow'; $(this).parent().siblings().find('.jqxgrid').jqxGrid('selectionmode', mode); }); // init "Switch filterrow" $('.jqxgrid-filterrow').click(function(){ $(this).toggleClass('active'); var value = $(this).hasClass('active') ? true : false; $(this).parent().siblings().find('.jqxgrid').jqxGrid('showfilterrow', value); }); function now(){ var currentdate = new Date(); return currentdate.getFullYear().toString() + (currentdate.getMonth()+1).toString() + currentdate.getDate().toString() + '_' + currentdate.getHours().toString() + currentdate.getMinutes().toString(); } // init "csvExport" button $(".gridExport li").click(function() { var to = $(this).data('export-to'); var $grid = $(this).parents('.gridExport').parent().siblings().find('.jqxgrid'); var source = $grid.jqxGrid('source'); var query_data_initial = source._source.data; var query_data = $.extend({}, query_data_initial); var filterinfo = $grid.jqxGrid('getfilterinformation'); query_data['filterscount'] = filterinfo.length; for (var i = 0; i < filterinfo.length; i++) { var filter = filterinfo[i]; var filterdata = filter.filter.getfilters()[0]; query_data['filterdatafield'+i] = filter.filtercolumn; query_data['filteroperator'+i] = filterdata.operator; query_data['filtercondition'+i] = filterdata.condition; query_data['filtervalue'+i] = filterdata.value; } var sortinfo = $grid.jqxGrid('getsortinformation'); if (typeof sortinfo.sortcolumn !== 'undefined') { query_data['sortdatafield'] = sortinfo.sortcolumn; query_data['sortorder'] = sortinfo.sortdirection.ascending ? 'asc' : 'desc'; } query_data['export_to'] = to; query_data['return_raw_data'] = '1'; var conjunct = source._source.url.indexOf('?') >= 0 ? '&' : '?'; var downloadUrl = source._source.url.replace('#', '') + conjunct + $.param(query_data); window.open(downloadUrl); source._source.data = query_data_initial; }); function render_error(data) { window.table_warning = data.table_warning; if (!data) return; var messages = []; var severity = 'Warning'; if (data.error) { messages.push(safe_tags_replace(data.error)); severity = 'Error'; } if (data.warning) messages.push(safe_tags_replace(data.warning)); if (!messages.length) return; var msg = messages.join('<br/><br/>'); var dlgData = {title: severity, backgroundDismiss: true, content: msg}; if (!data.prompt_reload) $.alert(dlgData); else { dlgData.buttons = { confirm: { text: 'Reload', btnClass: 'btn-u btn-sm', action: function() { window.location.reload(); } }, cancel: { btnClass: 'btn-u btn-sm btn-l', text: 'Okay' } }; $.confirm(dlgData); } } var tagsToReplace = { '&': '&amp;', '<': '&lt;', '>': '&gt;' }; function replaceTag(tag) { return tagsToReplace[tag] || tag; } function safe_tags_replace(str) { return str.replace(/[&<>]/g, replaceTag); }
{ "pile_set_name": "Github" }
import helper from distutils.version import LooseVersion class check_configuration_ssl_allow_invalid_cert(): """ check_configuration_ssl_allow_invalid_cert: Bypasses the validation checks for TLS/SSL certificates on other servers in the cluster and allows the use of invalid certificates. When using the allowInvalidCertificates setting, MongoDB logs as a warning the use of the invalid certificate. MongoDB versions 2.6.4 and above, check the net.ssl.weakCertificateValidation configuration option. """ # References: # https://docs.mongodb.org/v2.6/reference/configuration-options/#net.ssl.allowInvalidCertificates TITLE = 'Allow Invalid Certificate' CATEGORY = 'Configuration' TYPE = 'configuration_file' SQL = None # SQL not needed... because this is NoSQL. verbose = False skip = False result = {} db = None def do_check(self, configuration_file): option = None version_number = self.db.server_info()['version'] if LooseVersion(version_number) >= LooseVersion("2.6.4"): option = 'net.ssl.allowInvalidCertificates' value = helper.get_yaml_config_value(configuration_file, option) if None == value: self.result['level'] = 'GREEN' self.result['output'] = '%s not found, not enabled.' % (option) elif False == value: self.result['level'] = 'GREEN' self.result['output'] = '%s is (%s) not enabled.' % (option, value) else: self.result['level'] = 'RED' self.result['output'] = '%s is (%s) enabled.' % (option, value) else: self.result['level'] = 'GRAY' self.result['output'] = 'This check does not apply to MongoDB versions below 2.6.4.' return self.result def __init__(self, parent): print('Performing check: ' + self.TITLE) self.verbose = parent.verbose self.db = parent.db
{ "pile_set_name": "Github" }
/** * Copyright (c) 2015 Bosch Software Innovations GmbH and others. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.eclipse.hawkbit.repository.jpa.model.helper; import java.util.ArrayList; import java.util.List; import org.eclipse.hawkbit.repository.jpa.EntityInterceptor; import org.springframework.beans.factory.annotation.Autowired; /** * A singleton bean which holds the {@link EntityInterceptor} to have all * interceptors in spring beans. * */ public final class EntityInterceptorHolder { private static final EntityInterceptorHolder SINGLETON = new EntityInterceptorHolder(); @Autowired(required = false) private final List<EntityInterceptor> entityInterceptors = new ArrayList<>(); private EntityInterceptorHolder() { } /** * @return the entity intreceptor holder singleton instance */ public static EntityInterceptorHolder getInstance() { return SINGLETON; } public List<EntityInterceptor> getEntityInterceptors() { return entityInterceptors; } }
{ "pile_set_name": "Github" }
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
{ "pile_set_name": "Github" }
/* Soot - a J*va Optimization Framework * Copyright (C) 2004 Jennifer Lhotak * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ package soot.javaToJimple; import java.util.*; public abstract class AbstractJimpleBodyBuilder { protected soot.jimple.JimpleBody body; public void ext(AbstractJimpleBodyBuilder ext){ this.ext = ext; if (ext.ext != null){ throw new RuntimeException("Extensions created in wrong order."); } ext.base = this.base; } public AbstractJimpleBodyBuilder ext(){ if (ext == null) return this; return ext; } private AbstractJimpleBodyBuilder ext = null; public void base(AbstractJimpleBodyBuilder base){ this.base = base; } public AbstractJimpleBodyBuilder base(){ return base; } private AbstractJimpleBodyBuilder base = this; protected soot.jimple.JimpleBody createJimpleBody(polyglot.ast.Block block, List formals, soot.SootMethod sootMethod){ return ext().createJimpleBody(block, formals, sootMethod); } /*protected soot.Value createExpr(polyglot.ast.Expr expr){ return ext().createExpr(expr); }*/ protected soot.Value createAggressiveExpr(polyglot.ast.Expr expr, boolean reduceAggressively, boolean reverseCondIfNec){ //System.out.println("in abstract"); return ext().createAggressiveExpr(expr, reduceAggressively, reverseCondIfNec); } protected void createStmt(polyglot.ast.Stmt stmt){ ext().createStmt(stmt); } protected boolean needsAccessor(polyglot.ast.Expr expr){ return ext().needsAccessor(expr); } protected soot.Local handlePrivateFieldAssignSet(polyglot.ast.Assign assign){ return ext().handlePrivateFieldAssignSet(assign); } protected soot.Local handlePrivateFieldUnarySet(polyglot.ast.Unary unary){ return ext().handlePrivateFieldUnarySet(unary); } protected soot.Value getAssignRightLocal(polyglot.ast.Assign assign, soot.Local leftLocal){ return ext().getAssignRightLocal(assign, leftLocal); } protected soot.Value getSimpleAssignRightLocal(polyglot.ast.Assign assign){ return ext().getSimpleAssignRightLocal(assign); } protected soot.Local handlePrivateFieldSet(polyglot.ast.Expr expr, soot.Value right, soot.Value base){ return ext().handlePrivateFieldSet(expr, right, base); } protected soot.SootMethodRef getSootMethodRef(polyglot.ast.Call call){ return ext().getSootMethodRef(call); } protected soot.Local generateLocal(soot.Type sootType){ return ext().generateLocal(sootType); } protected soot.Local generateLocal(polyglot.types.Type polyglotType){ return ext().generateLocal(polyglotType); } protected soot.Local getThis(soot.Type sootType){ return ext().getThis(sootType); } protected soot.Value getBaseLocal(polyglot.ast.Receiver receiver){ return ext().getBaseLocal(receiver); } protected soot.Value createLHS(polyglot.ast.Expr expr){ return ext().createLHS(expr); } protected soot.jimple.FieldRef getFieldRef(polyglot.ast.Field field){ return ext().getFieldRef(field); } protected soot.jimple.Constant getConstant(soot.Type sootType, int val){ return ext().getConstant(sootType, val); } }
{ "pile_set_name": "Github" }
/* * PRCMU clock implementation for ux500 platform. * * Copyright (C) 2012 ST-Ericsson SA * Author: Ulf Hansson <[email protected]> * * License terms: GNU General Public License (GPL) version 2 */ #include <linux/clk-provider.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> #include "clk.h" #define to_clk_prcmu(_hw) container_of(_hw, struct clk_prcmu, hw) struct clk_prcmu { struct clk_hw hw; u8 cg_sel; int is_prepared; int is_enabled; int opp_requested; }; /* PRCMU clock operations. */ static int clk_prcmu_prepare(struct clk_hw *hw) { int ret; struct clk_prcmu *clk = to_clk_prcmu(hw); ret = prcmu_request_clock(clk->cg_sel, true); if (!ret) clk->is_prepared = 1; return ret; } static void clk_prcmu_unprepare(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); if (prcmu_request_clock(clk->cg_sel, false)) pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, __clk_get_name(hw->clk)); else clk->is_prepared = 0; } static int clk_prcmu_is_prepared(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); return clk->is_prepared; } static int clk_prcmu_enable(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); clk->is_enabled = 1; return 0; } static void clk_prcmu_disable(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); clk->is_enabled = 0; } static int clk_prcmu_is_enabled(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); return clk->is_enabled; } static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_prcmu *clk = to_clk_prcmu(hw); return prcmu_clock_rate(clk->cg_sel); } static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct clk_prcmu *clk = to_clk_prcmu(hw); return prcmu_round_clock_rate(clk->cg_sel, rate); } static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_prcmu *clk = to_clk_prcmu(hw); return prcmu_set_clock_rate(clk->cg_sel, rate); } static int clk_prcmu_opp_prepare(struct clk_hw *hw) { int err; struct clk_prcmu *clk = to_clk_prcmu(hw); if (!clk->opp_requested) { err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)__clk_get_name(hw->clk), 100); if (err) { pr_err("clk_prcmu: %s fail req APE OPP for %s.\n", __func__, __clk_get_name(hw->clk)); return err; } clk->opp_requested = 1; } err = prcmu_request_clock(clk->cg_sel, true); if (err) { prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, (char *)__clk_get_name(hw->clk)); clk->opp_requested = 0; return err; } clk->is_prepared = 1; return 0; } static void clk_prcmu_opp_unprepare(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); if (prcmu_request_clock(clk->cg_sel, false)) { pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, __clk_get_name(hw->clk)); return; } if (clk->opp_requested) { prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, (char *)__clk_get_name(hw->clk)); clk->opp_requested = 0; } clk->is_prepared = 0; } static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw) { int err; struct clk_prcmu *clk = to_clk_prcmu(hw); if (!clk->opp_requested) { err = prcmu_request_ape_opp_100_voltage(true); if (err) { pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n", __func__, __clk_get_name(hw->clk)); return err; } clk->opp_requested = 1; } err = prcmu_request_clock(clk->cg_sel, true); if (err) { prcmu_request_ape_opp_100_voltage(false); clk->opp_requested = 0; return err; } clk->is_prepared = 1; return 0; } static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw) { struct clk_prcmu *clk = to_clk_prcmu(hw); if (prcmu_request_clock(clk->cg_sel, false)) { pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, __clk_get_name(hw->clk)); return; } if (clk->opp_requested) { prcmu_request_ape_opp_100_voltage(false); clk->opp_requested = 0; } clk->is_prepared = 0; } static struct clk_ops clk_prcmu_scalable_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, .is_prepared = clk_prcmu_is_prepared, .enable = clk_prcmu_enable, .disable = clk_prcmu_disable, .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; static struct clk_ops clk_prcmu_gate_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, .is_prepared = clk_prcmu_is_prepared, .enable = clk_prcmu_enable, .disable = clk_prcmu_disable, .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static struct clk_ops clk_prcmu_scalable_rate_ops = { .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; static struct clk_ops clk_prcmu_rate_ops = { .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static struct clk_ops clk_prcmu_opp_gate_ops = { .prepare = clk_prcmu_opp_prepare, .unprepare = clk_prcmu_opp_unprepare, .is_prepared = clk_prcmu_is_prepared, .enable = clk_prcmu_enable, .disable = clk_prcmu_disable, .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static struct clk_ops clk_prcmu_opp_volt_scalable_ops = { .prepare = clk_prcmu_opp_volt_prepare, .unprepare = clk_prcmu_opp_volt_unprepare, .is_prepared = clk_prcmu_is_prepared, .enable = clk_prcmu_enable, .disable = clk_prcmu_disable, .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; static struct clk *clk_reg_prcmu(const char *name, const char *parent_name, u8 cg_sel, unsigned long rate, unsigned long flags, struct clk_ops *clk_prcmu_ops) { struct clk_prcmu *clk; struct clk_init_data clk_prcmu_init; struct clk *clk_reg; if (!name) { pr_err("clk_prcmu: %s invalid arguments passed\n", __func__); return ERR_PTR(-EINVAL); } clk = kzalloc(sizeof(struct clk_prcmu), GFP_KERNEL); if (!clk) { pr_err("clk_prcmu: %s could not allocate clk\n", __func__); return ERR_PTR(-ENOMEM); } clk->cg_sel = cg_sel; clk->is_prepared = 1; clk->is_enabled = 1; clk->opp_requested = 0; /* "rate" can be used for changing the initial frequency */ if (rate) prcmu_set_clock_rate(cg_sel, rate); clk_prcmu_init.name = name; clk_prcmu_init.ops = clk_prcmu_ops; clk_prcmu_init.flags = flags; clk_prcmu_init.parent_names = (parent_name ? &parent_name : NULL); clk_prcmu_init.num_parents = (parent_name ? 1 : 0); clk->hw.init = &clk_prcmu_init; clk_reg = clk_register(NULL, &clk->hw); if (IS_ERR_OR_NULL(clk_reg)) goto free_clk; return clk_reg; free_clk: kfree(clk); pr_err("clk_prcmu: %s failed to register clk\n", __func__); return ERR_PTR(-ENOMEM); } struct clk *clk_reg_prcmu_scalable(const char *name, const char *parent_name, u8 cg_sel, unsigned long rate, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_scalable_ops); } struct clk *clk_reg_prcmu_gate(const char *name, const char *parent_name, u8 cg_sel, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_gate_ops); } struct clk *clk_reg_prcmu_scalable_rate(const char *name, const char *parent_name, u8 cg_sel, unsigned long rate, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_scalable_rate_ops); } struct clk *clk_reg_prcmu_rate(const char *name, const char *parent_name, u8 cg_sel, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_rate_ops); } struct clk *clk_reg_prcmu_opp_gate(const char *name, const char *parent_name, u8 cg_sel, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_opp_gate_ops); } struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name, const char *parent_name, u8 cg_sel, unsigned long rate, unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_opp_volt_scalable_ops); }
{ "pile_set_name": "Github" }
#import "GPUImageColorInvertFilter.h" #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING ( varying highp vec2 textureCoordinate; uniform sampler2D inputImageTexture; void main() { lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); } ); #else NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING ( varying vec2 textureCoordinate; uniform sampler2D inputImageTexture; void main() { vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); } ); #endif @implementation GPUImageColorInvertFilter - (id)init; { if (!(self = [super initWithFragmentShaderFromString:kGPUImageInvertFragmentShaderString])) { return nil; } return self; } @end
{ "pile_set_name": "Github" }
/* libFLAC - Free Lossless Audio Codec library * Copyright (C) 2000,2001,2002,2003,2004,2005,2006,2007 Josh Coalson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * - Neither the name of the Xiph.org Foundation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if HAVE_CONFIG_H # include <config.h> #endif #include "private/crc.h" /* CRC-8, poly = x^8 + x^2 + x^1 + x^0, init = 0 */ FLAC__byte const FLAC__crc8_table[256] = { 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 }; /* CRC-16, poly = x^16 + x^15 + x^2 + x^0, init = 0 */ unsigned FLAC__crc16_table[256] = { 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011, 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022, 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072, 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041, 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2, 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1, 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1, 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082, 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192, 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1, 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1, 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2, 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151, 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162, 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132, 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101, 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312, 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321, 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371, 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342, 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1, 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2, 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2, 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381, 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291, 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2, 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2, 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1, 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252, 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261, 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231, 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202 }; void FLAC__crc8_update(const FLAC__byte data, FLAC__uint8 *crc) { *crc = FLAC__crc8_table[*crc ^ data]; } void FLAC__crc8_update_block(const FLAC__byte *data, unsigned len, FLAC__uint8 *crc) { while(len--) *crc = FLAC__crc8_table[*crc ^ *data++]; } FLAC__uint8 FLAC__crc8(const FLAC__byte *data, unsigned len) { FLAC__uint8 crc = 0; while(len--) crc = FLAC__crc8_table[crc ^ *data++]; return crc; } unsigned FLAC__crc16(const FLAC__byte *data, unsigned len) { unsigned crc = 0; while(len--) crc = ((crc<<8) ^ FLAC__crc16_table[(crc>>8) ^ *data++]) & 0xffff; return crc; }
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This code is a modified version of the original Spark 1.0.2 implementation. * */ package com.massivedatascience.clusterer import com.massivedatascience.clusterer.MultiKMeansClusterer.ClusteringWithDistortion import com.massivedatascience.linalg.{ MutableWeightedVector, WeightedVector } import org.apache.spark.SparkContext._ import org.apache.spark.rdd.RDD import scala.collection.mutable.ArrayBuffer /** * A K-Means clustering implementation that performs multiple K-means clusterings simultaneously, * returning the one with the lowest cost. * */ //scalastyle:off @deprecated("use ColumnTrackingKMeans", "1.2.0") class MultiKMeans extends MultiKMeansClusterer { def cluster( maxIterations: Int, pointOps: BregmanPointOps, data: RDD[BregmanPoint], c: Seq[IndexedSeq[BregmanCenter]]): Seq[ClusteringWithDistortion] = { val centers = c.map(_.toArray).toArray def cluster(): Seq[ClusteringWithDistortion] = { val runs = centers.length val active = Array.fill(runs)(true) val costs = Array.fill(runs)(0.0) var activeRuns = new ArrayBuffer[Int] ++ (0 until runs) var iteration = 0 /* * Execute iterations of Lloyd's algorithm until all runs have converged. */ while (iteration < maxIterations && activeRuns.nonEmpty) { // remove the empty clusters logInfo(s"iteration $iteration") val activeCenters = activeRuns.map(r => centers(r)).toArray if (log.isInfoEnabled) { for (r <- 0 until activeCenters.length) logInfo(s"run ${activeRuns(r)} has ${activeCenters(r).length} centers") } // Find the sum and count of points mapping to each center val (centroids: Array[((Int, Int), WeightedVector)], runDistortion) = getCentroids(data, activeCenters) if (log.isInfoEnabled) { for (run <- activeRuns) logInfo(s"run $run distortion ${runDistortion(run)}") } for (run <- activeRuns) active(run) = false for (((runIndex: Int, clusterIndex: Int), cn: MutableWeightedVector) <- centroids) { val run = activeRuns(runIndex) if (cn.weight == 0.0) { active(run) = true centers(run)(clusterIndex) = null.asInstanceOf[BregmanCenter] } else { val centroid = cn.asImmutable active(run) = active(run) || pointOps.centerMoved(pointOps.toPoint(centroid), centers(run)(clusterIndex)) centers(run)(clusterIndex) = pointOps.toCenter(centroid) } } // filter out null centers for (r <- activeRuns) centers(r) = centers(r).filter(_ != null) // update distortions and print log message if run completed during this iteration for ((run, runIndex) <- activeRuns.zipWithIndex) { costs(run) = runDistortion(runIndex) if (!active(run)) logInfo(s"run $run finished in ${iteration + 1} iterations") } activeRuns = activeRuns.filter(active(_)) iteration += 1 } costs.zip(centers).map { case (x, y) => ClusteringWithDistortion(x, y.toIndexedSeq) } } def getCentroids( data: RDD[BregmanPoint], activeCenters: Array[Array[BregmanCenter]]): (Array[((Int, Int), WeightedVector)], Array[Double]) = { val sc = data.sparkContext val runDistortion = Array.fill(activeCenters.length)(sc.accumulator(0.0)) val bcActiveCenters = sc.broadcast(activeCenters) val result = data.mapPartitions[((Int, Int), WeightedVector)] { points => val bcCenters = bcActiveCenters.value val centers = bcCenters.map(c => Array.fill(c.length)(pointOps.make())) for (point <- points; (clusters, run) <- bcCenters.zipWithIndex) { val (cluster, cost) = pointOps.findClosest(clusters, point) runDistortion(run) += cost centers(run)(cluster).add(point) } val contribution = for ( (clusters, run) <- bcCenters.zipWithIndex; (contrib, cluster) <- clusters.zipWithIndex ) yield { ((run, cluster), centers(run)(cluster).asImmutable) } contribution.iterator }.aggregateByKey(pointOps.make())( (x, y) => x.add(y), (x, y) => x.add(y) ).map(x => (x._1, x._2.asImmutable)).collect() bcActiveCenters.unpersist() (result, runDistortion.map(x => x.localValue)) } cluster() } } //scalastyle:on
{ "pile_set_name": "Github" }
// DLLExports.cpp // // Notes: // Win2000: // If I register at HKCR\Folder\ShellEx then DLL is locked. // otherwise it unloads after explorer closing. // but if I call menu for desktop items it's locked all the time #include "../../../Common/MyWindows.h" #include <OleCtl.h> #include "../../../Common/MyInitGuid.h" #include "../../../Common/ComTry.h" #include "../../../Windows/DLL.h" #include "../../../Windows/ErrorMsg.h" #include "../../../Windows/NtCheck.h" #include "../../../Windows/Registry.h" #include "../FileManager/IFolder.h" #include "ContextMenu.h" static LPCTSTR k_ShellExtName = TEXT("7-Zip Shell Extension"); static LPCTSTR k_Approved = TEXT("Software\\Microsoft\\Windows\\CurrentVersion\\Shell Extensions\\Approved"); // {23170F69-40C1-278A-1000-000100020000} static LPCTSTR k_Clsid = TEXT("{23170F69-40C1-278A-1000-000100020000}"); DEFINE_GUID(CLSID_CZipContextMenu, k_7zip_GUID_Data1, k_7zip_GUID_Data2, k_7zip_GUID_Data3_Common, 0x10, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00); using namespace NWindows; HINSTANCE g_hInstance = 0; HWND g_HWND = 0; LONG g_DllRefCount = 0; // Reference count of this DLL. // #define ODS(sz) OutputDebugString(L#sz) class CShellExtClassFactory: public IClassFactory, public CMyUnknownImp { public: CShellExtClassFactory() { InterlockedIncrement(&g_DllRefCount); } ~CShellExtClassFactory() { InterlockedDecrement(&g_DllRefCount); } MY_UNKNOWN_IMP1_MT(IClassFactory) STDMETHODIMP CreateInstance(LPUNKNOWN, REFIID, void**); STDMETHODIMP LockServer(BOOL); }; STDMETHODIMP CShellExtClassFactory::CreateInstance(LPUNKNOWN pUnkOuter, REFIID riid, void **ppvObj) { // ODS("CShellExtClassFactory::CreateInstance()\r\n"); *ppvObj = NULL; if (pUnkOuter) return CLASS_E_NOAGGREGATION; CZipContextMenu *shellExt; try { shellExt = new CZipContextMenu(); } catch(...) { return E_OUTOFMEMORY; } if (!shellExt) return E_OUTOFMEMORY; HRESULT res = shellExt->QueryInterface(riid, ppvObj); if (res != S_OK) delete shellExt; return res; } STDMETHODIMP CShellExtClassFactory::LockServer(BOOL /* fLock */) { return S_OK; // Check it } #define NT_CHECK_FAIL_ACTION return FALSE; extern "C" BOOL WINAPI DllMain( #ifdef UNDER_CE HANDLE hInstance #else HINSTANCE hInstance #endif , DWORD dwReason, LPVOID) { if (dwReason == DLL_PROCESS_ATTACH) { g_hInstance = (HINSTANCE)hInstance; // ODS("In DLLMain, DLL_PROCESS_ATTACH\r\n"); NT_CHECK } else if (dwReason == DLL_PROCESS_DETACH) { // ODS("In DLLMain, DLL_PROCESS_DETACH\r\n"); } return TRUE; } // Used to determine whether the DLL can be unloaded by OLE STDAPI DllCanUnloadNow(void) { // ODS("In DLLCanUnloadNow\r\n"); return (g_DllRefCount == 0 ? S_OK : S_FALSE); } STDAPI DllGetClassObject(REFCLSID rclsid, REFIID riid, LPVOID* ppv) { // ODS("In DllGetClassObject\r\n"); *ppv = NULL; if (IsEqualIID(rclsid, CLSID_CZipContextMenu)) { CShellExtClassFactory *cf; try { cf = new CShellExtClassFactory; } catch(...) { return E_OUTOFMEMORY; } if (!cf) return E_OUTOFMEMORY; HRESULT res = cf->QueryInterface(riid, ppv); if (res != S_OK) delete cf; return res; } return CLASS_E_CLASSNOTAVAILABLE; // return _Module.GetClassObject(rclsid, riid, ppv); } static BOOL RegisterServer() { FString modulePath; if (!NDLL::MyGetModuleFileName(modulePath)) return FALSE; const UString modulePathU = fs2us(modulePath); CSysString clsidString = k_Clsid; CSysString s = TEXT("CLSID\\"); s += clsidString; { NRegistry::CKey key; if (key.Create(HKEY_CLASSES_ROOT, s, NULL, REG_OPTION_NON_VOLATILE, KEY_WRITE) != NOERROR) return FALSE; key.SetValue(NULL, k_ShellExtName); NRegistry::CKey keyInproc; if (keyInproc.Create(key, TEXT("InprocServer32"), NULL, REG_OPTION_NON_VOLATILE, KEY_WRITE) != NOERROR) return FALSE; keyInproc.SetValue(NULL, modulePathU); keyInproc.SetValue(TEXT("ThreadingModel"), TEXT("Apartment")); } #if !defined(_WIN64) && !defined(UNDER_CE) if (IsItWindowsNT()) #endif { NRegistry::CKey key; if (key.Create(HKEY_LOCAL_MACHINE, k_Approved, NULL, REG_OPTION_NON_VOLATILE, KEY_WRITE) == NOERROR) key.SetValue(clsidString, k_ShellExtName); } return TRUE; } STDAPI DllRegisterServer(void) { return RegisterServer() ? S_OK: SELFREG_E_CLASS; } static BOOL UnregisterServer() { const CSysString clsidString = k_Clsid; CSysString s = TEXT("CLSID\\"); s += clsidString; CSysString s2 = s; s2.AddAscii("\\InprocServer32"); RegDeleteKey(HKEY_CLASSES_ROOT, s2); RegDeleteKey(HKEY_CLASSES_ROOT, s); #if !defined(_WIN64) && !defined(UNDER_CE) if (IsItWindowsNT()) #endif { HKEY hKey; if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, k_Approved, 0, KEY_SET_VALUE, &hKey) == NOERROR) { RegDeleteValue(hKey, clsidString); RegCloseKey(hKey); } } return TRUE; } STDAPI DllUnregisterServer(void) { return UnregisterServer() ? S_OK: SELFREG_E_CLASS; }
{ "pile_set_name": "Github" }
/** * Marlin 3D Printer Firmware * Copyright (c) 2020 MarlinFirmware [https://github.com/MarlinFirmware/Marlin] * * Based on Sprinter and grbl. * Copyright (c) 2011 Camiel Gubbels / Erik van der Zalm * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #pragma once enum DGUSLCD_Screens : uint8_t { DGUSLCD_SCREEN_BOOT = 0, DGUSLCD_SCREEN_MAIN = 1, DGUSLCD_SCREEN_STATUS = 1, DGUSLCD_SCREEN_STATUS2 = 1, DGUSLCD_SCREEN_TEMPERATURE = 10, DGUSLCD_SCREEN_PREHEAT = 18, DGUSLCD_SCREEN_POWER_LOSS = 100, DGUSLCD_SCREEN_MANUALMOVE = 192, DGUSLCD_SCREEN_UTILITY = 120, DGUSLCD_SCREEN_FILAMENT_HEATING = 146, DGUSLCD_SCREEN_FILAMENT_LOADING = 148, DGUSLCD_SCREEN_FILAMENT_UNLOADING = 158, DGUSLCD_SCREEN_MANUALEXTRUDE = 160, DGUSLCD_SCREEN_SDFILELIST = 71, DGUSLCD_SCREEN_SDPRINTMANIPULATION = 73, DGUSLCD_SCREEN_SDPRINTTUNE = 75, DGUSLCD_SCREEN_FLC_PREHEAT = 94, DGUSLCD_SCREEN_FLC_PRINTING = 96, DGUSLCD_SCREEN_STEPPERMM = 212, DGUSLCD_SCREEN_PID_E = 214, DGUSLCD_SCREEN_PID_BED = 218, DGUSLCD_SCREEN_Z_OFFSET = 222, DGUSLCD_SCREEN_INFOS = 36, DGUSLCD_SCREEN_CONFIRM = 240, DGUSLCD_SCREEN_KILL = 250, ///< Kill Screen. Must always be 250 (to be able to display "Error wrong LCD Version") DGUSLCD_SCREEN_WAITING = 251, DGUSLCD_SCREEN_POPUP = 252, ///< special target, popup screen will also return this code to say "return to previous screen" DGUSLDC_SCREEN_UNUSED = 255 }; // Display Memory layout used (T5UID) // Except system variables this is arbitrary, just to organize stuff.... // 0x0000 .. 0x0FFF -- System variables and reserved by the display // 0x1000 .. 0x1FFF -- Variables to never change location, regardless of UI Version // 0x2000 .. 0x2FFF -- Controls (VPs that will trigger some action) // 0x3000 .. 0x4FFF -- Marlin Data to be displayed // 0x5000 .. -- SPs (if we want to modify display elements, e.g change color or like) -- currently unused // As there is plenty of space (at least most displays have >8k RAM), we do not pack them too tight, // so that we can keep variables nicely together in the address space. // UI Version always on 0x1000...0x1002 so that the firmware can check this and bail out. constexpr uint16_t VP_UI_VERSION_MAJOR = 0x1000; // Major -- incremented when incompatible constexpr uint16_t VP_UI_VERSION_MINOR = 0x1001; // Minor -- incremented on new features, but compatible constexpr uint16_t VP_UI_VERSION_PATCH = 0x1002; // Patch -- fixed which do not change functionality. constexpr uint16_t VP_UI_FLAVOUR = 0x1010; // lets reserve 16 bytes here to determine if UI is suitable for this Marlin. tbd. // Storage space for the Killscreen messages. 0x1100 - 0x1200 . Reused for the popup. constexpr uint16_t VP_MSGSTR1 = 0x1100; constexpr uint8_t VP_MSGSTR1_LEN = 0x20; // might be more place for it... constexpr uint16_t VP_MSGSTR2 = 0x1140; constexpr uint8_t VP_MSGSTR2_LEN = 0x20; constexpr uint16_t VP_MSGSTR3 = 0x1180; constexpr uint8_t VP_MSGSTR3_LEN = 0x20; constexpr uint16_t VP_MSGSTR4 = 0x11C0; constexpr uint8_t VP_MSGSTR4_LEN = 0x20; // Screenchange request for screens that only make sense when printer is idle. // e.g movement is only allowed if printer is not printing. // Marlin must confirm by setting the screen manually. constexpr uint16_t VP_SCREENCHANGE_ASK = 0x2000; constexpr uint16_t VP_SCREENCHANGE = 0x2001; // Key-Return button to new menu pressed. Data contains target screen in low byte and info in high byte. constexpr uint16_t VP_TEMP_ALL_OFF = 0x2002; // Turn all heaters off. Value arbitrary ;)= constexpr uint16_t VP_SCREENCHANGE_WHENSD = 0x2003; // "Print" Button touched -- go only there if there is an SD Card. constexpr uint16_t VP_CONFIRMED = 0x2010; // OK on confirm screen. // Buttons on the SD-Card File listing. constexpr uint16_t VP_SD_ScrollEvent = 0x2020; // Data: 0 for "up a directory", numbers are the amount to scroll, e.g -1 one up, 1 one down constexpr uint16_t VP_SD_FileSelected = 0x2022; // Number of file field selected. constexpr uint16_t VP_SD_FileSelectConfirm = 0x2024; // (This is a virtual VP and emulated by the Confirm Screen when a file has been confirmed) constexpr uint16_t VP_SD_ResumePauseAbort = 0x2026; // Resume(Data=0), Pause(Data=1), Abort(Data=2) SD Card prints constexpr uint16_t VP_SD_AbortPrintConfirmed = 0x2028; // Abort print confirmation (virtual, will be injected by the confirm dialog) constexpr uint16_t VP_SD_Print_Setting = 0x2040; constexpr uint16_t VP_SD_Print_LiveAdjustZ = 0x2050; // Data: 0 down, 1 up // Controls for movement (we can't use the incremental / decremental feature of the display at this feature works only with 16 bit values // (which would limit us to 655.35mm, which is likely not a problem for common setups, but i don't want to rule out hangprinters support) // A word about the coding: The VP will be per axis and the return code will be an signed 16 bit value in 0.01 mm resolution, telling us // the relative travel amount t he user wants to do. So eg. if the display sends us VP=2100 with value 100, the user wants us to move X by +1 mm. constexpr uint16_t VP_MOVE_X = 0x2100; constexpr uint16_t VP_MOVE_Y = 0x2102; constexpr uint16_t VP_MOVE_Z = 0x2104; constexpr uint16_t VP_MOVE_E0 = 0x2110; constexpr uint16_t VP_MOVE_E1 = 0x2112; //constexpr uint16_t VP_MOVE_E2 = 0x2114; //constexpr uint16_t VP_MOVE_E3 = 0x2116; //constexpr uint16_t VP_MOVE_E4 = 0x2118; //constexpr uint16_t VP_MOVE_E5 = 0x211A; constexpr uint16_t VP_HOME_ALL = 0x2120; constexpr uint16_t VP_MOTOR_LOCK_UNLOK = 0x2130; // Power loss recovery constexpr uint16_t VP_POWER_LOSS_RECOVERY = 0x2180; // Fan Control Buttons , switch between "off" and "on" constexpr uint16_t VP_FAN0_CONTROL = 0x2200; constexpr uint16_t VP_FAN1_CONTROL = 0x2202; constexpr uint16_t VP_FAN2_CONTROL = 0x2204; constexpr uint16_t VP_FAN3_CONTROL = 0x2206; // Heater Control Buttons , triged between "cool down" and "heat PLA" state constexpr uint16_t VP_E0_CONTROL = 0x2210; constexpr uint16_t VP_E1_CONTROL = 0x2212; //constexpr uint16_t VP_E2_CONTROL = 0x2214; //constexpr uint16_t VP_E3_CONTROL = 0x2216; //constexpr uint16_t VP_E4_CONTROL = 0x2218; //constexpr uint16_t VP_E5_CONTROL = 0x221A; constexpr uint16_t VP_BED_CONTROL = 0x221C; // Preheat constexpr uint16_t VP_E0_BED_PREHEAT = 0x2220; constexpr uint16_t VP_E1_BED_PREHEAT = 0x2222; //constexpr uint16_t VP_E2_BED_PREHEAT = 0x2224; //constexpr uint16_t VP_E3_BED_PREHEAT = 0x2226; //constexpr uint16_t VP_E4_BED_PREHEAT = 0x2228; //constexpr uint16_t VP_E5_BED_PREHEAT = 0x222A; // Filament load and unload constexpr uint16_t VP_E0_FILAMENT_LOAD_UNLOAD = 0x2300; constexpr uint16_t VP_E1_FILAMENT_LOAD_UNLOAD = 0x2302; // Settings store , reset constexpr uint16_t VP_SETTINGS = 0x2400; // PID autotune constexpr uint16_t VP_PID_AUTOTUNE_E0 = 0x2410; constexpr uint16_t VP_PID_AUTOTUNE_E1 = 0x2412; //constexpr uint16_t VP_PID_AUTOTUNE_E2 = 0x2414; //constexpr uint16_t VP_PID_AUTOTUNE_E3 = 0x2416; //constexpr uint16_t VP_PID_AUTOTUNE_E4 = 0x2418; //constexpr uint16_t VP_PID_AUTOTUNE_E5 = 0x241A; constexpr uint16_t VP_PID_AUTOTUNE_BED = 0x2420; // Calibrate Z constexpr uint16_t VP_Z_CALIBRATE = 0x2430; // First layer cal constexpr uint16_t VP_Z_FIRST_LAYER_CAL = 0x2500; // Data: 0 - Cancel first layer cal progress, >0 filament type have loaded // Firmware version on the boot screen. constexpr uint16_t VP_MARLIN_VERSION = 0x3000; constexpr uint8_t VP_MARLIN_VERSION_LEN = 16; // there is more space on the display, if needed. // Place for status messages. constexpr uint16_t VP_M117 = 0x3020; constexpr uint8_t VP_M117_LEN = 0x20; // Temperatures. constexpr uint16_t VP_T_E0_Is = 0x3060; // 4 Byte Integer constexpr uint16_t VP_T_E0_Set = 0x3062; // 2 Byte Integer constexpr uint16_t VP_T_E1_Is = 0x3064; // 4 Byte Integer // reserved to support up to 6 Extruders: constexpr uint16_t VP_T_E1_Set = 0x3066; // 2 Byte Integer //constexpr uint16_t VP_T_E2_Is = 0x3068; // 4 Byte Integer //constexpr uint16_t VP_T_E2_Set = 0x306A; // 2 Byte Integer //constexpr uint16_t VP_T_E3_Is = 0x306C; // 4 Byte Integer //constexpr uint16_t VP_T_E3_Set = 0x306E; // 2 Byte Integer //constexpr uint16_t VP_T_E4_Is = 0x3070; // 4 Byte Integer //constexpr uint16_t VP_T_E4_Set = 0x3072; // 2 Byte Integer //constexpr uint16_t VP_T_E4_Is = 0x3074; // 4 Byte Integer //constexpr uint16_t VP_T_E4_Set = 0x3076; // 2 Byte Integer //constexpr uint16_t VP_T_E5_Is = 0x3078; // 4 Byte Integer //constexpr uint16_t VP_T_E5_Set = 0x307A; // 2 Byte Integer constexpr uint16_t VP_T_Bed_Is = 0x3080; // 4 Byte Integer constexpr uint16_t VP_T_Bed_Set = 0x3082; // 2 Byte Integer constexpr uint16_t VP_Flowrate_E0 = 0x3090; // 2 Byte Integer constexpr uint16_t VP_Flowrate_E1 = 0x3092; // 2 Byte Integer // reserved for up to 6 Extruders: //constexpr uint16_t VP_Flowrate_E2 = 0x3094; //constexpr uint16_t VP_Flowrate_E3 = 0x3096; //constexpr uint16_t VP_Flowrate_E4 = 0x3098; //constexpr uint16_t VP_Flowrate_E5 = 0x309A; constexpr uint16_t VP_Fan0_Percentage = 0x3100; // 2 Byte Integer (0..100) constexpr uint16_t VP_Fan1_Percentage = 0x3102; // 2 Byte Integer (0..100) constexpr uint16_t VP_Fan2_Percentage = 0x3104; // 2 Byte Integer (0..100) constexpr uint16_t VP_Fan3_Percentage = 0x3106; // 2 Byte Integer (0..100) constexpr uint16_t VP_Feedrate_Percentage = 0x3108; // 2 Byte Integer (0..100) // Actual Position constexpr uint16_t VP_XPos = 0x3110; // 4 Byte Fixed point number; format xxx.yy constexpr uint16_t VP_YPos = 0x3112; // 4 Byte Fixed point number; format xxx.yy constexpr uint16_t VP_ZPos = 0x3114; // 4 Byte Fixed point number; format xxx.yy constexpr uint16_t VP_EPos = 0x3120; // 4 Byte Fixed point number; format xxx.yy constexpr uint16_t VP_PrintProgress_Percentage = 0x3130; // 2 Byte Integer (0..100) constexpr uint16_t VP_PrintTime = 0x3140; constexpr uint16_t VP_PrintTime_LEN = 32; constexpr uint16_t VP_PrintAccTime = 0x3160; constexpr uint16_t VP_PrintAccTime_LEN = 32; constexpr uint16_t VP_PrintsTotal = 0x3180; constexpr uint16_t VP_PrintsTotal_LEN = 16; // SDCard File Listing constexpr uint16_t VP_SD_FileName_LEN = 32; // LEN is shared for all entries. constexpr uint16_t DGUS_SD_FILESPERSCREEN = 5; // FIXME move that info to the display and read it from there. constexpr uint16_t VP_SD_FileName0 = 0x3200; constexpr uint16_t VP_SD_FileName1 = 0x3220; constexpr uint16_t VP_SD_FileName2 = 0x3240; constexpr uint16_t VP_SD_FileName3 = 0x3260; constexpr uint16_t VP_SD_FileName4 = 0x3280; constexpr uint16_t VP_SD_Print_ProbeOffsetZ = 0x32A0; // constexpr uint16_t VP_SD_Print_Filename = 0x32C0; // Fan status constexpr uint16_t VP_FAN0_STATUS = 0x3300; constexpr uint16_t VP_FAN1_STATUS = 0x3302; constexpr uint16_t VP_FAN2_STATUS = 0x3304; constexpr uint16_t VP_FAN3_STATUS = 0x3306; // Heater status constexpr uint16_t VP_E0_STATUS = 0x3310; constexpr uint16_t VP_E1_STATUS = 0x3312; //constexpr uint16_t VP_E2_STATUS = 0x3314; //constexpr uint16_t VP_E3_STATUS = 0x3316; //constexpr uint16_t VP_E4_STATUS = 0x3318; //constexpr uint16_t VP_E5_STATUS = 0x331A; constexpr uint16_t VP_BED_STATUS = 0x331C; constexpr uint16_t VP_MOVE_OPTION = 0x3400; // Step per mm constexpr uint16_t VP_X_STEP_PER_MM = 0x3600; // at the moment , 2 byte unsigned int , 0~1638.4 //constexpr uint16_t VP_X2_STEP_PER_MM = 0x3602; constexpr uint16_t VP_Y_STEP_PER_MM = 0x3604; //constexpr uint16_t VP_Y2_STEP_PER_MM = 0x3606; constexpr uint16_t VP_Z_STEP_PER_MM = 0x3608; //constexpr uint16_t VP_Z2_STEP_PER_MM = 0x360A; constexpr uint16_t VP_E0_STEP_PER_MM = 0x3610; constexpr uint16_t VP_E1_STEP_PER_MM = 0x3612; //constexpr uint16_t VP_E2_STEP_PER_MM = 0x3614; //constexpr uint16_t VP_E3_STEP_PER_MM = 0x3616; //constexpr uint16_t VP_E4_STEP_PER_MM = 0x3618; //constexpr uint16_t VP_E5_STEP_PER_MM = 0x361A; // PIDs constexpr uint16_t VP_E0_PID_P = 0x3700; // at the moment , 2 byte unsigned int , 0~1638.4 constexpr uint16_t VP_E0_PID_I = 0x3702; constexpr uint16_t VP_E0_PID_D = 0x3704; constexpr uint16_t VP_E1_PID_P = 0x3706; // at the moment , 2 byte unsigned int , 0~1638.4 constexpr uint16_t VP_E1_PID_I = 0x3708; constexpr uint16_t VP_E1_PID_D = 0x370A; constexpr uint16_t VP_BED_PID_P = 0x3710; constexpr uint16_t VP_BED_PID_I = 0x3712; constexpr uint16_t VP_BED_PID_D = 0x3714; // Wating screen status constexpr uint16_t VP_WAITING_STATUS = 0x3800; // SPs for certain variables... // located at 0x5000 and up // Not used yet! // This can be used e.g to make controls / data display invisible constexpr uint16_t SP_T_E0_Is = 0x5000; constexpr uint16_t SP_T_E0_Set = 0x5010; constexpr uint16_t SP_T_E1_Is = 0x5020; constexpr uint16_t SP_T_Bed_Is = 0x5030; constexpr uint16_t SP_T_Bed_Set = 0x5040;
{ "pile_set_name": "Github" }
/* This file is part of the Pangolin Project. * http://github.com/stevenlovegrove/Pangolin * * Copyright (c) 2011 Steven Lovegrove * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include <Python.h> #include <pangolin/platform.h> namespace pangolin { /// Class represents a reference counted PythonObject. /// PythonObject is appropriately Py_INCREF'd and Py_DECREF'd class PyUniqueObj { public: inline PyUniqueObj() : obj(0) { } /// Assumption: PythonObject has already been appropriately INCREF'd. inline PyUniqueObj(PyObject* obj) : obj(obj) { } inline PyUniqueObj(const PyUniqueObj& other) :obj(other.obj) { if(obj) Py_INCREF(obj); } inline ~PyUniqueObj() { if(obj) Py_DECREF(obj); } inline PyUniqueObj(PyUniqueObj&& other) : obj(other.obj) { other.obj = 0; } inline void operator=(PyUniqueObj&& other) { Release(); obj = other.obj; other.obj = 0; } inline void operator=(PyObject* obj) { Release(); this->obj = obj; } inline void Release() { if(obj) { Py_DECREF(obj); obj = 0; } } inline PyObject* operator*() { return obj; } inline operator PyObject*() { return obj; } private: PyObject* obj; }; }
{ "pile_set_name": "Github" }
/*! \mainpage This is an example library that provides a very rudimentary OBJ file format plugin for Usd. It is intentionally not particularly functional. The aim is to provide nearly the bare minimum plugin structure for learning purposes. For a far more advanced example, including an implementation of a low-level layer backing store plugin, see the usdAbc alembic plugin. The plugin is structured by functional component: - \b fileFormat contains the SdfFileFormat plugin interface implementation. - \b stream and \b streamIO contain the OBJ parser and data representation. - \b translator contains the logic that translates OBJ data to Usd data. */
{ "pile_set_name": "Github" }
// increment export function increment(index) { return { type: 'INCREMENT_LIKES', index } } // add comment export function addComment(postId, author, comment) { return { type: 'ADD_COMMENT', postId, author, comment } } // remove comment export function removeComment(postId, i) { return { type: 'REMOVE_COMMENT', i, postId } }
{ "pile_set_name": "Github" }
/* * Copyright (C) 2005-2018 Team Kodi * This file is part of Kodi - https://kodi.tv * * SPDX-License-Identifier: GPL-2.0-or-later * See LICENSES/README.md for more information. */ #include "GUIRenderingControl.h" #include "threads/SingleLock.h" #include "guilib/IRenderingCallback.h" #ifdef TARGET_WINDOWS #include "rendering/dx/DeviceResources.h" #endif #define LABEL_ROW1 10 #define LABEL_ROW2 11 #define LABEL_ROW3 12 CGUIRenderingControl::CGUIRenderingControl(int parentID, int controlID, float posX, float posY, float width, float height) : CGUIControl(parentID, controlID, posX, posY, width, height) { ControlType = GUICONTROL_RENDERADDON; m_callback = NULL; } CGUIRenderingControl::CGUIRenderingControl(const CGUIRenderingControl &from) : CGUIControl(from) { ControlType = GUICONTROL_RENDERADDON; m_callback = NULL; } bool CGUIRenderingControl::InitCallback(IRenderingCallback *callback) { if (!callback) return false; CSingleLock lock(m_rendering); CServiceBroker::GetWinSystem()->GetGfxContext().CaptureStateBlock(); float x = CServiceBroker::GetWinSystem()->GetGfxContext().ScaleFinalXCoord(GetXPosition(), GetYPosition()); float y = CServiceBroker::GetWinSystem()->GetGfxContext().ScaleFinalYCoord(GetXPosition(), GetYPosition()); float w = CServiceBroker::GetWinSystem()->GetGfxContext().ScaleFinalXCoord(GetXPosition() + GetWidth(), GetYPosition() + GetHeight()) - x; float h = CServiceBroker::GetWinSystem()->GetGfxContext().ScaleFinalYCoord(GetXPosition() + GetWidth(), GetYPosition() + GetHeight()) - y; if (x < 0) x = 0; if (y < 0) y = 0; if (x + w > CServiceBroker::GetWinSystem()->GetGfxContext().GetWidth()) w = CServiceBroker::GetWinSystem()->GetGfxContext().GetWidth() - x; if (y + h > CServiceBroker::GetWinSystem()->GetGfxContext().GetHeight()) h = CServiceBroker::GetWinSystem()->GetGfxContext().GetHeight() - y; void *device = NULL; #if TARGET_WINDOWS device = DX::DeviceResources::Get()->GetD3DDevice(); #endif if (callback->Create((int)(x+0.5f), (int)(y+0.5f), (int)(w+0.5f), (int)(h+0.5f), device)) m_callback = callback; else return false; CServiceBroker::GetWinSystem()->GetGfxContext().ApplyStateBlock(); return true; } void CGUIRenderingControl::UpdateVisibility(const CGUIListItem *item) { // if made invisible, start timer, only free addonptr after // some period, configurable by window class CGUIControl::UpdateVisibility(item); if (!IsVisible() && m_callback) FreeResources(); } void CGUIRenderingControl::Process(unsigned int currentTime, CDirtyRegionList &dirtyregions) { //! @todo Add processing to the addon so it could mark when actually changing CSingleLock lock(m_rendering); if (m_callback && m_callback->IsDirty()) MarkDirtyRegion(); CGUIControl::Process(currentTime, dirtyregions); } void CGUIRenderingControl::Render() { CSingleLock lock(m_rendering); if (m_callback) { // set the viewport - note: We currently don't have any control over how // the addon renders, so the best we can do is attempt to define // a viewport?? CServiceBroker::GetWinSystem()->GetGfxContext().SetViewPort(m_posX, m_posY, m_width, m_height); CServiceBroker::GetWinSystem()->GetGfxContext().CaptureStateBlock(); m_callback->Render(); CServiceBroker::GetWinSystem()->GetGfxContext().ApplyStateBlock(); CServiceBroker::GetWinSystem()->GetGfxContext().RestoreViewPort(); } CGUIControl::Render(); } void CGUIRenderingControl::FreeResources(bool immediately) { CSingleLock lock(m_rendering); if (!m_callback) return; CServiceBroker::GetWinSystem()->GetGfxContext().CaptureStateBlock(); //! @todo locking m_callback->Stop(); CServiceBroker::GetWinSystem()->GetGfxContext().ApplyStateBlock(); m_callback = NULL; } bool CGUIRenderingControl::CanFocusFromPoint(const CPoint &point) const { // mouse is allowed to focus this control, but it doesn't actually receive focus return IsVisible() && HitTest(point); }
{ "pile_set_name": "Github" }
# Unix SMB/CIFS implementation. # Copyright (C) 2014 Catalyst.Net Ltd # # Auto generate param_functions.c # # ** NOTE! The following LGPL license applies to the ldb # ** library. This does NOT imply that all of Samba is released # ** under the LGPL # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 3 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import os import xml.etree.ElementTree as ET import optparse # parse command line arguments parser = optparse.OptionParser() parser.add_option("-f", "--file", dest="filename", help="input file", metavar="FILE") parser.add_option("-o", "--output", dest="output", help='output file', metavar="FILE") parser.add_option("--mode", type="choice", metavar="<FUNCTIONS|S3PROTO|LIBPROTO|PARAMDEFS|PARAMTABLE>", choices=["FUNCTIONS", "S3PROTO", "LIBPROTO", "PARAMDEFS", "PARAMTABLE"], default="FUNCTIONS") parser.add_option("--scope", metavar="<GLOBAL|LOCAL>", choices=["GLOBAL", "LOCAL"], default="GLOBAL") (options, args) = parser.parse_args() if options.filename is None: parser.error("No input file specified") if options.output is None: parser.error("No output file specified") def iterate_all(path): """Iterate and yield all the parameters. :param path: path to parameters xml file """ try: p = open(path, 'r') except IOError as e: raise Exception("Error opening parameters file") out = p.read() # parse the parameters xml file root = ET.fromstring(out) for parameter in root: name = parameter.attrib.get("name") param_type = parameter.attrib.get("type") context = parameter.attrib.get("context") func = parameter.attrib.get("function") synonym = parameter.attrib.get("synonym") removed = parameter.attrib.get("removed") generated = parameter.attrib.get("generated_function") handler = parameter.attrib.get("handler") enumlist = parameter.attrib.get("enumlist") deprecated = parameter.attrib.get("deprecated") synonyms = parameter.findall('synonym') if removed == "1": continue constant = parameter.attrib.get("constant") substitution = parameter.attrib.get("substitution") parm = parameter.attrib.get("parm") if name is None or param_type is None or context is None: raise Exception("Error parsing parameter: " + name) if func is None: func = name.replace(" ", "_").lower() if enumlist is None: enumlist = "NULL" if handler is None: handler = "NULL" yield {'name': name, 'type': param_type, 'context': context, 'function': func, 'constant': (constant == '1'), 'substitution': (substitution == '1'), 'parm': (parm == '1'), 'synonym' : synonym, 'generated' : generated, 'enumlist' : enumlist, 'handler' : handler, 'deprecated' : deprecated, 'synonyms' : synonyms } # map doc attributes to a section of the generated function context_dict = {"G": "_GLOBAL", "S": "_LOCAL"} param_type_dict = { "boolean" : "_BOOL", "list" : "_LIST", "string" : "_STRING", "integer" : "_INTEGER", "enum" : "_INTEGER", "char" : "_CHAR", "boolean-auto" : "_INTEGER", "cmdlist" : "_LIST", "bytes" : "_INTEGER", "octal" : "_INTEGER", "ustring" : "_STRING", } def generate_functions(path_in, path_out): f = open(path_out, 'w') try: f.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n') for parameter in iterate_all(options.filename): # filter out parameteric options if ':' in parameter['name']: continue if parameter['synonym'] == "1": continue if parameter['generated'] == "0": continue output_string = "FN" temp = context_dict.get(parameter['context']) if temp is None: raise Exception(parameter['name'] + " has an invalid context " + parameter['context']) output_string += temp if parameter['type'] == "string" or parameter['type'] == "ustring": if parameter['substitution']: output_string += "_SUBSTITUTED" else: output_string += "_CONST" if parameter['parm']: output_string += "_PARM" temp = param_type_dict.get(parameter['type']) if temp is None: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) output_string += temp f.write(output_string + "(" + parameter['function'] + ", " + parameter['function'] + ')\n') finally: f.close() mapping = { 'boolean' : 'bool ', 'string' : 'char *', 'integer' : 'int ', 'char' : 'char ', 'list' : 'const char **', 'enum' : 'int ', 'boolean-auto' : 'int ', 'cmdlist' : 'const char **', 'bytes' : 'int ', 'octal' : 'int ', 'ustring' : 'char *', } def make_s3_param_proto(path_in, path_out): file_out = open(path_out, 'w') try: file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n') header = get_header(path_out) file_out.write("#ifndef %s\n" % header) file_out.write("#define %s\n\n" % header) for parameter in iterate_all(path_in): # filter out parameteric options if ':' in parameter['name']: continue if parameter['synonym'] == "1": continue if parameter['generated'] == "0": continue output_string = "" param_type = mapping.get(parameter['type']) if param_type is None: raise Exception(parameter['name'] + " has an invalid context " + parameter['context']) output_string += param_type output_string += "lp_%s" % parameter['function'] param = None if parameter['parm']: param = "const struct share_params *p" else: param = "int" if parameter['type'] == 'string' or parameter['type'] == 'ustring': if parameter['substitution']: if parameter['context'] == 'G': output_string += '(TALLOC_CTX *ctx, const struct loadparm_substitution *lp_sub);\n' elif parameter['context'] == 'S': output_string += '(TALLOC_CTX *ctx, const struct loadparm_substitution *lp_sub, %s);\n' % param else: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) else: if parameter['context'] == 'G': output_string = 'const ' + output_string + '(void);\n' elif parameter['context'] == 'S': output_string = 'const ' + output_string + '(%s);\n' % param else: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) else: if parameter['context'] == 'G': output_string += '(void);\n' elif parameter['context'] == 'S': output_string += '(%s);\n' % param else: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) file_out.write(output_string) file_out.write("\n#endif /* %s */\n\n" % header) finally: file_out.close() def make_lib_proto(path_in, path_out): file_out = open(path_out, 'w') try: file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n') for parameter in iterate_all(path_in): # filter out parameteric options if ':' in parameter['name']: continue if parameter['synonym'] == "1": continue if parameter['generated'] == "0": continue output_string = "" param_type = mapping.get(parameter['type']) if param_type is None: raise Exception(parameter['name'] + " has an invalid context " + parameter['context']) output_string += param_type output_string += "lpcfg_%s" % parameter['function'] if parameter['type'] == 'string' or parameter['type'] == 'ustring': if parameter['substitution']: if parameter['context'] == 'G': output_string += '(struct loadparm_context *, const struct loadparm_substitution *lp_sub, TALLOC_CTX *ctx);\n' elif parameter['context'] == 'S': output_string += '(struct loadparm_service *, struct loadparm_service *, TALLOC_CTX *ctx);\n' else: raise Exception(parameter['name'] + " has an invalid context " + parameter['context']) else: if parameter['context'] == 'G': output_string = 'const ' + output_string + '(struct loadparm_context *);\n' elif parameter['context'] == 'S': output_string = 'const ' + output_string + '(struct loadparm_service *, struct loadparm_service *);\n' else: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) else: if parameter['context'] == 'G': output_string += '(struct loadparm_context *);\n' elif parameter['context'] == 'S': output_string += '(struct loadparm_service *, struct loadparm_service *);\n' else: raise Exception(parameter['name'] + " has an invalid param type " + parameter['type']) file_out.write(output_string) finally: file_out.close() def get_header(path): header = os.path.basename(path).upper() header = header.replace(".", "_").replace("\\", "_").replace("-", "_") return "__%s__" % header def make_param_defs(path_in, path_out, scope): file_out = open(path_out, 'w') try: file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n') header = get_header(path_out) file_out.write("#ifndef %s\n" % header) file_out.write("#define %s\n\n" % header) if scope == "GLOBAL": file_out.write("/**\n") file_out.write(" * This structure describes global (ie., server-wide) parameters.\n") file_out.write(" */\n") file_out.write("struct loadparm_global \n") file_out.write("{\n") file_out.write("\tTALLOC_CTX *ctx; /* Context for talloced members */\n") elif scope == "LOCAL": file_out.write("/**\n") file_out.write(" * This structure describes a single service.\n") file_out.write(" */\n") file_out.write("struct loadparm_service \n") file_out.write("{\n") file_out.write("\tbool autoloaded;\n") for parameter in iterate_all(path_in): # filter out parameteric options if ':' in parameter['name']: continue if parameter['synonym'] == "1": continue if (scope == "GLOBAL" and parameter['context'] != "G" or scope == "LOCAL" and parameter['context'] != "S"): continue output_string = "\t" param_type = mapping.get(parameter['type']) if param_type is None: raise Exception(parameter['name'] + " has an invalid context " + parameter['context']) output_string += param_type output_string += " %s;\n" % parameter['function'] file_out.write(output_string) file_out.write("LOADPARM_EXTRA_%sS\n" % scope) file_out.write("};\n") file_out.write("\n#endif /* %s */\n\n" % header) finally: file_out.close() type_dict = { "boolean" : "P_BOOL", "boolean-rev" : "P_BOOLREV", "boolean-auto" : "P_ENUM", "list" : "P_LIST", "string" : "P_STRING", "integer" : "P_INTEGER", "enum" : "P_ENUM", "char" : "P_CHAR", "cmdlist" : "P_CMDLIST", "bytes" : "P_BYTES", "octal" : "P_OCTAL", "ustring" : "P_USTRING", } def make_param_table(path_in, path_out): file_out = open(path_out, 'w') try: file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n') header = get_header(path_out) file_out.write("#ifndef %s\n" % header) file_out.write("#define %s\n\n" % header) file_out.write("struct parm_struct parm_table[] = {\n") for parameter in iterate_all(path_in): # filter out parameteric options if ':' in parameter['name']: continue if parameter['context'] == 'G': p_class = "P_GLOBAL" else: p_class = "P_LOCAL" p_type = type_dict.get(parameter['type']) if parameter['context'] == 'G': temp = "GLOBAL" else: temp = "LOCAL" offset = "%s_VAR(%s)" % (temp, parameter['function']) enumlist = parameter['enumlist'] handler = parameter['handler'] synonym = parameter['synonym'] deprecated = parameter['deprecated'] flags_list = [] if synonym == "1": flags_list.append("FLAG_SYNONYM") if deprecated == "1": flags_list.append("FLAG_DEPRECATED") flags = "|".join(flags_list) synonyms = parameter['synonyms'] file_out.write("\t{\n") file_out.write("\t\t.label\t\t= \"%s\",\n" % parameter['name']) file_out.write("\t\t.type\t\t= %s,\n" % p_type) file_out.write("\t\t.p_class\t= %s,\n" % p_class) file_out.write("\t\t.offset\t\t= %s,\n" % offset) file_out.write("\t\t.special\t= %s,\n" % handler) file_out.write("\t\t.enum_list\t= %s,\n" % enumlist) if flags != "": file_out.write("\t\t.flags\t\t= %s,\n" % flags) file_out.write("\t},\n") if synonyms is not None: # for synonyms, we only list the synonym flag: flags = "FLAG_SYNONYM" for syn in synonyms: file_out.write("\t{\n") file_out.write("\t\t.label\t\t= \"%s\",\n" % syn.text) file_out.write("\t\t.type\t\t= %s,\n" % p_type) file_out.write("\t\t.p_class\t= %s,\n" % p_class) file_out.write("\t\t.offset\t\t= %s,\n" % offset) file_out.write("\t\t.special\t= %s,\n" % handler) file_out.write("\t\t.enum_list\t= %s,\n" % enumlist) if flags != "": file_out.write("\t\t.flags\t\t= %s,\n" % flags) file_out.write("\t},\n") file_out.write("\n\t{ .label = NULL }\n") file_out.write("};\n") file_out.write("\n#endif /* %s */\n\n" % header) finally: file_out.close() if options.mode == 'FUNCTIONS': generate_functions(options.filename, options.output) elif options.mode == 'S3PROTO': make_s3_param_proto(options.filename, options.output) elif options.mode == 'LIBPROTO': make_lib_proto(options.filename, options.output) elif options.mode == 'PARAMDEFS': make_param_defs(options.filename, options.output, options.scope) elif options.mode == 'PARAMTABLE': make_param_table(options.filename, options.output)
{ "pile_set_name": "Github" }
var R = require('../source'); var eq = require('./shared/eq'); describe('dropWhile', function() { it('skips elements while the function reports `true`', function() { eq(R.dropWhile(function(x) {return x < 5;}, [1, 3, 5, 7, 9]), [5, 7, 9]); }); it('returns an empty list for an empty list', function() { eq(R.dropWhile(function() { return false; }, []), []); eq(R.dropWhile(function() { return true; }, []), []); }); it('starts at the right arg and acknowledges undefined', function() { var sublist = R.dropWhile(function(x) {return x !== void 0;}, [1, 3, void 0, 5, 7]); eq(sublist.length, 3); eq(sublist[0], void 0); eq(sublist[1], 5); eq(sublist[2], 7); }); it('can operate on strings', function() { eq(R.dropWhile(function(x) { return x !== 'd'; }, 'Ramda'), 'da'); }); });
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 61757cf57a877ce46a12429a7e8996dd MonoImporter: externalObjects: {} serializedVersion: 2 defaultReferences: [] executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
GLOBAL SCOPE: (1,1) -> class Math (50,1) -> class Main CLASS SCOPE OF 'Math': (2,16) -> static function abs : int->int (10,16) -> static function pow : int->int->int (20,16) -> static function log : int->int (33,16) -> static function max : int->int->int (41,16) -> static function min : int->int->int FORMAL SCOPE OF 'abs': (2,24) -> variable @a : int LOCAL SCOPE: FORMAL SCOPE OF 'pow': (10,24) -> variable @a : int (10,31) -> variable @b : int LOCAL SCOPE: (11,13) -> variable i : int (12,13) -> variable result : int FORMAL SCOPE OF 'log': (20,24) -> variable @a : int LOCAL SCOPE: (24,13) -> variable result : int FORMAL SCOPE OF 'max': (33,24) -> variable @a : int (33,31) -> variable @b : int LOCAL SCOPE: FORMAL SCOPE OF 'min': (41,24) -> variable @a : int (41,31) -> variable @b : int LOCAL SCOPE: CLASS SCOPE OF 'Main': (51,17) -> static function main : void FORMAL SCOPE OF 'main': LOCAL SCOPE:
{ "pile_set_name": "Github" }
/* * Copyright (C) 2006-2020 Istituto Italiano di Tecnologia (IIT) * Copyright (C) 2006-2010 RobotCub Consortium * All rights reserved. * * This software may be modified and distributed under the terms of the * BSD-3-Clause license. See the accompanying LICENSE file for details. */ /* * Most of this file is from the output_example.c of ffmpeg - * copyright/copypolicy statement follows -- */ /* * Libavformat API example: Output a media file in any supported * libavformat format. The default codecs are used. * * Copyright (c) 2003 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "FfmpegWriter.h" #include "ffmpeg_api.h" #include <yarp/os/all.h> #include <yarp/sig/all.h> #include <yarp/os/Log.h> #include <yarp/os/LogComponent.h> #include <cstdlib> #include <cstring> #include <cmath> #ifndef M_PI #define M_PI 3.1415926535897931 #endif #define STREAM_FRAME_RATE 25 /* 25 images/s */ #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ #define STREAM_PIX_WORK AV_PIX_FMT_RGB24 using namespace yarp::os; using namespace yarp::dev; using namespace yarp::sig; using namespace yarp::sig::file; namespace { YARP_LOG_COMPONENT(FFMPEGWRITER, "yarp.device.ffmpeg_writer") } /**************************************************************/ /* audio output */ float t, tincr, tincr2; int16_t *samples; int samples_size; int samples_at; int samples_channels; uint8_t *audio_outbuf; int audio_outbuf_size; int audio_input_frame_size; /* * add an audio output stream */ static AVStream *add_audio_stream(AVFormatContext *oc, AVCodecID codec_id) { AVCodecContext *c; AVStream *st; st = avformat_new_stream(oc, NULL); if (!st) { yCFatal(FFMPEGWRITER, "Could not alloc stream"); } c = st->codec; c->codec_id = codec_id; c->codec_type = AVMEDIA_TYPE_AUDIO; /* put sample parameters */ c->bit_rate = 64000; c->sample_rate = 44100; c->channels = 2; return st; } static void open_audio(AVFormatContext *oc, AVStream *st) { yCInfo(FFMPEGWRITER, "Opening audio stream"); AVCodecContext *c; AVCodec *codec; c = st->codec; /* find the audio encoder */ codec = avcodec_find_encoder(c->codec_id); if (!codec) { yCFatal(FFMPEGWRITER, "Audio codec not found"); } /* open it */ if (avcodec_open2(c, codec, nullptr) < 0) { yCFatal(FFMPEGWRITER, "Could not open codec"); } /* init signal generator */ t = 0; tincr = 2 * M_PI * 110.0 / c->sample_rate; /* increment frequency by 110 Hz per second */ tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; audio_outbuf_size = 10000; audio_outbuf = (uint8_t*)av_malloc(audio_outbuf_size); /* ugly hack for PCM codecs (will be removed ASAP with new PCM support to compute the input frame size in samples */ if (c->frame_size <= 1) { audio_input_frame_size = audio_outbuf_size / c->channels; switch(st->codec->codec_id) { case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16BE: case AV_CODEC_ID_PCM_U16LE: case AV_CODEC_ID_PCM_U16BE: audio_input_frame_size >>= 1; break; default: break; } } else { audio_input_frame_size = c->frame_size; } samples_size = audio_input_frame_size; samples_at = 0; samples_channels = c->channels; samples = (int16_t*)av_malloc(samples_size*2*samples_channels); yCFatal(FFMPEGWRITER, "FRAME SIZE is %d / samples size is %d\n", c->frame_size, samples_size); } /* prepare a 16 bit dummy audio frame of 'frame_size' samples and 'nb_channels' channels */ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) { int j, i, v; int16_t *q; q = samples; for(j=0;j<frame_size;j++) { v = (int)(sin(t) * 10000); for(i = 0; i < nb_channels; i++) *q++ = v; t += tincr; tincr += tincr2; } } static void make_audio_frame(AVCodecContext *c, AVFrame * &frame, void *&samples) { frame = av_frame_alloc(); if (!frame) { yCFatal(FFMPEGWRITER, "Could not allocate audio frame"); } frame->nb_samples = c->frame_size; frame->format = c->sample_fmt; frame->channel_layout = c->channel_layout; int buffer_size = av_samples_get_buffer_size(nullptr, c->channels, c->frame_size, c->sample_fmt, 0); if (buffer_size < 0) { yCError(FFMPEGWRITER, "Could not get sample buffer size"); } samples = av_malloc(buffer_size); if (!samples) { yCFatal(FFMPEGWRITER, "Could not allocate %d bytes for samples buffer", buffer_size); } /* setup the data pointers in the AVFrame */ int ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (const uint8_t*)samples, buffer_size, 0); if (ret < 0) { yCFatal(FFMPEGWRITER, "Could not setup audio frame"); } } static void write_audio_frame(AVFormatContext *oc, AVStream *st) { AVCodecContext *c; AVPacket pkt; av_init_packet(&pkt); c = st->codec; get_audio_frame(samples, audio_input_frame_size, c->channels); AVFrame *frame; void *samples; make_audio_frame(c,frame,samples); AVPacket tmp; int got_packet = 0; av_init_packet(&tmp); tmp.data = audio_outbuf; tmp.size = audio_outbuf_size; pkt.size = avcodec_encode_audio2(c, &tmp, frame, &got_packet); if (tmp.side_data_elems > 0) { for (int i = 0; i < tmp.side_data_elems; i++) { av_free(tmp.side_data[i].data); } av_freep(&tmp.side_data); tmp.side_data_elems = 0; } av_freep(&samples); av_frame_free(&frame); pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); pkt.flags |= AV_PKT_FLAG_KEY; pkt.stream_index= st->index; pkt.data= audio_outbuf; /* write the compressed frame in the media file */ if (av_write_frame(oc, &pkt) != 0) { yCFatal(FFMPEGWRITER, "Error while writing audio frame"); } else { yCInfo(FFMPEGWRITER, "Wrote some audio"); } } static void write_audio_frame(AVFormatContext *oc, AVStream *st, Sound& snd) { yCInfo(FFMPEGWRITER, "Preparing to write audio (%d left over)", samples_at); AVCodecContext *c; int key = 1; c = st->codec; size_t at = 0; while (at<snd.getSamples()) { int avail = samples_size - samples_at; int remain = snd.getSamples() - at; int chan = snd.getChannels(); if (remain<avail) { avail = remain; } for (int i=0; i<avail; i++) { int offset = samples_at*samples_channels; for (int j=0; j<samples_channels; j++) { samples[offset+j] = snd.get(at,j%chan); } samples_at++; at++; } avail = samples_size - samples_at; if (avail==0) { AVPacket pkt; av_init_packet(&pkt); AVFrame *frame; void *samples; make_audio_frame(c,frame,samples); AVPacket tmp; int got_packet = 0; av_init_packet(&tmp); tmp.data = audio_outbuf; tmp.size = audio_outbuf_size; pkt.size = avcodec_encode_audio2(c, &tmp, frame, &got_packet); if (tmp.side_data_elems > 0) { for (int i = 0; i < tmp.side_data_elems; i++) { av_free(tmp.side_data[i].data); } av_freep(&tmp.side_data); tmp.side_data_elems = 0; } av_freep(&samples); av_frame_free(&frame); pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); pkt.dts = pkt.pts; yCTrace(FFMPEGWRITER, "(%d)", pkt.size); if (key) { pkt.flags |= AV_PKT_FLAG_KEY; key = 0; } pkt.stream_index= st->index; pkt.data = audio_outbuf; pkt.duration = 0; /* write the compressed frame in the media file */ if (av_write_frame(oc, &pkt) != 0) { yCFatal(FFMPEGWRITER, "Error while writing audio frame"); } samples_at = 0; } } yCInfo(FFMPEGWRITER, " wrote audio\n"); } static void close_audio(AVFormatContext *oc, AVStream *st) { avcodec_close(st->codec); av_free(samples); av_free(audio_outbuf); } /**************************************************************/ /* video output */ /* add a video output stream */ static AVStream *add_video_stream(AVFormatContext *oc, AVCodecID codec_id, int w, int h, int framerate) { AVCodecContext *c; AVStream *st; st = avformat_new_stream(oc, NULL); if (!st) { yCFatal(FFMPEGWRITER, "Could not alloc stream"); } c = st->codec; c->codec_id = codec_id; c->codec_type = AVMEDIA_TYPE_VIDEO; /* put sample parameters */ c->bit_rate = 400000; /* resolution must be a multiple of two */ c->width = w; c->height = h; /* time base: this is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. for fixed-fps content, timebase should be 1/framerate and timestamp increments should be identically 1. */ c->time_base.den = framerate; c->time_base.num = 1; c->gop_size = 12; /* emit one intra frame every twelve frames at most */ c->pix_fmt = STREAM_PIX_FMT; if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { /* just for testing, we also add B frames */ c->max_b_frames = 2; } if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO){ /* needed to avoid using macroblocks in which some coeffs overflow this doesnt happen with normal video, it just happens here as the motion of the chroma plane doesnt match the luma plane */ c->mb_decision=2; } // some formats want stream headers to be separate if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp")) c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; return st; } static AVFrame *alloc_picture(int pix_fmt, int width, int height) { AVFrame *picture; uint8_t *picture_buf; int size; picture = av_frame_alloc(); if (!picture) return nullptr; size = avpicture_get_size((AVPixelFormat)pix_fmt, width, height); picture_buf = (uint8_t*)av_malloc(size); if (!picture_buf) { av_free(picture); return nullptr; } avpicture_fill((AVPicture *)picture, picture_buf, (AVPixelFormat)pix_fmt, width, height); return picture; } void FfmpegWriter::open_video(AVFormatContext *oc, AVStream *st) { yCInfo(FFMPEGWRITER, "Opening video stream"); AVCodec *codec; AVCodecContext *c; c = st->codec; /* find the video encoder */ codec = avcodec_find_encoder(c->codec_id); if (!codec) { yCFatal(FFMPEGWRITER, "Video codec not found"); } /* open the codec */ if (avcodec_open2(c, codec, nullptr) < 0) { yCFatal(FFMPEGWRITER, "Could not open codec"); } video_outbuf = nullptr; /* allocate output buffer */ /* XXX: API change will be done */ /* buffers passed into lav* can be allocated any way you prefer, as long as they're aligned enough for the architecture, and they're freed appropriately (such as using av_free for buffers allocated with av_malloc) */ video_outbuf_size = 200000; video_outbuf = (uint8_t*)av_malloc(video_outbuf_size); /* allocate the encoded raw picture */ picture = alloc_picture(c->pix_fmt, c->width, c->height); if (!picture) { yCFatal(FFMPEGWRITER, "Could not allocate picture"); } /* if the output format is not YUV420P, then a temporary YUV420P picture is needed too. It is then converted to the required output format */ tmp_picture = nullptr; if (c->pix_fmt != AV_PIX_FMT_RGB24) { tmp_picture = alloc_picture(AV_PIX_FMT_RGB24, c->width, c->height); if (!tmp_picture) { yCFatal(FFMPEGWRITER, "Could not allocate temporary picture"); } } } static void fill_rgb_image(AVFrame *pict, int frame_index, int width, int height, ImageOf<PixelRgb>& img) { int x, y; for(y=0;y<height;y++) { for(x=0;x<width;x++) { int base = y*(width*3); pict->data[0][base + x*3] = img.safePixel(x,y).r; pict->data[0][base +x*3+1] = img.safePixel(x,y).g; pict->data[0][base +x*3+2] = img.safePixel(x,y).b; } } } void FfmpegWriter::write_video_frame(AVFormatContext *oc, AVStream *st, ImageOf<PixelRgb>& img) { int out_size, ret; AVCodecContext *c; c = st->codec; if (c->pix_fmt != AV_PIX_FMT_RGB24) { fill_rgb_image(tmp_picture, frame_count, c->width, c->height, img); stable_img_convert((AVPicture *)picture, c->pix_fmt, (AVPicture *)tmp_picture, AV_PIX_FMT_RGB24, c->width, c->height); } else { fill_rgb_image(picture, frame_count, c->width, c->height, img); } /* encode the image */ AVPacket tmp; int got_packet = 0; av_init_packet(&tmp); tmp.data = video_outbuf; tmp.size = video_outbuf_size; out_size = avcodec_encode_video2(c, &tmp, picture, &got_packet); if (tmp.side_data_elems > 0) { for (int i = 0; i < tmp.side_data_elems; i++) { av_free(tmp.side_data[i].data); } av_freep(&tmp.side_data); tmp.side_data_elems = 0; } /* if zero size, it means the image was buffered */ if (out_size > 0) { AVPacket pkt; av_init_packet(&pkt); pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); if(c->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY; pkt.stream_index= st->index; pkt.data= video_outbuf; pkt.size= out_size; /* static int x = 0; yCInfo(FFMPEGWRITER, "%ld / %ld : %ld / %ld --> %d\n", (long int) c->time_base.num, (long int) c->time_base.den, (long int) st->time_base.num, (long int) st->time_base.den, x); pkt.pts = x; x++; */ /* write the compressed frame in the media file */ ret = av_write_frame(oc, &pkt); } else { ret = 0; } if (ret != 0) { yCFatal(FFMPEGWRITER, "Error while writing video frame"); } frame_count++; } void FfmpegWriter::close_video(AVFormatContext *oc, AVStream *st) { avcodec_close(st->codec); av_free(picture->data[0]); av_free(picture); if (tmp_picture) { av_free(tmp_picture->data[0]); av_free(tmp_picture); } av_free(video_outbuf); } /**************************************************************/ /* YARP adaptation */ bool FfmpegWriter::open(yarp::os::Searchable & config) { yCTrace(FFMPEGWRITER, "ffmpeg libavcodec version number %d.%d.%d", LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO); ready = false; savedConfig.fromString(config.toString()); // open if possible, if not will do it later return delayedOpen(config); } bool FfmpegWriter::delayedOpen(yarp::os::Searchable & config) { yCTrace(FFMPEGWRITER, "DELAYED OPEN %s", config.toString().c_str()); int w = config.check("width",Value(0), "width of image (must be even)").asInt32(); int h = config.check("height",Value(0), "height of image (must be even)").asInt32(); int framerate = config.check("framerate",Value(30), "baseline images per second").asInt32(); int sample_rate = 0; int channels = 0; bool audio = config.check("audio","should audio be included"); if (audio) { sample_rate = config.check("sample_rate",Value(44100), "audio samples per second").asInt32(); channels = config.check("channels",Value(1), "audio samples per second").asInt32(); } filename = config.check("out",Value("movie.avi"), "name of movie to write").asString(); delayed = false; if (w<=0||h<=0) { delayed = true; return true; } ready = true; /* initialize libavcodec, and register all codecs and formats */ av_register_all(); /* auto detect the output format from the name. default is mpeg. */ fmt = av_guess_format(nullptr, filename.c_str(), nullptr); if (!fmt) { yCInfo(FFMPEGWRITER, "Could not deduce output format from file extension: using MPEG."); fmt = av_guess_format("mpeg", nullptr, nullptr); } if (!fmt) { yCFatal(FFMPEGWRITER, "Could not find suitable output format"); } /* allocate the output media context */ oc = avformat_alloc_context(); if (!oc) { yCFatal(FFMPEGWRITER, "Memory error"); } oc->oformat = fmt; snprintf(oc->filename, sizeof(oc->filename), "%s", filename.c_str()); /* add the audio and video streams using the default format codecs and initialize the codecs */ video_st = nullptr; audio_st = nullptr; if (fmt->video_codec != AV_CODEC_ID_NONE) { video_st = add_video_stream(oc, fmt->video_codec, w, h, framerate); } if (audio) { yCInfo(FFMPEGWRITER, "Adding audio %dx%d", sample_rate, channels); if (fmt->audio_codec != AV_CODEC_ID_NONE) { audio_st = add_audio_stream(oc, fmt->audio_codec); if (audio_st!=nullptr) { AVCodecContext *c = audio_st->codec; c->sample_rate = sample_rate; c->channels = channels; } else { yCError(FFMPEGWRITER, "Failed to add audio"); } } else { yCWarning(FFMPEGWRITER, "No audio codec available"); } } else { yCInfo(FFMPEGWRITER, "Skipping audio"); } av_dump_format(oc, 0, filename.c_str(), 1); /* now that all the parameters are set, we can open the audio and video codecs and allocate the necessary encode buffers */ if (video_st) { open_video(oc, video_st); } if (audio_st) { open_audio(oc, audio_st); } /* open the output file, if needed */ if (!(fmt->flags & AVFMT_NOFILE)) { if (avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE) < 0) { yCFatal(FFMPEGWRITER, "Could not open '%s'", filename.c_str()); } } /* write the stream header, if any */ avformat_write_header(oc, NULL); return true; } bool FfmpegWriter::close() { if (!isOk()) { return false; } /* close each codec */ if (video_st) close_video(oc, video_st); if (audio_st) close_audio(oc, audio_st); /* write the trailer, if any */ av_write_trailer(oc); /* free the streams */ for(unsigned int i = 0; i < oc->nb_streams; i++) { av_freep(&oc->streams[i]->codec); av_freep(&oc->streams[i]); } if (!(fmt->flags & AVFMT_NOFILE)) { /* close the output file */ avio_close(oc->pb); } /* free the stream */ av_free(oc); yCInfo(FFMPEGWRITER, "Closed media file %s", filename.c_str()); return true; } bool FfmpegWriter::putImage(yarp::sig::ImageOf<yarp::sig::PixelRgb> & image) { if (delayed) { savedConfig.put("width",Value((int)image.width())); savedConfig.put("height",Value((int)image.height())); } if (!isOk()) { return false; } /* compute current audio and video time */ if (audio_st) audio_pts = (double)av_stream_get_end_pts(audio_st) * audio_st->time_base.num / audio_st->time_base.den; else audio_pts = 0.0; if (video_st) video_pts = (double)av_stream_get_end_pts(video_st) * video_st->time_base.num / video_st->time_base.den; else video_pts = 0.0; if (!(audio_st||video_st)) return false; /* write interleaved audio and video frames */ if (!video_st || (video_st && audio_st && audio_pts < video_pts)) { write_audio_frame(oc, audio_st); } else { write_video_frame(oc, video_st, image); } return true; } bool FfmpegWriter::putAudioVisual(yarp::sig::ImageOf<yarp::sig::PixelRgb>& image, yarp::sig::Sound& sound) { if (delayed) { savedConfig.put("width",Value((int)image.width())); savedConfig.put("height",Value((int)image.height())); savedConfig.put("sample_rate",Value((int)sound.getFrequency())); savedConfig.put("channels",Value((int)sound.getChannels())); savedConfig.put("audio",Value(1)); } if (!isOk()) { return false; } /* write interleaved audio and video frames */ write_video_frame(oc, video_st, image); write_audio_frame(oc, audio_st, sound); return true; }
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en" ng-app="jpm"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <link href="/releases/3.5.0/css/style.css" rel="stylesheet" /> <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script> <script src="/js/releases.js"></script> <!-- Begin Jekyll SEO tag v2.5.0 --> <title>Bnd Gradle Plugins</title> <meta name="generator" content="Jekyll v3.8.5" /> <meta property="og:title" content="Bnd Gradle Plugins" /> <meta property="og:locale" content="en_US" /> <meta name="description" content="See the documentation on GitHub for details on how to configure and use the Bnd Gradle plugins." /> <meta property="og:description" content="See the documentation on GitHub for details on how to configure and use the Bnd Gradle plugins." /> <script type="application/ld+json"> {"@type":"WebPage","url":"/releases/3.5.0/tools/bnd-gradle.html","headline":"Bnd Gradle Plugins","description":"See the documentation on GitHub for details on how to configure and use the Bnd Gradle plugins.","@context":"http://schema.org"}</script> <!-- End Jekyll SEO tag --> </head> <body> <ul class="container12 menu-bar"> <li span=11><a class=menu-link href="/releases/3.5.0/"><img class=menu-logo src='/releases/3.5.0/img/bnd-80x40-white.png'></a> <a href="/releases/3.5.0/chapters/110-introduction.html">Intro </a><a href="/releases/3.5.0/chapters/800-headers.html">Headers </a><a href="/releases/3.5.0/chapters/820-instructions.html">Instructions </a><a href="/releases/3.5.0/chapters/850-macros.html">Macros </a><div class="releases"><button class="dropbtn">3.5.0</button><div class="dropdown-content"></div></div> <li class=menu-link span=1> <a href="https://github.com/bndtools/bnd" target="_"><img style="position:absolute;top:0;right:0;margin:0;padding:0;z-index:100" src="https://camo.githubusercontent.com/38ef81f8aca64bb9a64448d0d70f1308ef5341ab/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f6461726b626c75655f3132313632312e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png"></a> </ul> <ul class=container12> <li span=3> <div> <ul class="side-nav"> <li><a href="/releases/3.5.0/chapters/100-release.html">Release</a> <li><a href="/releases/3.5.0/chapters/110-introduction.html">Introduction</a> <li><a href="/releases/3.5.0/chapters/120-install.html">How to install bnd</a> <li><a href="/releases/3.5.0/chapters/123-tour-workspace.html">Guided Tour</a> <li><a href="/releases/3.5.0/chapters/125-tour-features.html">Guided Tour Workspace & Projects</a> <li><a href="/releases/3.5.0/chapters/130-concepts.html">Concepts</a> <li><a href="/releases/3.5.0/chapters/140-best-practices.html">Best practices</a> <li><a href="/releases/3.5.0/chapters/150-build.html">Build</a> <li><a href="/releases/3.5.0/chapters/160-jars.html">Generating JARs</a> <li><a href="/releases/3.5.0/chapters/170-versioning.html">Versioning</a> <li><a href="/releases/3.5.0/chapters/180-baselining.html">Baselining</a> <li><a href="/releases/3.5.0/chapters/200-components.html">Service Components</a> <li><a href="/releases/3.5.0/chapters/210-metatype.html">Metatype</a> <li><a href="/releases/3.5.0/chapters/220-contracts.html">Contracts</a> <li><a href="/releases/3.5.0/chapters/230-manifest-annotations.html">Manifest Annotations</a> <li><a href="/releases/3.5.0/chapters/250-resolving.html">Resolving Dependencies</a> <li><a href="/releases/3.5.0/chapters/300-launching.html">Launching</a> <li><a href="/releases/3.5.0/chapters/310-testing.html">Testing</a> <li><a href="/releases/3.5.0/chapters/320-packaging.html">Packaging Applications</a> <li><a href="/releases/3.5.0/chapters/390-wrapping.html">Wrapping Libraries to OSGi Bundles</a> <li><a href="/releases/3.5.0/chapters/400-commandline.html">From the command line</a> <li><a href="/releases/3.5.0/chapters/600-developer.html">For Developers</a> <li><a href="/releases/3.5.0/chapters/610-plugin.html">Plugins</a> <li><a href="/releases/3.5.0/chapters/700-tools.html">Tools bound to bnd</a> <li><a href="/releases/3.5.0/chapters/790-format.html">File Format</a> <li><a href="/releases/3.5.0/chapters/800-headers.html">Header Reference</a> <li><a href="/releases/3.5.0/chapters/820-instructions.html">Instruction</a> <li><a href="/releases/3.5.0/chapters/825-instructions-ref.html">Instruction Index</a> <li><a href="/releases/3.5.0/chapters/850-macros.html">Macro Reference</a> <li><a href="/releases/3.5.0/chapters/860-commands.html">Command Reference</a> <li><a href="/releases/3.5.0/chapters/870-plugins.html">Plugins Reference</a> <li><a href="/releases/3.5.0/chapters/880-settings.html">Settings</a> <li><a href="/releases/3.5.0/chapters/900-errors.html">Errors</a> <li><a href="/releases/3.5.0/chapters/910-warnings.html">Warnings</a> <li><a href="/releases/3.5.0/chapters/920-faq.html">Frequently Asked Questions</a> </ul> <div class=enroute><a href="http://enroute.osgi.org">Supported by OSGi enRoute <img src="/releases/3.5.0/img/EnRouteIcon_CMYK.png"></a></div> </div> <li span=9> <div class=notes-margin> <h1> Bnd Gradle Plugins</h1> <p>See the <a href="https://github.com/bndtools/bnd/blob/master/biz.aQute.bnd.gradle/README.md">documentation on GitHub</a> for details on how to configure and use the Bnd Gradle plugins.</p> </div> </ul> <nav class=next-prev> <a href='/releases/3.5.0/tools/ant.html'></a> <a href='/releases/3.5.0/tools/bnd-maven.html'></a> </nav> <footer class="container12" style="border-top: 1px solid black;padding:10px 0"> <ul span=12 row> <li span=3> <ul> <li><a href="/releases/3.5.0/contact.html">Contact</a> </ul> <li span=3> <ul> <li><a href="/releases/3.5.0/">Developers</a> </ul> <li span=3> <ul> <li><a href="/releases/3.5.0/">More</a> </ul> </ul> </footer> </body> </html>
{ "pile_set_name": "Github" }
Actor RedBloodSplatterz : Inventory { inventory.maxamount 1 } Actor GreenBloodSplatterz : Inventory { inventory.maxamount 1 } Actor BlueBloodSplatterz : Inventory { inventory.maxamount 1 } Actor BloodSplasherz : Inventory { inventory.maxamount 1 } Actor WaterSplasherz : Inventory { inventory.maxamount 1 } Actor SlimeSplasherz : Inventory { inventory.maxamount 1 } Actor SludgeSplasherz : Inventory { inventory.maxamount 1 } Actor LavaSplasherz : Inventory { inventory.maxamount 1 } Actor BloodSplasherBlueBlood : Inventory { inventory.maxamount 1 } Actor SuperGoreSpawner { Projectile +RANDOMIZE +MISSILE +FORCEXYBILLBOARD // +BLOODSPLATTER +THRUACTORS Decal BloodSuper damage 0 radius 2 height 0 speed 10 renderstyle ADD alpha 0.9 scale .15 gravity 0.7 -NOGRAVITY states { Spawn: //TNT1 AAAAAAAA 3 A_SpawnItem("SuperGore") Stop Death: Stop XDeath: TNT1 A 0 Stop } } Actor SuperGoreSpawner2: SuperGoreSpawner { states { Spawn: TNT1 AAAAAAAAA 2 A_SpawnItem("SuperGore") Stop } } Actor UltraGoreSpawner: SuperGoreSpawner { speed 60 states { Spawn: TNT1 AAAAAAAAAAAAAA 1 A_SpawnItem("SuperGore") Stop } } actor SuperGore { Decal BloodSplat game Doom Alpha 0.45 +FORCEXYBILLBOARD +GHOST +NOBLOCKMAP Gravity 0.05 +DontSplash -EXPLODEONWATER -ALLOWPARTICLES +CLIENTSIDEONLY -NOGRAVITY +THRUACTORS Scale 0.57 states { Spawn: TNT1 A 0 TNT1 A 0 A_JumpIf(waterlevel > 1, "SpawnUnderwater") BLER FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG 1 A_FadeOut(0.01) stop SpawnUnderwater: TNT1 A 0 TNT1 AA 0 A_CustomMissile ("Underblood3", 32, 0, random (0, 360), 2, random (0, 160)) Stop } } Actor SuperGoreMist: SuperGore { Scale 1.0 Speed 8 Gravity 0.3 } Actor SuperGoreMistSmall: SuperGore { Scale 0.35 Speed 2 Gravity 0.1 } Actor SuperGoreMistTiny: SuperGore { Scale 0.15 Speed 1 Gravity 0.1 } actor BlueSuperGore: SuperGore { Alpha 0.4 translation "168:191=192:207","16:47=240:247" States { SpawnUnderwater: TNT1 A 0 TNT1 AA 0 A_CustomMissile ("UnderbloodBlue3", 32, 0, random (0, 360), 2, random (0, 160)) Stop } } Actor BlueSuperGoreSpawner: SuperGoreSpawner { speed 20 states { Spawn: TNT1 AAAAAAAA 1 A_SpawnItem("BlueSuperGore") Stop } } actor GreenSuperGore: SuperGore { translation "168:191=112:127","16:47=120:127" Decal GreenBloodSplat States { SpawnUnderwater: TNT1 A 0 TNT1 AA 0 A_CustomMissile ("UnderbloodGreen3", 32, 0, random (0, 360), 2, random (0, 160)) Stop } } Actor GreenSuperGoreSpawner: SuperGoreSpawner { states { Spawn: TNT1 AAAA 1 A_SpawnItem("GreenSuperGore") Stop } } ACTOR StealthBloodLol { game Doom scale 1.1 speed 6 health 1 radius 8 height 1 Gravity 0.8 damage 0 Renderstyle Translucent Alpha 0.7 DamageType Blood Decal BloodSuper +MISSILE +CLIENTSIDEONLY +NOTELEPORT +NOBLOCKMAP +THRUACTORS +BLOODLESSIMPACT +FORCEXYBILLBOARD +NODAMAGETHRUST +MOVEWITHSECTOR +CORPSE -DONTSPLASH States { Spawn: TNT1 A 0 A_JumpIf(waterlevel > 1, "Splash") //XDT1 ABCD 4// A_SpawnItem ("BloodTr",0,0,0,1) TNT1 A 1 loop Splash: BLOD A 0 stop Death: //TNT1 A 0 A_CustomMissile ("SmallBloodSplasher", 0, 0, random (0, 360), 2, random (0, 160)) TNT1 A 0 A_PlaySound("blooddrop") TNT1 A 0 A_SpawnItem ("Brutal_Bloodspot", 5) XDT1 EFGHIJK 0 Stop } } actor BloodCloud { game Doom scale 1.4 mass 1 renderstyle Translucent alpha 0.9 Decal BloodSplat +LOWGRAVITY +NOTELEPORT +NOBLOCKMAP +NOCLIP +FORCEXYBILLBOARD +CLIENTSIDEONLY +DontSplash +MISSILE -NOGRAVITY Speed 2 states { Spawn: TNT1 A 0 A_JumpIf(waterlevel > 1, "Splash") //BTRL ABCD 4 BLOD ABCDDEEFFF 4 stop Death: TNT1 A 0 Stop Splash: BLOD A 0 stop } } ACTOR BloodSplasher2 { Game Doom damagefactor "Trample", 0.0 DamageType Blood Health 1 Radius 1 Height 1 Mass 1 +NOCLIP +NOGRAVITY +ACTIVATEMCROSS +WINDTHRUST +NODAMAGETHRUST +PIERCEARMOR +BLOODLESSIMPACT DeathSound "None" States { Spawn: BTRL A 1 //TNT1 A 0 A_Explode(3,200) Stop } } ACTOR BloodSplasher3: BloodSplasher2 { States { Spawn: BTRL A 1 //TNT1 A 0 A_Explode(3,600) Stop } } actor SmallSuperGore { Decal BloodSplat game Doom Alpha 0.5 +FORCEXYBILLBOARD +GHOST +NOBLOCKMAP +DontSplash -EXPLODEONWATER -ALLOWPARTICLES +CLIENTSIDEONLY +THRUACTORS +MISSILE +NOGRAVITY Speed 16 Scale 2 states { Spawn: TNT1 A 0 TNT1 A 0 A_JumpIf(waterlevel > 1, "SpawnUnderwater") BLOD EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE 1 A_FadeOut(0.03) stop SpawnUnderwater: TNT1 A 0 TNT1 AA 0 A_CustomMissile ("Underblood3", 32, 0, random (0, 360), 2, random (0, 160)) Stop } } actor SmallSuperGore2: SmallSuperGore { Alpha 0.3 Scale 0.6 Speed 1 } actor MediumSuperGore: SmallSuperGore { Scale 2.0 Speed 4 } actor GiantSuperGore: SmallSuperGore { Scale 4.0 Speed 8 } Actor GreenSuperGoreMist: GreenSuperGore { Scale 1.0 Speed 8 Gravity 0.3 } Actor GreenSuperGoreMistSmall: GreenSuperGore { Scale 0.35 Speed 2 Gravity 0.1 } Actor GreenSuperGoreMistTiny: GreenSuperGore { Scale 0.15 Speed 1 Gravity 0.1 } actor GreenSmallSuperGore : SmallSuperGore { Decal GreenBloodSplat Translation "168:191=112:127", "16:47=123:127" States { SpawnUnderwater: TNT1 A 0 TNT1 AA 0 A_CustomMissile ("UnderbloodGreen3", 32, 0, random (0, 360), 2, random (0, 160)) Stop } } actor GreenSmallSuperGore2: GreenSmallSuperGore { Alpha 0.3 Scale 0.6 Speed 1 Decal GreenBloodSplat } actor GreenMediumSuperGore: GreenSmallSuperGore { Scale 2.0 Speed 4 Decal GreenBloodSplat } actor GiantGreenSuperGore: GreenSmallSuperGore { Scale 4.0 Speed 8 Decal GreenBloodSplat }
{ "pile_set_name": "Github" }
/* * freeglut_internal.h * * The freeglut library private include file. * * Copyright (c) 1999-2000 Pawel W. Olszta. All Rights Reserved. * Written by Pawel W. Olszta, <[email protected]> * Creation date: Thu Dec 2 1999 * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PAWEL W. OLSZTA BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef FREEGLUT_INTERNAL_H #define FREEGLUT_INTERNAL_H #ifdef HAVE_CONFIG_H # include "config.h" #endif /* XXX Update these for each release! */ #define VERSION_MAJOR 2 #define VERSION_MINOR 7 #define VERSION_PATCH 0 /* Freeglut is intended to function under all Unix/X11 and Win32 platforms. */ /* XXX: Don't all MS-Windows compilers (except Cygwin) have _WIN32 defined? * XXX: If so, remove the first set of defined()'s below. */ #if !defined(TARGET_HOST_POSIX_X11) && !defined(TARGET_HOST_MS_WINDOWS) && !defined(TARGET_HOST_MAC_OSX) && !defined(TARGET_HOST_SOLARIS) #if defined(_MSC_VER) || defined(__WATCOMC__) || defined(__MINGW32__) \ || defined(_WIN32) || defined(_WIN32_WCE) \ || ( defined(__CYGWIN__) && defined(X_DISPLAY_MISSING) ) # define TARGET_HOST_MS_WINDOWS 1 #elif defined(__posix__) || defined(__unix__) || defined(__linux__) || defined(__sun) # define TARGET_HOST_POSIX_X11 1 #elif defined(__APPLE__) /* This is a placeholder until we get native OSX support ironed out -- JFF 11/18/09 */ # define TARGET_HOST_POSIX_X11 1 /* # define TARGET_HOST_MAC_OSX 1 */ #else # error "Unrecognized target host!" #endif #endif /* Detect both SunPro and gcc compilers on Sun Solaris */ #if defined (__SVR4) && defined (__sun) # define TARGET_HOST_SOLARIS 1 #endif #ifndef TARGET_HOST_MS_WINDOWS # define TARGET_HOST_MS_WINDOWS 0 #endif #ifndef TARGET_HOST_POSIX_X11 # define TARGET_HOST_POSIX_X11 0 #endif #ifndef TARGET_HOST_MAC_OSX # define TARGET_HOST_MAC_OSX 0 #endif #ifndef TARGET_HOST_SOLARIS # define TARGET_HOST_SOLARIS 0 #endif /* -- FIXED CONFIGURATION LIMITS ------------------------------------------- */ #define FREEGLUT_MAX_MENUS 3 /* -- PLATFORM-SPECIFIC INCLUDES ------------------------------------------- */ /* All Win32 headers depend on the huge windows.h recursive include. * Note: Lower-case header names are used, for best cross-platform * compatibility. */ #if TARGET_HOST_MS_WINDOWS && !defined(_WIN32_WCE) # include <windows.h> # include <windowsx.h> # include <mmsystem.h> /* CYGWIN does not have tchar.h, but has TEXT(x), defined in winnt.h. */ # ifndef __CYGWIN__ # include <tchar.h> # else # define _TEXT(x) TEXT(x) # define _T(x) TEXT(x) # endif #elif TARGET_HOST_POSIX_X11 # include <GL/glx.h> # include <X11/Xlib.h> # include <X11/Xatom.h> # include <X11/keysym.h> # include <X11/extensions/XI.h> # ifdef HAVE_X11_EXTENSIONS_XF86VMODE_H # include <X11/extensions/xf86vmode.h> # endif # ifdef HAVE_X11_EXTENSIONS_XRANDR_H # include <X11/extensions/Xrandr.h> # endif /* If GLX is too old, we will fail during runtime when multisampling is requested, but at least freeglut compiles. */ # ifndef GLX_SAMPLE_BUFFERS # define GLX_SAMPLE_BUFFERS 0x80A8 # endif # ifndef GLX_SAMPLES # define GLX_SAMPLES 0x80A9 # endif #endif /* These files should be available on every platform. */ #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <stdarg.h> /* These are included based on autoconf directives. */ #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #include <unistd.h> #ifdef TIME_WITH_SYS_TIME # include <sys/time.h> # include <time.h> #elif defined(HAVE_SYS_TIME_H) # include <sys/time.h> #else # include <time.h> #endif /* -- AUTOCONF HACKS --------------------------------------------------------*/ /* XXX: Update autoconf to avoid these. * XXX: Are non-POSIX platforms intended not to use autoconf? * If so, perhaps there should be a config_guess.h for them. Alternatively, * config guesses could be placed above, just after the config.h exclusion. */ #if defined(__FreeBSD__) || defined(__NetBSD__) # define HAVE_USB_JS 1 # if defined(__NetBSD__) || ( defined(__FreeBSD__) && __FreeBSD_version >= 500000) # define HAVE_USBHID_H 1 # endif #endif #if TARGET_HOST_MS_WINDOWS # define HAVE_VFPRINTF 1 #endif /* MinGW may lack a prototype for ChangeDisplaySettingsEx() (depending on the version?) */ #if TARGET_HOST_MS_WINDOWS && !defined(ChangeDisplaySettingsEx) LONG WINAPI ChangeDisplaySettingsExA(LPCSTR,LPDEVMODEA,HWND,DWORD,LPVOID); LONG WINAPI ChangeDisplaySettingsExW(LPCWSTR,LPDEVMODEW,HWND,DWORD,LPVOID); # ifdef UNICODE # define ChangeDisplaySettingsEx ChangeDisplaySettingsExW # else # define ChangeDisplaySettingsEx ChangeDisplaySettingsExA # endif #endif #if defined(_MSC_VER) || defined(__WATCOMC__) /* strdup() is non-standard, for all but POSIX-2001 */ #define strdup _strdup #endif /* M_PI is non-standard (defined by BSD, not ISO-C) */ #ifndef M_PI # define M_PI 3.14159265358979323846 #endif #ifdef HAVE_STDBOOL_H # include <stdbool.h> # ifndef TRUE # define TRUE true # endif # ifndef FALSE # define FALSE false # endif #else # ifndef TRUE # define TRUE 1 # endif # ifndef FALSE # define FALSE 0 # endif #endif /* General defines */ #define INVALID_MODIFIERS 0xffffffff /* -- GLOBAL TYPE DEFINITIONS ---------------------------------------------- */ /* Freeglut callbacks type definitions */ typedef void (* FGCBDisplay )( void ); typedef void (* FGCBReshape )( int, int ); typedef void (* FGCBVisibility )( int ); typedef void (* FGCBKeyboard )( unsigned char, int, int ); typedef void (* FGCBSpecial )( int, int, int ); typedef void (* FGCBMouse )( int, int, int, int ); typedef void (* FGCBMouseWheel )( int, int, int, int ); typedef void (* FGCBMotion )( int, int ); typedef void (* FGCBPassive )( int, int ); typedef void (* FGCBEntry )( int ); typedef void (* FGCBWindowStatus )( int ); typedef void (* FGCBSelect )( int, int, int ); typedef void (* FGCBJoystick )( unsigned int, int, int, int ); typedef void (* FGCBKeyboardUp )( unsigned char, int, int ); typedef void (* FGCBSpecialUp )( int, int, int ); typedef void (* FGCBOverlayDisplay)( void ); typedef void (* FGCBSpaceMotion )( int, int, int ); typedef void (* FGCBSpaceRotation )( int, int, int ); typedef void (* FGCBSpaceButton )( int, int ); typedef void (* FGCBDials )( int, int ); typedef void (* FGCBButtonBox )( int, int ); typedef void (* FGCBTabletMotion )( int, int ); typedef void (* FGCBTabletButton )( int, int, int, int ); typedef void (* FGCBDestroy )( void ); typedef void (* FGCBMultiEntry )( int, int ); typedef void (* FGCBMultiButton )( int, int, int, int, int ); typedef void (* FGCBMultiMotion )( int, int, int ); typedef void (* FGCBMultiPassive )( int, int, int ); /* The global callbacks type definitions */ typedef void (* FGCBIdle )( void ); typedef void (* FGCBTimer )( int ); typedef void (* FGCBMenuState )( int ); typedef void (* FGCBMenuStatus )( int, int, int ); /* The callback used when creating/using menus */ typedef void (* FGCBMenu )( int ); /* The FreeGLUT error/warning handler type definition */ typedef void (* FGError ) ( const char *fmt, va_list ap); typedef void (* FGWarning ) ( const char *fmt, va_list ap); /* A list structure */ typedef struct tagSFG_List SFG_List; struct tagSFG_List { void *First; void *Last; }; /* A list node structure */ typedef struct tagSFG_Node SFG_Node; struct tagSFG_Node { void *Next; void *Prev; }; /* A helper structure holding two ints and a boolean */ typedef struct tagSFG_XYUse SFG_XYUse; struct tagSFG_XYUse { GLint X, Y; /* The two integers... */ GLboolean Use; /* ...and a single boolean. */ }; /* * An enumeration containing the state of the GLUT execution: * initializing, running, or stopping */ typedef enum { GLUT_EXEC_STATE_INIT, GLUT_EXEC_STATE_RUNNING, GLUT_EXEC_STATE_STOP } fgExecutionState ; /* This structure holds different freeglut settings */ typedef struct tagSFG_State SFG_State; struct tagSFG_State { SFG_XYUse Position; /* The default windows' position */ SFG_XYUse Size; /* The default windows' size */ unsigned int DisplayMode; /* Display mode for new windows */ GLboolean Initialised; /* freeglut has been initialised */ int DirectContext; /* Direct rendering state */ GLboolean ForceIconic; /* New top windows are iconified */ GLboolean UseCurrentContext; /* New windows share with current */ GLboolean GLDebugSwitch; /* OpenGL state debugging switch */ GLboolean XSyncSwitch; /* X11 sync protocol switch */ int KeyRepeat; /* Global key repeat mode. */ int Modifiers; /* Current ALT/SHIFT/CTRL state */ GLuint FPSInterval; /* Interval between FPS printfs */ GLuint SwapCount; /* Count of glutSwapBuffer calls */ GLuint SwapTime; /* Time of last SwapBuffers */ unsigned long Time; /* Time that glutInit was called */ SFG_List Timers; /* The freeglut timer hooks */ SFG_List FreeTimers; /* The unused timer hooks */ FGCBIdle IdleCallback; /* The global idle callback */ int ActiveMenus; /* Num. of currently active menus */ FGCBMenuState MenuStateCallback; /* Menu callbacks are global */ FGCBMenuStatus MenuStatusCallback; SFG_XYUse GameModeSize; /* Game mode screen's dimensions */ int GameModeDepth; /* The pixel depth for game mode */ int GameModeRefresh; /* The refresh rate for game mode */ int ActionOnWindowClose; /* Action when user closes window */ fgExecutionState ExecState; /* Used for GLUT termination */ char *ProgramName; /* Name of the invoking program */ GLboolean JoysticksInitialised; /* Only initialize if application calls for them */ int NumActiveJoysticks; /* Number of active joysticks -- if zero, don't poll joysticks */ GLboolean InputDevsInitialised; /* Only initialize if application calls for them */ int MouseWheelTicks; /* Number of ticks the mouse wheel has turned */ int AuxiliaryBufferNumber; /* Number of auxiliary buffers */ int SampleNumber; /* Number of samples per pixel */ int MajorVersion; /* Major OpenGL context version */ int MinorVersion; /* Minor OpenGL context version */ int ContextFlags; /* OpenGL context flags */ int ContextProfile; /* OpenGL context profile */ FGError ErrorFunc; /* User defined error handler */ FGWarning WarningFunc; /* User defined warning handler */ }; /* The structure used by display initialization in freeglut_init.c */ typedef struct tagSFG_Display SFG_Display; struct tagSFG_Display { #if TARGET_HOST_POSIX_X11 Display* Display; /* The display we are being run in. */ int Screen; /* The screen we are about to use. */ Window RootWindow; /* The screen's root window. */ int Connection; /* The display's connection number */ Atom DeleteWindow; /* The window deletion atom */ Atom State; /* The state atom */ Atom StateFullScreen; /* The full screen atom */ #ifdef HAVE_X11_EXTENSIONS_XRANDR_H int prev_xsz, prev_ysz; int prev_refresh; int prev_size_valid; #endif /* HAVE_X11_EXTENSIONS_XRANDR_H */ #ifdef HAVE_X11_EXTENSIONS_XF86VMODE_H /* * XF86VidMode may be compilable even if it fails at runtime. Therefore, * the validity of the VidMode has to be tracked */ int DisplayModeValid; /* Flag that indicates runtime status*/ XF86VidModeModeLine DisplayMode; /* Current screen's display settings */ int DisplayModeClock; /* The display mode's refresh rate */ int DisplayViewPortX; /* saved X location of the viewport */ int DisplayViewPortY; /* saved Y location of the viewport */ #endif /* HAVE_X11_EXTENSIONS_XF86VMODE_H */ int DisplayPointerX; /* saved X location of the pointer */ int DisplayPointerY; /* saved Y location of the pointer */ #elif TARGET_HOST_MS_WINDOWS HINSTANCE Instance; /* The application's instance */ DEVMODE DisplayMode; /* Desktop's display settings */ char *DisplayName; /* Display name for multi display support*/ #endif int ScreenWidth; /* The screen's width in pixels */ int ScreenHeight; /* The screen's height in pixels */ int ScreenWidthMM; /* The screen's width in milimeters */ int ScreenHeightMM; /* The screen's height in milimeters */ }; /* The user can create any number of timer hooks */ typedef struct tagSFG_Timer SFG_Timer; struct tagSFG_Timer { SFG_Node Node; int ID; /* The timer ID integer */ FGCBTimer Callback; /* The timer callback */ long TriggerTime; /* The timer trigger time */ }; /* * Make "freeglut" window handle and context types so that we don't need so * much conditionally-compiled code later in the library. */ #if TARGET_HOST_POSIX_X11 typedef Window SFG_WindowHandleType ; typedef GLXContext SFG_WindowContextType ; #elif TARGET_HOST_MS_WINDOWS typedef HWND SFG_WindowHandleType ; typedef HGLRC SFG_WindowContextType ; #endif /* * A window and its OpenGL context. The contents of this structure * are highly dependant on the target operating system we aim at... */ typedef struct tagSFG_Context SFG_Context; struct tagSFG_Context { SFG_WindowHandleType Handle; /* The window's handle */ SFG_WindowContextType Context; /* The window's OpenGL/WGL context */ #if TARGET_HOST_POSIX_X11 GLXFBConfig* FBConfig; /* The window's FBConfig */ #elif TARGET_HOST_MS_WINDOWS HDC Device; /* The window's device context */ #endif int DoubleBuffered; /* Treat the window as double-buffered */ }; /* Window's state description. This structure should be kept portable. */ typedef struct tagSFG_WindowState SFG_WindowState; struct tagSFG_WindowState { /* Note that on Windows, sizes always refer to the client area, thus without the window decorations */ int Width; /* Window's width in pixels */ int Height; /* The same about the height */ #if TARGET_HOST_POSIX_X11 int OldWidth; /* Window width from before a resize */ int OldHeight; /* " height " " " " */ #elif TARGET_HOST_MS_WINDOWS RECT OldRect; /* window rect - stored before the window is made fullscreen */ DWORD OldStyle; /* window style - stored before the window is made fullscreen */ #endif GLboolean Redisplay; /* Do we have to redisplay? */ GLboolean Visible; /* Is the window visible now */ int Cursor; /* The currently selected cursor */ long JoystickPollRate; /* The joystick polling rate */ long JoystickLastPoll; /* When the last poll happened */ int MouseX, MouseY; /* The most recent mouse position */ GLboolean IgnoreKeyRepeat; /* Whether to ignore key repeat. */ GLboolean KeyRepeating; /* Currently in repeat mode */ GLboolean NeedToResize; /* Do we need to resize the window? */ GLboolean IsFullscreen; /* is the window fullscreen? */ }; /* * A generic function pointer. We should really use the GLUTproc type * defined in freeglut_ext.h, but if we include that header in this file * a bunch of other stuff (font-related) blows up! */ typedef void (*SFG_Proc)(); /* * SET_WCB() is used as: * * SET_WCB( window, cbname, func ); * * ...where {window} is the freeglut window to set the callback, * {cbname} is the window-specific callback to set, * {func} is a function-pointer. * * Originally, {FETCH_WCB( ... ) = func} was rather sloppily used, * but this can cause warnings because the FETCH_WCB() macro type- * casts its result, and a type-cast value shouldn't be an lvalue. * * The {if( FETCH_WCB( ... ) != func )} test is to do type-checking * and for no other reason. Since it's hidden in the macro, the * ugliness is felt to be rather benign. */ #define SET_WCB(window,cbname,func) \ do \ { \ if( FETCH_WCB( window, cbname ) != (SFG_Proc)(func) ) \ (((window).CallBacks[CB_ ## cbname]) = (SFG_Proc)(func)); \ } while( 0 ) /* * FETCH_WCB() is used as: * * FETCH_WCB( window, cbname ); * * ...where {window} is the freeglut window to fetch the callback from, * {cbname} is the window-specific callback to fetch. * * The result is correctly type-cast to the callback function pointer * type. */ #define FETCH_WCB(window,cbname) \ ((window).CallBacks[CB_ ## cbname]) /* * INVOKE_WCB() is used as: * * INVOKE_WCB( window, cbname, ( arg_list ) ); * * ...where {window} is the freeglut window, * {cbname} is the window-specific callback to be invoked, * {(arg_list)} is the parameter list. * * The callback is invoked as: * * callback( arg_list ); * * ...so the parentheses are REQUIRED in the {arg_list}. * * NOTE that it does a sanity-check and also sets the * current window. * */ #if TARGET_HOST_MS_WINDOWS && !defined(_WIN32_WCE) /* FIXME: also WinCE? */ #define INVOKE_WCB(window,cbname,arg_list) \ do \ { \ if( FETCH_WCB( window, cbname ) ) \ { \ FGCB ## cbname func = (FGCB ## cbname)(FETCH_WCB( window, cbname )); \ fgSetWindow( &window ); \ func arg_list; \ } \ } while( 0 ) #else #define INVOKE_WCB(window,cbname,arg_list) \ do \ { \ if( FETCH_WCB( window, cbname ) ) \ { \ fgSetWindow( &window ); \ ((FGCB ## cbname)FETCH_WCB( window, cbname )) arg_list; \ } \ } while( 0 ) #endif /* * The window callbacks the user can supply us with. Should be kept portable. * * This enumeration provides the freeglut CallBack numbers. * The symbolic constants are indices into a window's array of * function callbacks. The names are formed by splicing a common * prefix onto the callback's base name. (This was originally * done so that an early stage of development could live side-by- * side with the old callback code. The old callback code used * the bare callback's name as a structure member, so I used a * prefix for the array index name.) * * XXX For consistancy, perhaps the prefix should match the * XXX FETCH* and INVOKE* macro suffices. I.e., WCB_, rather than * XXX CB_. */ enum { CB_Display, CB_Reshape, CB_Keyboard, CB_KeyboardUp, CB_Special, CB_SpecialUp, CB_Mouse, CB_MouseWheel, CB_Motion, CB_Passive, CB_Entry, CB_Visibility, CB_WindowStatus, CB_Joystick, CB_Destroy, /* MPX-related */ CB_MultiEntry, CB_MultiButton, CB_MultiMotion, CB_MultiPassive, /* Presently ignored */ CB_Select, CB_OverlayDisplay, CB_SpaceMotion, /* presently implemented only on UNIX/X11 */ CB_SpaceRotation, /* presently implemented only on UNIX/X11 */ CB_SpaceButton, /* presently implemented only on UNIX/X11 */ CB_Dials, CB_ButtonBox, CB_TabletMotion, CB_TabletButton, /* Always make this the LAST one */ TOTAL_CALLBACKS }; /* This structure holds the OpenGL rendering context for all the menu windows */ typedef struct tagSFG_MenuContext SFG_MenuContext; struct tagSFG_MenuContext { SFG_WindowContextType MContext; /* The menu window's WGL context */ }; /* This structure describes a menu */ typedef struct tagSFG_Window SFG_Window; typedef struct tagSFG_MenuEntry SFG_MenuEntry; typedef struct tagSFG_Menu SFG_Menu; struct tagSFG_Menu { SFG_Node Node; void *UserData; /* User data passed back at callback */ int ID; /* The global menu ID */ SFG_List Entries; /* The menu entries list */ FGCBMenu Callback; /* The menu callback */ FGCBDestroy Destroy; /* Destruction callback */ GLboolean IsActive; /* Is the menu selected? */ int Width; /* Menu box width in pixels */ int Height; /* Menu box height in pixels */ int X, Y; /* Menu box raster position */ SFG_MenuEntry *ActiveEntry; /* Currently active entry in the menu */ SFG_Window *Window; /* Window for menu */ SFG_Window *ParentWindow; /* Window in which the menu is invoked */ }; /* This is a menu entry */ struct tagSFG_MenuEntry { SFG_Node Node; int ID; /* The menu entry ID (local) */ int Ordinal; /* The menu's ordinal number */ char* Text; /* The text to be displayed */ SFG_Menu* SubMenu; /* Optional sub-menu tree */ GLboolean IsActive; /* Is the entry highlighted? */ int Width; /* Label's width in pixels */ }; /* * A window, making part of freeglut windows hierarchy. * Should be kept portable. * * NOTE that ActiveMenu is set to menu itself if the window is a menu. */ struct tagSFG_Window { SFG_Node Node; int ID; /* Window's ID number */ SFG_Context Window; /* Window and OpenGL context */ SFG_WindowState State; /* The window state */ SFG_Proc CallBacks[ TOTAL_CALLBACKS ]; /* Array of window callbacks */ void *UserData ; /* For use by user */ SFG_Menu* Menu[ FREEGLUT_MAX_MENUS ]; /* Menus appended to window */ SFG_Menu* ActiveMenu; /* The window's active menu */ SFG_Window* Parent; /* The parent to this window */ SFG_List Children; /* The subwindows d.l. list */ GLboolean IsMenu; /* Set to 1 if we are a menu */ }; /* A linked list structure of windows */ typedef struct tagSFG_WindowList SFG_WindowList ; struct tagSFG_WindowList { SFG_Node node; SFG_Window *window ; }; /* This holds information about all the windows, menus etc. */ typedef struct tagSFG_Structure SFG_Structure; struct tagSFG_Structure { SFG_List Windows; /* The global windows list */ SFG_List Menus; /* The global menus list */ SFG_List WindowsToDestroy; SFG_Window* CurrentWindow; /* The currently set window */ SFG_Menu* CurrentMenu; /* Same, but menu... */ SFG_MenuContext* MenuContext; /* OpenGL rendering context for menus */ SFG_Window* GameModeWindow; /* The game mode window */ int WindowID; /* The new current window ID */ int MenuID; /* The new current menu ID */ }; /* * This structure is used for the enumeration purposes. * You can easily extend its functionalities by declaring * a structure containing enumerator's contents and custom * data, then casting its pointer to (SFG_Enumerator *). */ typedef struct tagSFG_Enumerator SFG_Enumerator; struct tagSFG_Enumerator { GLboolean found; /* Used to terminate search */ void* data; /* Custom data pointer */ }; typedef void (* FGCBenumerator )( SFG_Window *, SFG_Enumerator * ); /* The bitmap font structure */ typedef struct tagSFG_Font SFG_Font; struct tagSFG_Font { char* Name; /* The source font name */ int Quantity; /* Number of chars in font */ int Height; /* Height of the characters */ const GLubyte** Characters; /* The characters mapping */ float xorig, yorig; /* Relative origin of the character */ }; /* The stroke font structures */ typedef struct tagSFG_StrokeVertex SFG_StrokeVertex; struct tagSFG_StrokeVertex { GLfloat X, Y; }; typedef struct tagSFG_StrokeStrip SFG_StrokeStrip; struct tagSFG_StrokeStrip { int Number; const SFG_StrokeVertex* Vertices; }; typedef struct tagSFG_StrokeChar SFG_StrokeChar; struct tagSFG_StrokeChar { GLfloat Right; int Number; const SFG_StrokeStrip* Strips; }; typedef struct tagSFG_StrokeFont SFG_StrokeFont; struct tagSFG_StrokeFont { char* Name; /* The source font name */ int Quantity; /* Number of chars in font */ GLfloat Height; /* Height of the characters */ const SFG_StrokeChar** Characters; /* The characters mapping */ }; /* -- GLOBAL VARIABLES EXPORTS --------------------------------------------- */ /* Freeglut display related stuff (initialized once per session) */ extern SFG_Display fgDisplay; /* Freeglut internal structure */ extern SFG_Structure fgStructure; /* The current freeglut settings */ extern SFG_State fgState; /* -- PRIVATE FUNCTION DECLARATIONS ---------------------------------------- */ /* * A call to this function makes us sure that the Display and Structure * subsystems have been properly initialized and are ready to be used */ #define FREEGLUT_EXIT_IF_NOT_INITIALISED( string ) \ if ( ! fgState.Initialised ) \ { \ fgError ( " ERROR: Function <%s> called" \ " without first calling 'glutInit'.", (string) ) ; \ } #define FREEGLUT_INTERNAL_ERROR_EXIT_IF_NOT_INITIALISED( string ) \ if ( ! fgState.Initialised ) \ { \ fgError ( " ERROR: Internal <%s> function called" \ " without first calling 'glutInit'.", (string) ) ; \ } #define FREEGLUT_INTERNAL_ERROR_EXIT( cond, string, function ) \ if ( ! ( cond ) ) \ { \ fgError ( " ERROR: Internal error <%s> in function %s", \ (string), (function) ) ; \ } /* * Following definitions are somewhat similiar to GLib's, * but do not generate any log messages: */ #define freeglut_return_if_fail( expr ) \ if( !(expr) ) \ return; #define freeglut_return_val_if_fail( expr, val ) \ if( !(expr) ) \ return val ; /* * A call to those macros assures us that there is a current * window set, respectively: */ #define FREEGLUT_EXIT_IF_NO_WINDOW( string ) \ if ( ! fgStructure.CurrentWindow && \ ( fgState.ActionOnWindowClose != GLUT_ACTION_CONTINUE_EXECUTION ) ) \ { \ fgError ( " ERROR: Function <%s> called" \ " with no current window defined.", (string) ) ; \ } /* * The deinitialize function gets called on glutMainLoop() end. It should clean up * everything inside of the freeglut */ void fgDeinitialize( void ); /* * Those two functions are used to create/destroy the freeglut internal * structures. This actually happens when calling glutInit() and when * quitting the glutMainLoop() (which actually happens, when all windows * have been closed). */ void fgCreateStructure( void ); void fgDestroyStructure( void ); /* A helper function to check if a display mode is possible to use */ #if TARGET_HOST_POSIX_X11 GLXFBConfig* fgChooseFBConfig( int* numcfgs ); #endif /* The window procedure for Win32 events handling */ #if TARGET_HOST_MS_WINDOWS LRESULT CALLBACK fgWindowProc( HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam ); void fgNewWGLCreateContext( SFG_Window* window ); GLboolean fgSetupPixelFormat( SFG_Window* window, GLboolean checkOnly, unsigned char layer_type ); #endif /* * Window creation, opening, closing and destruction. * Also CallBack clearing/initialization. * Defined in freeglut_structure.c, freeglut_window.c. */ SFG_Window* fgCreateWindow( SFG_Window* parent, const char* title, GLboolean positionUse, int x, int y, GLboolean sizeUse, int w, int h, GLboolean gameMode, GLboolean isMenu ); void fgSetWindow ( SFG_Window *window ); void fgOpenWindow( SFG_Window* window, const char* title, GLboolean positionUse, int x, int y, GLboolean sizeUse, int w, int h, GLboolean gameMode, GLboolean isSubWindow ); void fgCloseWindow( SFG_Window* window ); void fgAddToWindowDestroyList ( SFG_Window* window ); void fgCloseWindows (); void fgDestroyWindow( SFG_Window* window ); /* Menu creation and destruction. Defined in freeglut_structure.c */ SFG_Menu* fgCreateMenu( FGCBMenu menuCallback ); void fgDestroyMenu( SFG_Menu* menu ); /* Joystick device management functions, defined in freeglut_joystick.c */ int fgJoystickDetect( void ); void fgInitialiseJoysticks( void ); void fgJoystickClose( void ); void fgJoystickPollWindow( SFG_Window* window ); /* InputDevice Initialisation and Closure */ int fgInputDeviceDetect( void ); void fgInitialiseInputDevices( void ); void fgInputDeviceClose( void ); /* spaceball device functions, defined in freeglut_spaceball.c */ void fgInitialiseSpaceball( void ); void fgSpaceballClose( void ); void fgSpaceballSetWindow( SFG_Window *window ); int fgHasSpaceball( void ); int fgSpaceballNumButtons( void ); #if TARGET_HOST_POSIX_X11 int fgIsSpaceballXEvent( const XEvent *ev ); void fgSpaceballHandleXEvent( const XEvent *ev ); #endif /* Setting the cursor for a given window */ void fgSetCursor ( SFG_Window *window, int cursorID ); /* * Helper function to enumerate through all registered windows * and one to enumerate all of a window's subwindows... * * The GFunc callback for those functions will be defined as: * * void enumCallback( gpointer window, gpointer enumerator ); * * where window is the enumerated (sub)window pointer (SFG_Window *), * and userData is the a custom user-supplied pointer. Functions * are defined and exported from freeglut_structure.c file. */ void fgEnumWindows( FGCBenumerator enumCallback, SFG_Enumerator* enumerator ); void fgEnumSubWindows( SFG_Window* window, FGCBenumerator enumCallback, SFG_Enumerator* enumerator ); #if TARGET_HOST_MS_WINDOWS /* * Helper functions for getting client area from the window rect * and the window rect from the client area given the style of the window * (or a valid window pointer from which the style can be queried). */ void fghComputeWindowRectFromClientArea_UseStyle ( const DWORD windowStyle , RECT *clientRect, BOOL posIsOutside ); void fghComputeWindowRectFromClientArea_QueryWindow( const SFG_Window *window, RECT *clientRect, BOOL posIsOutside ); void fghComputeClientAreaFromWindowRect ( const SFG_Window *window, RECT *windowRect, BOOL wantPosOutside ); RECT fghGetClientArea ( const SFG_Window *window, BOOL wantPosOutside ); void fghGetBorderWidth(const DWORD windowStyle, int* xBorderWidth, int* yBorderWidth); #endif /* * fgWindowByHandle returns a (SFG_Window *) value pointing to the * first window in the queue matching the specified window handle. * The function is defined in freeglut_structure.c file. */ SFG_Window* fgWindowByHandle( SFG_WindowHandleType hWindow ); /* * This function is similiar to the previous one, except it is * looking for a specified (sub)window identifier. The function * is defined in freeglut_structure.c file. */ SFG_Window* fgWindowByID( int windowID ); /* * Looks up a menu given its ID. This is easier than fgWindowByXXX * as all menus are placed in a single doubly linked list... */ SFG_Menu* fgMenuByID( int menuID ); /* * The menu activation and deactivation the code. This is the meat * of the menu user interface handling code... */ void fgUpdateMenuHighlight ( SFG_Menu *menu ); GLboolean fgCheckActiveMenu ( SFG_Window *window, int button, GLboolean pressed, int mouse_x, int mouse_y ); void fgDeactivateMenu( SFG_Window *window ); /* * This function gets called just before the buffers swap, so that * freeglut can display the pull-down menus via OpenGL. The function * is defined in freeglut_menu.c file. */ void fgDisplayMenu( void ); /* Elapsed time as per glutGet(GLUT_ELAPSED_TIME). */ long fgElapsedTime( void ); /* System time in milliseconds */ long unsigned fgSystemTime(void); /* List functions */ void fgListInit(SFG_List *list); void fgListAppend(SFG_List *list, SFG_Node *node); void fgListRemove(SFG_List *list, SFG_Node *node); int fgListLength(SFG_List *list); void fgListInsert(SFG_List *list, SFG_Node *next, SFG_Node *node); /* Error Message functions */ void fgError( const char *fmt, ... ); void fgWarning( const char *fmt, ... ); /* * Check if "hint" is present in "property" for "window". See freeglut_init.c */ #if TARGET_HOST_POSIX_X11 int fgHintPresent(Window window, Atom property, Atom hint); /* Handler for X extension Events */ #ifdef HAVE_X11_EXTENSIONS_XINPUT2_H void fgHandleExtensionEvents( XEvent * ev ); void fgRegisterDevices( Display* dpy, Window* win ); #endif #endif SFG_Proc fghGetProcAddress( const char *procName ); #if TARGET_HOST_MS_WINDOWS extern void (__cdecl *__glutExitFunc)( int return_value ); #endif #include <fcntl.h> #endif /* FREEGLUT_INTERNAL_H */ /*** END OF FILE ***/
{ "pile_set_name": "Github" }
<?xml version='1.0' encoding='UTF-8'?> <org.jenkinsci.plugins.terraform.TerraformBuildWrapper_-DescriptorImpl plugin="[email protected]"> <installations> <org.jenkinsci.plugins.terraform.TerraformInstallation> <name>Terraform 0.9.11</name> <home></home> <properties> <hudson.tools.InstallSourceProperty> <installers> <org.jenkinsci.plugins.terraform.TerraformInstaller> <id>0.9.11-linux-amd64</id> </org.jenkinsci.plugins.terraform.TerraformInstaller> </installers> </hudson.tools.InstallSourceProperty> </properties> </org.jenkinsci.plugins.terraform.TerraformInstallation> </installations> </org.jenkinsci.plugins.terraform.TerraformBuildWrapper_-DescriptorImpl>
{ "pile_set_name": "Github" }
#!/usr/bin/env bash # 1o is as 1k, but putting the dropout on (c,m), i.e. the output # of the LstmNonlinearityComponent, which I believe is the same as # putting it on (i,f) which Gaofeng found worked well in the non-fast Lstm # component; and using schedule which maxes out at 0.3, not 0.7. # [note: this was a little worse. turns out it was not the same as # what gaofeng did because he had separate masks on (i,f). # note: I've since removed the script-level support for this. # local/chain/compare_wer_general.sh --looped exp/chain_cleaned/tdnn_lstm1{e,k,l,m,n,o}_sp_bi # System tdnn_lstm1e_sp_bi tdnn_lstm1k_sp_bi tdnn_lstm1l_sp_bi tdnn_lstm1m_sp_bi tdnn_lstm1n_sp_bi tdnn_lstm1o_sp_bi # WER on dev(orig) 9.0 8.7 8.9 9.0 8.8 8.8 # [looped:] 9.0 8.6 8.9 8.9 8.8 8.9 # WER on dev(rescored) 8.4 7.9 8.2 8.2 8.1 8.1 # [looped:] 8.4 7.8 8.2 8.3 8.1 8.2 # WER on test(orig) 8.8 8.8 8.9 8.9 8.7 8.7 # [looped:] 8.8 8.7 8.8 8.8 8.7 8.7 # WER on test(rescored) 8.4 8.3 8.2 8.5 8.3 8.2 # [looped:] 8.3 8.3 8.3 8.5 8.3 8.2 # Final train prob -0.0648 -0.0693 -0.0768 -0.0807 -0.0702 -0.0698 # Final valid prob -0.0827 -0.0854 -0.0943 -0.0931 -0.0836 -0.0858 # Final train prob (xent) -0.8372 -0.8848 -0.9371 -0.9807 -0.8719 -0.8998 # Final valid prob (xent) -0.9497 -0.9895 -1.0546 -1.0629 -0.9732 -1.0084 # 1e is as 1b, but reducing decay-time from 40 to 20. # 1d is as 1b, but adding decay-time=40 to the fast-lstmp-layers. note: it # uses egs from 1b, remember to remove that before I commit. # steps/info/chain_dir_info.pl exp/chain_cleaned/tdnn_lstm1a_sp_bi # exp/chain_cleaned/tdnn_lstm1a_sp_bi: num-iters=253 nj=2..12 num-params=9.5M dim=40+100->3607 combine=-0.07->-0.07 xent:train/valid[167,252,final]=(-0.960,-0.859,-0.852/-1.05,-0.999,-0.997) logprob:train/valid[167,252,final]=(-0.076,-0.064,-0.062/-0.099,-0.092,-0.091) # This is as run_lstm1e.sh except adding TDNN layers in between; also comparing below # with run_lstm1d.sh which had a larger non-recurrent-projection-dim and which had # better results. Note: these results are not with the updated LM (the LM data-prep # for this setup was changed in Nov 2016 but this was with an older directory). # # local/chain/compare_wer_general.sh exp/chain_cleaned/lstm1d_sp_bi exp/chain_cleaned/lstm1e_sp_bi exp/chain_cleaned/tdnn_lstm1a_sp_bi # System lstm1d_sp_bi lstm1e_sp_bi tdnn_lstm1a_sp_bi # WER on dev(orig) 10.3 10.7 9.7 # WER on dev(rescored) 9.8 10.1 9.3 # WER on test(orig) 9.7 9.8 9.1 # WER on test(rescored) 9.2 9.4 8.7 # Final train prob -0.0812 -0.0862 -0.0625 # Final valid prob -0.1049 -0.1047 -0.0910 # Final train prob (xent) -1.1334 -1.1763 -0.8518 # Final valid prob (xent) -1.2263 -1.2427 -0.9972 ## how you run this (note: this assumes that the run_tdnn_lstm.sh soft link points here; ## otherwise call it directly in its location). # by default, with cleanup: # local/chain/run_tdnn_lstm.sh # without cleanup: # local/chain/run_tdnn_lstm.sh --train-set train --gmm tri3 --nnet3-affix "" & # note, if you have already run one of the non-chain nnet3 systems # (e.g. local/nnet3/run_tdnn.sh), you may want to run with --stage 14. # run_tdnn_lstm_1a.sh was modified from run_lstm_1e.sh, which is a fairly # standard, LSTM, except that some TDNN layers were added in between the # LSTM layers. I was looking at egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i.sh, but # this isn't exactly copied from there. set -e -o pipefail # First the options that are passed through to run_ivector_common.sh # (some of which are also used in this script directly). stage=0 nj=30 decode_nj=30 min_seg_len=1.55 label_delay=5 xent_regularize=0.1 train_set=train_cleaned gmm=tri3_cleaned # the gmm for the target data num_threads_ubm=32 nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned # training options chunk_left_context=40 chunk_right_context=0 chunk_left_context_initial=0 chunk_right_context_final=0 # decode options extra_left_context=50 extra_right_context=0 extra_left_context_initial=0 extra_right_context_final=0 frames_per_chunk=140,100,160 frames_per_chunk_primary=140 # The rest are configs specific to this script. Most of the parameters # are just hardcoded at this level, in the commands below. train_stage=-10 tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration. tdnn_lstm_affix=1o #affix for TDNN-LSTM directory, e.g. "a" or "b", in case we change the configuration. common_egs_dir=exp/chain_cleaned/tdnn_lstm1b_sp_bi/egs # you can set this to use previously dumped egs. # End configuration section. echo "$0 $@" # Print the command line for logging . ./cmd.sh . ./path.sh . ./utils/parse_options.sh if ! cuda-compiled; then cat <<EOF && exit 1 This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA If you want to use GPUs (and have them), go to src/, and configure and make on a machine where "nvcc" is installed. EOF fi local/nnet3/run_ivector_common.sh --stage $stage \ --nj $nj \ --min-seg-len $min_seg_len \ --train-set $train_set \ --gmm $gmm \ --num-threads-ubm $num_threads_ubm \ --nnet3-affix "$nnet3_affix" gmm_dir=exp/$gmm ali_dir=exp/${gmm}_ali_${train_set}_sp_comb tree_dir=exp/chain${nnet3_affix}/tree_bi${tree_affix} lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats dir=exp/chain${nnet3_affix}/tdnn_lstm${tdnn_lstm_affix}_sp_bi train_data_dir=data/${train_set}_sp_hires_comb lores_train_data_dir=data/${train_set}_sp_comb train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \ $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz $gmm_dir/final.mdl; do [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1 done if [ $stage -le 14 ]; then echo "$0: creating lang directory with one state per phone." # Create a version of the lang/ directory that has one state per phone in the # topo file. [note, it really has two states.. the first one is only repeated # once, the second one has zero or more repeats.] if [ -d data/lang_chain ]; then if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then echo "$0: data/lang_chain already exists, not overwriting it; continuing" else echo "$0: data/lang_chain already exists and seems to be older than data/lang..." echo " ... not sure what to do. Exiting." exit 1; fi else cp -r data/lang data/lang_chain silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1; nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1; # Use our special topology... note that later on may have to tune this # topology. steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo fi fi if [ $stage -le 15 ]; then # Get the alignments as lattices (gives the chain training more freedom). # use the same num-jobs as the alignments steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \ data/lang $gmm_dir $lat_dir rm $lat_dir/fsts.*.gz # save space fi if [ $stage -le 16 ]; then # Build a tree using our new topology. We know we have alignments for the # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use # those. if [ -f $tree_dir/final.mdl ]; then echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." exit 1; fi steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \ --context-opts "--context-width=2 --central-position=1" \ --leftmost-questions-truncate -1 \ --cmd "$train_cmd" 4000 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir fi if [ $stage -le 17 ]; then mkdir -p $dir echo "$0: creating neural net configs using the xconfig parser"; num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python) # note: the value of the dropout-proportion is not important, as it's # controlled by the dropout schedule; what's important is that we set it. lstmp_opts="decay-time=20 dropout-proportion=0.0 dropout-place=2 dropout-per-frame=true" mkdir -p $dir/configs cat <<EOF > $dir/configs/network.xconfig input dim=100 name=ivector input dim=40 name=input # please note that it is important to have input layer with the name=input # as the layer immediately preceding the fixed-affine-layer to enable # the use of short notation for the descriptor fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat # the first splicing is moved before the lda layer, so no splicing here relu-renorm-layer name=tdnn1 dim=512 relu-renorm-layer name=tdnn2 dim=512 input=Append(-1,0,1) fast-lstmp-layer name=lstm1 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3 $lstmp_opts relu-renorm-layer name=tdnn3 dim=512 input=Append(-3,0,3) relu-renorm-layer name=tdnn4 dim=512 input=Append(-3,0,3) fast-lstmp-layer name=lstm2 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3 $lstmp_opts relu-renorm-layer name=tdnn5 dim=512 input=Append(-3,0,3) relu-renorm-layer name=tdnn6 dim=512 input=Append(-3,0,3) fast-lstmp-layer name=lstm3 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3 $lstmp_opts ## adding the layers for chain branch output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 # adding the layers for xent branch # This block prints the configs for a separate output that will be # trained with a cross-entropy objective in the 'chain' models... this # has the effect of regularizing the hidden parts of the model. we use # 0.5 / args.xent_regularize as the learning rate factor- the factor of # 0.5 / args.xent_regularize is suitable as it means the xent # final-layer learns at a rate independent of the regularization # constant; and the 0.5 was tuned so as to make the relative progress # similar in the xent and regular final layers. output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 EOF steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ fi if [ $stage -le 18 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then utils/create_split_dir.pl \ /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ --trainer.dropout-schedule='0,[email protected],[email protected],[email protected],0' \ --chain.xent-regularize $xent_regularize \ --chain.leaky-hmm-coefficient 0.1 \ --chain.l2-regularize 0.00005 \ --chain.apply-deriv-weights false \ --chain.lm-opts="--num-extra-lm-states=2000" \ --egs.dir "$common_egs_dir" \ --egs.opts "--frames-overlap-per-eg 0" \ --egs.chunk-width "$frames_per_chunk" \ --egs.chunk-left-context "$chunk_left_context" \ --egs.chunk-right-context "$chunk_right_context" \ --egs.chunk-left-context-initial "$chunk_left_context_initial" \ --egs.chunk-right-context-final "$chunk_right_context_final" \ --trainer.num-chunk-per-minibatch 128,64 \ --trainer.frames-per-iter 1500000 \ --trainer.max-param-change 2.0 \ --trainer.num-epochs 4 \ --trainer.deriv-truncate-margin 10 \ --trainer.optimization.shrink-value 0.99 \ --trainer.optimization.num-jobs-initial 2 \ --trainer.optimization.num-jobs-final 12 \ --trainer.optimization.initial-effective-lrate 0.001 \ --trainer.optimization.final-effective-lrate 0.0001 \ --trainer.optimization.momentum 0.0 \ --cleanup.remove-egs true \ --feat-dir $train_data_dir \ --tree-dir $tree_dir \ --lat-dir $lat_dir \ --dir $dir \ --cleanup=false # --cleanup=false is temporary while debugging. fi if [ $stage -le 19 ]; then # Note: it might appear that this data/lang_chain directory is mismatched, and it is as # far as the 'topo' is concerned, but this script doesn't read the 'topo' from # the lang directory. utils/mkgraph.sh --self-loop-scale 1.0 data/lang $dir $dir/graph fi if [ $stage -le 20 ]; then rm $dir/.error 2>/dev/null || true for dset in dev test; do ( steps/nnet3/decode.sh --num-threads 4 --nj $decode_nj --cmd "$decode_cmd" \ --acwt 1.0 --post-decode-acwt 10.0 \ --extra-left-context $extra_left_context \ --extra-right-context $extra_right_context \ --extra-left-context-initial $extra_left_context_initial \ --extra-right-context-final $extra_right_context_final \ --frames-per-chunk "$frames_per_chunk_primary" \ --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${dset}_hires \ --scoring-opts "--min-lmwt 5 " \ $dir/graph data/${dset}_hires $dir/decode_${dset} || exit 1; steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \ data/${dset}_hires ${dir}/decode_${dset} ${dir}/decode_${dset}_rescore || exit 1 ) || touch $dir/.error & done wait if [ -f $dir/.error ]; then echo "$0: something went wrong in decoding" exit 1 fi fi if [ $stage -le 21 ]; then # 'looped' decoding. we didn't write a -parallel version of this program yet, # so it will take a bit longer as the --num-threads option is not supported. # we just hardcode the --frames-per-chunk option as it doesn't have to # match any value used in training, and it won't affect the results (unlike # regular decoding). rm $dir/.error 2>/dev/null || true for dset in dev test; do ( steps/nnet3/decode_looped.sh --nj $decode_nj --cmd "$decode_cmd" \ --acwt 1.0 --post-decode-acwt 10.0 \ --extra-left-context-initial $extra_left_context_initial \ --frames-per-chunk 30 \ --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${dset}_hires \ --scoring-opts "--min-lmwt 5 " \ $dir/graph data/${dset}_hires $dir/decode_looped_${dset} || exit 1; steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \ data/${dset}_hires ${dir}/decode_looped_${dset} ${dir}/decode_looped_${dset}_rescore || exit 1 ) || touch $dir/.error & done wait if [ -f $dir/.error ]; then echo "$0: something went wrong in decoding" exit 1 fi fi exit 0
{ "pile_set_name": "Github" }
{ "tests": [ { "description": "update: ServerTimestamp with data", "comment": "A key with the special ServerTimestamp sentinel is removed from\nthe data in the update operation. Instead it appears in a separate Transform operation.\nNote that in these tests, the string \"ServerTimestamp\" should be replaced with the\nspecial ServerTimestamp value.", "update": { "docRefPath": "projects/projectID/databases/(default)/documents/C/d", "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", "request": { "database": "projects/projectID/databases/(default)", "writes": [ { "update": { "name": "projects/projectID/databases/(default)/documents/C/d", "fields": { "a": { "integerValue": "1" } } }, "updateMask": { "fieldPaths": [ "a" ] }, "currentDocument": { "exists": true } }, { "transform": { "document": "projects/projectID/databases/(default)/documents/C/d", "fieldTransforms": [ { "fieldPath": "b", "setToServerValue": "REQUEST_TIME" } ] } } ] } } } ] }
{ "pile_set_name": "Github" }
// Copyright 2016 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v2_3 import ( "reflect" "github.com/coreos/ignition/config/v2_3/types" ) // Append appends newConfig to oldConfig and returns the result. Appending one // config to another is accomplished by iterating over every field in the // config structure, appending slices, recursively appending structs, and // overwriting old values with new values for all other types. func Append(oldConfig, newConfig types.Config) types.Config { vOld := reflect.ValueOf(oldConfig) vNew := reflect.ValueOf(newConfig) vResult := appendStruct(vOld, vNew) return vResult.Interface().(types.Config) } // appendStruct is an internal helper function to AppendConfig. Given two values // of structures (assumed to be the same type), recursively iterate over every // field in the struct, appending slices, recursively appending structs, and // overwriting old values with the new for all other types. Some individual // struct fields have alternate merge strategies, determined by the field name. // Currently these fields are "ignition.version", which uses the old value, and // "ignition.config" which uses the new value. func appendStruct(vOld, vNew reflect.Value) reflect.Value { tOld := vOld.Type() vRes := reflect.New(tOld) for i := 0; i < tOld.NumField(); i++ { vfOld := vOld.Field(i) vfNew := vNew.Field(i) vfRes := vRes.Elem().Field(i) switch tOld.Field(i).Name { case "Version": vfRes.Set(vfOld) continue case "Config": vfRes.Set(vfNew) continue } switch vfOld.Type().Kind() { case reflect.Struct: vfRes.Set(appendStruct(vfOld, vfNew)) case reflect.Slice: vfRes.Set(reflect.AppendSlice(vfOld, vfNew)) default: if vfNew.Kind() == reflect.Ptr && vfNew.IsNil() { vfRes.Set(vfOld) } else { vfRes.Set(vfNew) } } } return vRes.Elem() }
{ "pile_set_name": "Github" }
/* ** 2001 September 16 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This header file (together with is companion C source-code file ** "os.c") attempt to abstract the underlying operating system so that ** the SQLite library will work on both POSIX and windows systems. ** ** This header file is #include-ed by sqliteInt.h and thus ends up ** being included by every source file. */ #ifndef _SQLITE_OS_H_ #define _SQLITE_OS_H_ /* ** Attempt to automatically detect the operating system and setup the ** necessary pre-processor macros for it. */ #include "os_setup.h" /* If the SET_FULLSYNC macro is not defined above, then make it ** a no-op */ #ifndef SET_FULLSYNC # define SET_FULLSYNC(x,y) #endif /* ** The default size of a disk sector */ #ifndef SQLITE_DEFAULT_SECTOR_SIZE # define SQLITE_DEFAULT_SECTOR_SIZE 4096 #endif /* ** Temporary files are named starting with this prefix followed by 16 random ** alphanumeric characters, and no file extension. They are stored in the ** OS's standard temporary file directory, and are deleted prior to exit. ** If sqlite is being embedded in another program, you may wish to change the ** prefix to reflect your program's name, so that if your program exits ** prematurely, old temporary files can be easily identified. This can be done ** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. ** ** 2006-10-31: The default prefix used to be "sqlite_". But then ** Mcafee started using SQLite in their anti-virus product and it ** started putting files with the "sqlite" name in the c:/temp folder. ** This annoyed many windows users. Those users would then do a ** Google search for "sqlite", find the telephone numbers of the ** developers and call to wake them up at night and complain. ** For this reason, the default name prefix is changed to be "sqlite" ** spelled backwards. So the temp files are still identified, but ** anybody smart enough to figure out the code is also likely smart ** enough to know that calling the developer will not help get rid ** of the file. */ #ifndef SQLITE_TEMP_FILE_PREFIX # define SQLITE_TEMP_FILE_PREFIX "etilqs_" #endif /* ** The following values may be passed as the second argument to ** sqlite3OsLock(). The various locks exhibit the following semantics: ** ** SHARED: Any number of processes may hold a SHARED lock simultaneously. ** RESERVED: A single process may hold a RESERVED lock on a file at ** any time. Other processes may hold and obtain new SHARED locks. ** PENDING: A single process may hold a PENDING lock on a file at ** any one time. Existing SHARED locks may persist, but no new ** SHARED locks may be obtained by other processes. ** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. ** ** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a ** process that requests an EXCLUSIVE lock may actually obtain a PENDING ** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to ** sqlite3OsLock(). */ #define NO_LOCK 0 #define SHARED_LOCK 1 #define RESERVED_LOCK 2 #define PENDING_LOCK 3 #define EXCLUSIVE_LOCK 4 /* ** File Locking Notes: (Mostly about windows but also some info for Unix) ** ** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because ** those functions are not available. So we use only LockFile() and ** UnlockFile(). ** ** LockFile() prevents not just writing but also reading by other processes. ** A SHARED_LOCK is obtained by locking a single randomly-chosen ** byte out of a specific range of bytes. The lock byte is obtained at ** random so two separate readers can probably access the file at the ** same time, unless they are unlucky and choose the same lock byte. ** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. ** There can only be one writer. A RESERVED_LOCK is obtained by locking ** a single byte of the file that is designated as the reserved lock byte. ** A PENDING_LOCK is obtained by locking a designated byte different from ** the RESERVED_LOCK byte. ** ** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, ** which means we can use reader/writer locks. When reader/writer locks ** are used, the lock is placed on the same range of bytes that is used ** for probabilistic locking in Win95/98/ME. Hence, the locking scheme ** will support two or more Win95 readers or two or more WinNT readers. ** But a single Win95 reader will lock out all WinNT readers and a single ** WinNT reader will lock out all other Win95 readers. ** ** The following #defines specify the range of bytes used for locking. ** SHARED_SIZE is the number of bytes available in the pool from which ** a random byte is selected for a shared lock. The pool of bytes for ** shared locks begins at SHARED_FIRST. ** ** The same locking strategy and ** byte ranges are used for Unix. This leaves open the possibility of having ** clients on win95, winNT, and unix all talking to the same shared file ** and all locking correctly. To do so would require that samba (or whatever ** tool is being used for file sharing) implements locks correctly between ** windows and unix. I'm guessing that isn't likely to happen, but by ** using the same locking range we are at least open to the possibility. ** ** Locking in windows is manditory. For this reason, we cannot store ** actual data in the bytes used for locking. The pager never allocates ** the pages involved in locking therefore. SHARED_SIZE is selected so ** that all locks will fit on a single page even at the minimum page size. ** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE ** is set high so that we don't have to allocate an unused page except ** for very large databases. But one should test the page skipping logic ** by setting PENDING_BYTE low and running the entire regression suite. ** ** Changing the value of PENDING_BYTE results in a subtly incompatible ** file format. Depending on how it is changed, you might not notice ** the incompatibility right away, even running a full regression test. ** The default location of PENDING_BYTE is the first byte past the ** 1GB boundary. ** */ #ifdef SQLITE_OMIT_WSD # define PENDING_BYTE (0x40000000) #else # define PENDING_BYTE sqlite3PendingByte #endif #define RESERVED_BYTE (PENDING_BYTE+1) #define SHARED_FIRST (PENDING_BYTE+2) #define SHARED_SIZE 510 /* ** Wrapper around OS specific sqlite3_os_init() function. */ int sqlite3OsInit(void); /* ** Functions for accessing sqlite3_file methods */ void sqlite3OsClose(sqlite3_file*); int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); int sqlite3OsTruncate(sqlite3_file*, i64 size); int sqlite3OsSync(sqlite3_file*, int); int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); int sqlite3OsLock(sqlite3_file*, int); int sqlite3OsUnlock(sqlite3_file*, int); int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); int sqlite3OsFileControl(sqlite3_file*,int,void*); void sqlite3OsFileControlHint(sqlite3_file*,int,void*); #define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 int sqlite3OsSectorSize(sqlite3_file *id); int sqlite3OsDeviceCharacteristics(sqlite3_file *id); int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); int sqlite3OsShmLock(sqlite3_file *id, int, int, int); void sqlite3OsShmBarrier(sqlite3_file *id); int sqlite3OsShmUnmap(sqlite3_file *id, int); int sqlite3OsFetch(sqlite3_file *id, i64, int, void **); int sqlite3OsUnfetch(sqlite3_file *, i64, void *); /* ** Functions for accessing sqlite3_vfs methods */ int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); int sqlite3OsDelete(sqlite3_vfs *, const char *, int); int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); #ifndef SQLITE_OMIT_LOAD_EXTENSION void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); void sqlite3OsDlError(sqlite3_vfs *, int, char *); void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void); void sqlite3OsDlClose(sqlite3_vfs *, void *); #endif /* SQLITE_OMIT_LOAD_EXTENSION */ int sqlite3OsRandomness(sqlite3_vfs *, int, char *); int sqlite3OsSleep(sqlite3_vfs *, int); int sqlite3OsGetLastError(sqlite3_vfs*); int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); /* ** Convenience functions for opening and closing files using ** sqlite3_malloc() to obtain space for the file-handle structure. */ int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); void sqlite3OsCloseFree(sqlite3_file *); #endif /* _SQLITE_OS_H_ */
{ "pile_set_name": "Github" }
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/certificate_transparency/log_dns_client.h" #include <memory> #include <numeric> #include <string> #include <utility> #include <vector> #include "base/format_macros.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/strings/string_number_conversions.h" #include "base/strings/stringprintf.h" #include "base/test/test_timeouts.h" #include "components/certificate_transparency/mock_log_dns_traffic.h" #include "crypto/sha2.h" #include "net/base/net_errors.h" #include "net/cert/merkle_audit_proof.h" #include "net/cert/signed_certificate_timestamp.h" #include "net/dns/dns_client.h" #include "net/dns/dns_config_service.h" #include "net/dns/dns_protocol.h" #include "net/log/net_log.h" #include "net/test/gtest_util.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" namespace certificate_transparency { namespace { using ::testing::AllOf; using ::testing::Eq; using ::testing::IsEmpty; using ::testing::Le; using ::testing::Not; using ::testing::NotNull; using net::test::IsError; using net::test::IsOk; // Sample Merkle leaf hashes. const char* const kLeafHashes[] = { "\x1f\x25\xe1\xca\xba\x4f\xf9\xb8\x27\x24\x83\x0f\xca\x60\xe4\xc2\xbe\xa8" "\xc3\xa9\x44\x1c\x27\xb0\xb4\x3e\x6a\x96\x94\xc7\xb8\x04", "\x2c\x26\xb4\x6b\x68\xff\xc6\x8f\xf9\x9b\x45\x3c\x1d\x30\x41\x34\x13\x42" "\x2d\x70\x64\x83\xbf\xa0\xf9\x8a\x5e\x88\x62\x66\xe7\xae", "\xfc\xde\x2b\x2e\xdb\xa5\x6b\xf4\x08\x60\x1f\xb7\x21\xfe\x9b\x5c\x33\x8d" "\x10\xee\x42\x9e\xa0\x4f\xae\x55\x11\xb6\x8f\xbf\x8f\xb9", }; // DNS query names for looking up the leaf index associated with each hash in // |kLeafHashes|. Assumes the log domain is "ct.test". const char* const kLeafIndexQnames[] = { "D4S6DSV2J743QJZEQMH4UYHEYK7KRQ5JIQOCPMFUHZVJNFGHXACA.hash.ct.test.", "FQTLI23I77DI76M3IU6B2MCBGQJUELLQMSB37IHZRJPIQYTG46XA.hash.ct.test.", "7TPCWLW3UVV7ICDAD63SD7U3LQZY2EHOIKPKAT5OKUI3ND57R64Q.hash.ct.test.", }; // Leaf indices and tree sizes for use with |kLeafHashes|. const uint64_t kLeafIndices[] = {0, 1, 2}; const uint64_t kTreeSizes[] = {100, 10000, 1000000}; // Only 7 audit proof nodes can fit into a DNS response, because they are sent // in a TXT RDATA string, which has a maximum size of 255 bytes, and each node // is a SHA-256 hash (32 bytes), i.e. (255 / 32) == 7. // This means audit proofs consisting of more than 7 nodes require multiple DNS // requests to retrieve. const size_t kMaxProofNodesPerDnsResponse = 7; // Returns an example Merkle audit proof containing |length| nodes. // The proof cannot be used for cryptographic purposes; it is merely a // placeholder. std::vector<std::string> GetSampleAuditProof(size_t length) { std::vector<std::string> audit_proof(length); // Makes each node of the audit proof different, so that tests are able to // confirm that the audit proof is reconstructed in the correct order. for (size_t i = 0; i < length; ++i) { std::string node(crypto::kSHA256Length, '\0'); // Each node is 32 bytes, with each byte having a different value. for (size_t j = 0; j < crypto::kSHA256Length; ++j) { node[j] = static_cast<char>((-127 + i + j) % 128); } audit_proof[i].assign(std::move(node)); } return audit_proof; } } // namespace class LogDnsClientTest : public ::testing::TestWithParam<net::IoMode> { protected: LogDnsClientTest() : network_change_notifier_(net::NetworkChangeNotifier::CreateMock()) { mock_dns_.SetSocketReadMode(GetParam()); mock_dns_.InitializeDnsConfig(); } std::unique_ptr<LogDnsClient> CreateLogDnsClient( size_t max_concurrent_queries) { return std::make_unique<LogDnsClient>(mock_dns_.CreateDnsClient(), net::NetLogWithSource(), max_concurrent_queries); } // Convenience function for calling QueryAuditProof synchronously. template <typename... Types> net::Error QueryAuditProof(Types&&... args) { std::unique_ptr<LogDnsClient> log_client = CreateLogDnsClient(0); net::TestCompletionCallback callback; const net::Error result = log_client->QueryAuditProof( std::forward<Types>(args)..., callback.callback()); return result != net::ERR_IO_PENDING ? result : static_cast<net::Error>(callback.WaitForResult()); } // This will be the NetworkChangeNotifier singleton for the duration of the // test. It is accessed statically by LogDnsClient. std::unique_ptr<net::NetworkChangeNotifier> network_change_notifier_; // Queues and handles asynchronous DNS tasks. Indirectly used by LogDnsClient, // the underlying net::DnsClient, and NetworkChangeNotifier. base::MessageLoopForIO message_loop_; // Allows mock DNS sockets to be setup. MockLogDnsTraffic mock_dns_; }; TEST_P(LogDnsClientTest, QueryAuditProofReportsThatLogDomainDoesNotExist) { ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( kLeafIndexQnames[0], net::dns_protocol::kRcodeNXDOMAIN)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_NAME_NOT_RESOLVED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsServerFailuresDuringLeafIndexRequests) { ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( kLeafIndexQnames[0], net::dns_protocol::kRcodeSERVFAIL)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_SERVER_FAILED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsServerRefusalsDuringLeafIndexRequests) { ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( kLeafIndexQnames[0], net::dns_protocol::kRcodeREFUSED)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_SERVER_FAILED)); } TEST_P( LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexResponseContainsNoStrings) { ASSERT_TRUE(mock_dns_.ExpectRequestAndResponse( kLeafIndexQnames[0], std::vector<base::StringPiece>())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P( LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexResponseContainsMoreThanOneString) { ASSERT_TRUE( mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {"123456", "7"})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexIsNotNumeric) { ASSERT_TRUE(mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {"foo"})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexIsFloatingPoint) { ASSERT_TRUE( mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {"123456.0"})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexIsEmpty) { ASSERT_TRUE(mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {""})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexHasNonNumericPrefix) { ASSERT_TRUE( mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {"foo123456"})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsMalformedResponseIfLeafIndexHasNonNumericSuffix) { ASSERT_TRUE( mock_dns_.ExpectRequestAndResponse(kLeafIndexQnames[0], {"123456foo"})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsInvalidArgIfLogDomainIsEmpty) { std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_INVALID_ARGUMENT)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsInvalidArgIfLeafHashIsInvalid) { std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", "foo", kTreeSizes[0], &query), IsError(net::ERR_INVALID_ARGUMENT)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsInvalidArgIfLeafHashIsEmpty) { std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", "", kTreeSizes[0], &query), IsError(net::ERR_INVALID_ARGUMENT)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsSocketErrorsDuringLeafIndexRequests) { ASSERT_TRUE(mock_dns_.ExpectRequestAndSocketError( kLeafIndexQnames[0], net::ERR_CONNECTION_REFUSED)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_CONNECTION_REFUSED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsTimeoutsDuringLeafIndexRequests) { ASSERT_TRUE(mock_dns_.ExpectRequestAndTimeout(kLeafIndexQnames[0])); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], kTreeSizes[0], &query), IsError(net::ERR_DNS_TIMED_OUT)); } TEST_P(LogDnsClientTest, QueryAuditProof) { const std::vector<std::string> audit_proof = GetSampleAuditProof(20); // Expect a leaf index query first, to map the leaf hash to a leaf index. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); // It takes a number of DNS requests to retrieve the entire |audit_proof| // (see |kMaxProofNodesPerDnsResponse|). for (size_t nodes_begin = 0; nodes_begin < audit_proof.size(); nodes_begin += kMaxProofNodesPerDnsResponse) { const size_t nodes_end = std::min( nodes_begin + kMaxProofNodesPerDnsResponse, audit_proof.size()); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( base::StringPrintf("%zu.123456.999999.tree.ct.test.", nodes_begin), audit_proof.begin() + nodes_begin, audit_proof.begin() + nodes_end)); } std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsOk()); const net::ct::MerkleAuditProof& proof = query->GetProof(); EXPECT_THAT(proof.leaf_index, Eq(123456u)); EXPECT_THAT(proof.tree_size, Eq(999999u)); EXPECT_THAT(proof.nodes, Eq(audit_proof)); } TEST_P(LogDnsClientTest, QueryAuditProofHandlesResponsesWithShortAuditPaths) { const std::vector<std::string> audit_proof = GetSampleAuditProof(20); // Expect a leaf index query first, to map the leaf hash to a leaf index. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); // Make some of the responses contain fewer proof nodes than they can hold. ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.begin() + 1)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "1.123456.999999.tree.ct.test.", audit_proof.begin() + 1, audit_proof.begin() + 3)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "3.123456.999999.tree.ct.test.", audit_proof.begin() + 3, audit_proof.begin() + 6)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "6.123456.999999.tree.ct.test.", audit_proof.begin() + 6, audit_proof.begin() + 10)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "10.123456.999999.tree.ct.test.", audit_proof.begin() + 10, audit_proof.begin() + 13)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "13.123456.999999.tree.ct.test.", audit_proof.begin() + 13, audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsOk()); const net::ct::MerkleAuditProof& proof = query->GetProof(); EXPECT_THAT(proof.leaf_index, Eq(123456u)); EXPECT_THAT(proof.tree_size, Eq(999999u)); EXPECT_THAT(proof.nodes, Eq(audit_proof)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsThatAuditProofQnameDoesNotExist) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( "0.123456.999999.tree.ct.test.", net::dns_protocol::kRcodeNXDOMAIN)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_NAME_NOT_RESOLVED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsServerFailuresDuringAuditProofRequests) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( "0.123456.999999.tree.ct.test.", net::dns_protocol::kRcodeSERVFAIL)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_SERVER_FAILED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsServerRefusalsDuringAuditProofRequests) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndErrorResponse( "0.123456.999999.tree.ct.test.", net::dns_protocol::kRcodeREFUSED)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_SERVER_FAILED)); } TEST_P( LogDnsClientTest, QueryAuditProofReportsResponseMalformedIfProofNodesResponseContainsNoStrings) { // Expect a leaf index query first, to map the leaf hash to a leaf index. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndResponse( "0.123456.999999.tree.ct.test.", std::vector<base::StringPiece>())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P( LogDnsClientTest, QueryAuditProofReportsResponseMalformedIfProofNodesResponseContainsMoreThanOneString) { // The CT-over-DNS draft RFC states that the response will contain "exactly // one character-string." const std::vector<std::string> audit_proof = GetSampleAuditProof(10); std::string first_chunk_of_proof = std::accumulate( audit_proof.begin(), audit_proof.begin() + 7, std::string()); std::string second_chunk_of_proof = std::accumulate( audit_proof.begin() + 7, audit_proof.end(), std::string()); // Expect a leaf index query first, to map the leaf hash to a leaf index. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndResponse( "0.123456.999999.tree.ct.test.", {first_chunk_of_proof, second_chunk_of_proof})); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsResponseMalformedIfNodeTooShort) { // node is shorter than a SHA-256 hash (31 vs 32 bytes) const std::vector<std::string> audit_proof(1, std::string(31, 'a')); ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsResponseMalformedIfNodeTooLong) { // node is longer than a SHA-256 hash (33 vs 32 bytes) const std::vector<std::string> audit_proof(1, std::string(33, 'a')); ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsResponseMalformedIfEmpty) { const std::vector<std::string> audit_proof; ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_MALFORMED_RESPONSE)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsInvalidArgIfLeafIndexEqualToTreeSize) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 123456, &query), IsError(net::ERR_INVALID_ARGUMENT)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsInvalidArgIfLeafIndexGreaterThanTreeSize) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 999999)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 123456, &query), IsError(net::ERR_INVALID_ARGUMENT)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsSocketErrorsDuringAuditProofRequests) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectRequestAndSocketError( "0.123456.999999.tree.ct.test.", net::ERR_CONNECTION_REFUSED)); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_CONNECTION_REFUSED)); } TEST_P(LogDnsClientTest, QueryAuditProofReportsTimeoutsDuringAuditProofRequests) { ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE( mock_dns_.ExpectRequestAndTimeout("0.123456.999999.tree.ct.test.")); std::unique_ptr<LogDnsClient::AuditProofQuery> query; ASSERT_THAT(QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query), IsError(net::ERR_DNS_TIMED_OUT)); } TEST_P(LogDnsClientTest, AdoptsLatestDnsConfigIfValid) { std::unique_ptr<net::DnsClient> tmp = mock_dns_.CreateDnsClient(); net::DnsClient* dns_client = tmp.get(); LogDnsClient log_client(std::move(tmp), net::NetLogWithSource(), 0); // Get the current DNS config, modify it and broadcast the update. net::DnsConfig config(*dns_client->GetConfig()); ASSERT_NE(123, config.attempts); config.attempts = 123; mock_dns_.SetDnsConfig(config); // Let the DNS config change propogate. base::RunLoop().RunUntilIdle(); EXPECT_EQ(123, dns_client->GetConfig()->attempts); } TEST_P(LogDnsClientTest, IgnoresLatestDnsConfigIfInvalid) { std::unique_ptr<net::DnsClient> tmp = mock_dns_.CreateDnsClient(); net::DnsClient* dns_client = tmp.get(); LogDnsClient log_client(std::move(tmp), net::NetLogWithSource(), 0); // Get the current DNS config, modify it and broadcast the update. net::DnsConfig config(*dns_client->GetConfig()); ASSERT_THAT(config.nameservers, Not(IsEmpty())); config.nameservers.clear(); // Makes config invalid mock_dns_.SetDnsConfig(config); // Let the DNS config change propogate. base::RunLoop().RunUntilIdle(); EXPECT_THAT(dns_client->GetConfig()->nameservers, Not(IsEmpty())); } // Test that changes to the DNS config after starting a query are adopted and // that the query is not disrupted. TEST_P(LogDnsClientTest, AdoptsLatestDnsConfigMidQuery) { const std::vector<std::string> audit_proof = GetSampleAuditProof(20); // Expect a leaf index query first, to map the leaf hash to a leaf index. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); // It takes a number of DNS requests to retrieve the entire |audit_proof| // (see |kMaxProofNodesPerDnsResponse|). for (size_t nodes_begin = 0; nodes_begin < audit_proof.size(); nodes_begin += kMaxProofNodesPerDnsResponse) { const size_t nodes_end = std::min( nodes_begin + kMaxProofNodesPerDnsResponse, audit_proof.size()); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( base::StringPrintf("%zu.123456.999999.tree.ct.test.", nodes_begin), audit_proof.begin() + nodes_begin, audit_proof.begin() + nodes_end)); } std::unique_ptr<net::DnsClient> tmp = mock_dns_.CreateDnsClient(); net::DnsClient* dns_client = tmp.get(); LogDnsClient log_client(std::move(tmp), net::NetLogWithSource(), 0); // Start query. std::unique_ptr<LogDnsClient::AuditProofQuery> query; net::TestCompletionCallback callback; ASSERT_THAT(log_client.QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query, callback.callback()), IsError(net::ERR_IO_PENDING)); // Get the current DNS config, modify it and publish the update. // The new config is distributed asynchronously via NetworkChangeNotifier. net::DnsConfig config(*dns_client->GetConfig()); ASSERT_NE(123, config.attempts); config.attempts = 123; mock_dns_.SetDnsConfig(config); // The new config is distributed asynchronously via NetworkChangeNotifier. // Config change shouldn't have taken effect yet. ASSERT_NE(123, dns_client->GetConfig()->attempts); // Wait for the query to complete, then check that it was successful. // The DNS config should be updated during this time. ASSERT_THAT(callback.WaitForResult(), IsOk()); const net::ct::MerkleAuditProof& proof = query->GetProof(); EXPECT_THAT(proof.leaf_index, Eq(123456u)); EXPECT_THAT(proof.tree_size, Eq(999999u)); EXPECT_THAT(proof.nodes, Eq(audit_proof)); // Check that the DNS config change was adopted. ASSERT_EQ(123, dns_client->GetConfig()->attempts); } TEST_P(LogDnsClientTest, CanPerformQueriesInParallel) { // Check that 3 queries can be performed in parallel. constexpr size_t kNumOfParallelQueries = 3; ASSERT_THAT(kNumOfParallelQueries, AllOf(Le(arraysize(kLeafIndexQnames)), Le(arraysize(kLeafIndices)), Le(arraysize(kTreeSizes)))) << "Not enough test data for this many parallel queries"; std::unique_ptr<LogDnsClient> log_client = CreateLogDnsClient(kNumOfParallelQueries); net::TestCompletionCallback callbacks[kNumOfParallelQueries]; // Expect multiple leaf index requests. for (size_t i = 0; i < kNumOfParallelQueries; ++i) { ASSERT_TRUE(mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[i], kLeafIndices[i])); } // Make each query require one more audit proof request than the last, by // increasing the number of nodes in the audit proof by // kMaxProofNodesPerDnsResponse for each query. This helps to test that // parallel queries do not intefere with each other, e.g. one query causing // another to end prematurely. std::vector<std::string> audit_proofs[kNumOfParallelQueries]; for (size_t query_i = 0; query_i < kNumOfParallelQueries; ++query_i) { const size_t dns_requests_required = query_i + 1; audit_proofs[query_i] = GetSampleAuditProof(dns_requests_required * kMaxProofNodesPerDnsResponse); } // The most DNS requests that are made by any of the above N queries is N. const size_t kMaxDnsRequestsPerQuery = kNumOfParallelQueries; // Setup expectations for up to N DNS requests per query performed. // All of the queries will be started at the same time, so expect the DNS // requests and responses to be interleaved. // NB: // Ideally, the tests wouldn't require that the DNS requests sent by the // parallel queries are interleaved. However, the mock socket framework does // not provide a way to express this. for (size_t dns_req_i = 0; dns_req_i < kMaxDnsRequestsPerQuery; ++dns_req_i) { for (size_t query_i = 0; query_i < kNumOfParallelQueries; ++query_i) { const std::vector<std::string>& proof = audit_proofs[query_i]; // Closed-open range of |proof| nodes that are expected in this response. const size_t start_node = dns_req_i * 7; const size_t end_node = std::min(start_node + kMaxProofNodesPerDnsResponse, proof.size()); // If there are any nodes left, expect another request and response. if (start_node < end_node) { ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( base::StringPrintf("%zu.%" PRIu64 ".%" PRIu64 ".tree.ct.test.", start_node, kLeafIndices[query_i], kTreeSizes[query_i]), proof.begin() + start_node, proof.begin() + end_node)); } } } std::unique_ptr<LogDnsClient::AuditProofQuery> queries[kNumOfParallelQueries]; // Start the queries. for (size_t i = 0; i < kNumOfParallelQueries; ++i) { ASSERT_THAT( log_client->QueryAuditProof("ct.test", kLeafHashes[i], kTreeSizes[i], &queries[i], callbacks[i].callback()), IsError(net::ERR_IO_PENDING)) << "query #" << i; } // Wait for each query to complete and check its results. for (size_t i = 0; i < kNumOfParallelQueries; ++i) { net::TestCompletionCallback& callback = callbacks[i]; SCOPED_TRACE(testing::Message() << "callbacks[" << i << "]"); EXPECT_THAT(callback.WaitForResult(), IsOk()); const net::ct::MerkleAuditProof& proof = queries[i]->GetProof(); EXPECT_THAT(proof.leaf_index, Eq(kLeafIndices[i])); EXPECT_THAT(proof.tree_size, Eq(kTreeSizes[i])); EXPECT_THAT(proof.nodes, Eq(audit_proofs[i])); } } TEST_P(LogDnsClientTest, CanBeThrottledToOneQueryAtATime) { // Check that queries can be rate-limited to one at a time. // The second query, initiated while the first is in progress, should fail. const std::vector<std::string> audit_proof = GetSampleAuditProof(20); // Expect the first query to send leaf index and audit proof requests, but the // second should not due to throttling. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); // It should require 3 requests to collect the entire audit proof, as there is // only space for 7 nodes per TXT record. One node is 32 bytes long and the // TXT RDATA can have a maximum length of 255 bytes (255 / 32). // Rate limiting should not interfere with these requests. ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.begin() + 7)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "7.123456.999999.tree.ct.test.", audit_proof.begin() + 7, audit_proof.begin() + 14)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "14.123456.999999.tree.ct.test.", audit_proof.begin() + 14, audit_proof.end())); const size_t kMaxConcurrentQueries = 1; std::unique_ptr<LogDnsClient> log_client = CreateLogDnsClient(kMaxConcurrentQueries); // Try to start the queries. std::unique_ptr<LogDnsClient::AuditProofQuery> query1; net::TestCompletionCallback callback1; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query1, callback1.callback()), IsError(net::ERR_IO_PENDING)); std::unique_ptr<LogDnsClient::AuditProofQuery> query2; net::TestCompletionCallback callback2; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[1], 999999, &query2, callback2.callback()), IsError(net::ERR_TEMPORARILY_THROTTLED)); // Check that the first query succeeded. EXPECT_THAT(callback1.WaitForResult(), IsOk()); const net::ct::MerkleAuditProof& proof1 = query1->GetProof(); EXPECT_THAT(proof1.leaf_index, Eq(123456u)); EXPECT_THAT(proof1.tree_size, Eq(999999u)); EXPECT_THAT(proof1.nodes, Eq(audit_proof)); // Try a third query, which should succeed now that the first is finished. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[2], 666)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.666.999999.tree.ct.test.", audit_proof.begin(), audit_proof.begin() + 7)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "7.666.999999.tree.ct.test.", audit_proof.begin() + 7, audit_proof.begin() + 14)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "14.666.999999.tree.ct.test.", audit_proof.begin() + 14, audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query3; net::TestCompletionCallback callback3; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[2], 999999, &query3, callback3.callback()), IsError(net::ERR_IO_PENDING)); // Check that the third query succeeded. EXPECT_THAT(callback3.WaitForResult(), IsOk()); const net::ct::MerkleAuditProof& proof3 = query3->GetProof(); EXPECT_THAT(proof3.leaf_index, Eq(666u)); EXPECT_THAT(proof3.tree_size, Eq(999999u)); EXPECT_THAT(proof3.nodes, Eq(audit_proof)); } TEST_P(LogDnsClientTest, NotifiesWhenNoLongerThrottled) { const std::vector<std::string> audit_proof = GetSampleAuditProof(20); ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.123456.999999.tree.ct.test.", audit_proof.begin(), audit_proof.begin() + 7)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "7.123456.999999.tree.ct.test.", audit_proof.begin() + 7, audit_proof.begin() + 14)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "14.123456.999999.tree.ct.test.", audit_proof.begin() + 14, audit_proof.end())); const size_t kMaxConcurrentQueries = 1; std::unique_ptr<LogDnsClient> log_client = CreateLogDnsClient(kMaxConcurrentQueries); // Start a query. std::unique_ptr<LogDnsClient::AuditProofQuery> query1; net::TestCompletionCallback query_callback1; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query1, query_callback1.callback()), IsError(net::ERR_IO_PENDING)); net::TestClosure not_throttled_callback; log_client->NotifyWhenNotThrottled(not_throttled_callback.closure()); ASSERT_THAT(query_callback1.WaitForResult(), IsOk()); not_throttled_callback.WaitForResult(); // Start another query to check |not_throttled_callback| doesn't fire again. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[1], 666)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "0.666.999999.tree.ct.test.", audit_proof.begin(), audit_proof.begin() + 7)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "7.666.999999.tree.ct.test.", audit_proof.begin() + 7, audit_proof.begin() + 14)); ASSERT_TRUE(mock_dns_.ExpectAuditProofRequestAndResponse( "14.666.999999.tree.ct.test.", audit_proof.begin() + 14, audit_proof.end())); std::unique_ptr<LogDnsClient::AuditProofQuery> query2; net::TestCompletionCallback query_callback2; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[1], 999999, &query2, query_callback2.callback()), IsError(net::ERR_IO_PENDING)); // Give the query a chance to run. ASSERT_THAT(query_callback2.WaitForResult(), IsOk()); // Give |not_throttled_callback| a chance to run - it shouldn't though. base::RunLoop().RunUntilIdle(); ASSERT_FALSE(not_throttled_callback.have_result()); } TEST_P(LogDnsClientTest, CanCancelQueries) { const size_t kMaxConcurrentQueries = 1; std::unique_ptr<LogDnsClient> log_client = CreateLogDnsClient(kMaxConcurrentQueries); // Expect the first request of the query to be sent, but not the rest because // it'll be cancelled before it gets that far. ASSERT_TRUE( mock_dns_.ExpectLeafIndexRequestAndResponse(kLeafIndexQnames[0], 123456)); // Start query. std::unique_ptr<LogDnsClient::AuditProofQuery> query; net::TestCompletionCallback callback; ASSERT_THAT(log_client->QueryAuditProof("ct.test", kLeafHashes[0], 999999, &query, callback.callback()), IsError(net::ERR_IO_PENDING)); // Cancel the query. query.reset(); // Give |callback| a chance to run - it shouldn't though. base::RunLoop().RunUntilIdle(); ASSERT_FALSE(callback.have_result()); } INSTANTIATE_TEST_CASE_P(ReadMode, LogDnsClientTest, ::testing::Values(net::IoMode::ASYNC, net::IoMode::SYNCHRONOUS)); } // namespace certificate_transparency
{ "pile_set_name": "Github" }
{ "name": "@mostly-adequate/support", "version": "2.0.1", "description": "Support functions and data-structures from the Mostly Adequate Guide to Functional Programming", "license": "MIT", "main": "index.js", "repository": { "type": "git", "url": "https://github.com/MostlyAdequate/mostly-adequate-guide" }, "author": "@mostly-adequate", "bugs": { "url": "https://github.com/MostlyAdequate/mostly-adequate-guide/issues" }, "homepage": "https://github.com/MostlyAdequate/mostly-adequate-guide/support", "keywords": [ "functional programming", "mostly adequate", "guide", "fp" ], "dependencies": {}, "devDependencies": { "eslint": "^5.9.0", "eslint-config-airbnb": "^16.1.0", "eslint-plugin-import": "^2.8.0", "eslint-plugin-jsx-a11y": "^6.0.2", "eslint-plugin-react": "^7.5.1" }, "scripts": { "lint": "eslint ." } }
{ "pile_set_name": "Github" }
package com.salesforce.phoenix.filter; import java.util.Arrays; import java.util.Collection; import java.util.List; import junit.framework.TestCase; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; import com.google.common.base.Function; import com.google.common.collect.Lists; import com.salesforce.phoenix.query.KeyRange; import com.salesforce.phoenix.query.QueryConstants; import com.salesforce.phoenix.schema.ColumnModifier; import com.salesforce.phoenix.schema.PDataType; import com.salesforce.phoenix.schema.PDatum; import com.salesforce.phoenix.schema.RowKeySchema.RowKeySchemaBuilder; import com.salesforce.phoenix.util.ByteUtil; //reset() //filterAllRemaining() -> true indicates scan is over, false, keep going on. //filterRowKey(byte[],int,int) -> true to drop this row, if false, we will also call //filterKeyValue(KeyValue) -> true to drop this key/value //filterRow(List) -> allows direct modification of the final list to be submitted //filterRow() -> last chance to drop entire row based on the sequence of filterValue() calls. Eg: filter a row if it doesn't contain a specified column. @RunWith(Parameterized.class) public class SkipScanFilterTest extends TestCase { private final SkipScanFilter skipper; private final List<List<KeyRange>> cnf; private final List<Expectation> expectations; public SkipScanFilterTest(List<List<KeyRange>> cnf, int[] widths, List<Expectation> expectations) { this.expectations = expectations; this.cnf = cnf; RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); for (final int width : widths) { builder.addField( new PDatum() { @Override public boolean isNullable() { return width <= 0; } @Override public PDataType getDataType() { return width <= 0 ? PDataType.VARCHAR : PDataType.CHAR; } @Override public Integer getByteSize() { return width <= 0 ? null : width; } @Override public Integer getMaxLength() { return getByteSize(); } @Override public Integer getScale() { return null; } @Override public ColumnModifier getColumnModifier() { return null; } }, width <= 0, null); } skipper = new SkipScanFilter(cnf, builder.build()); } @Test public void test() { System.out.println("CNF: " + cnf + "\n" + "Expectations: " + expectations); for (Expectation expectation : expectations) { expectation.examine(skipper); } } @Parameters(name="{0} {1} {2}") public static Collection<Object> data() { List<Object> testCases = Lists.newArrayList(); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false), }}, new int[]{3,2,2,2,2}, //new SeekNext("abcABABABAB", "abdAAAAAAAA"), new SeekNext("defAAABABAB", "dzzAAAAAAAA"), new Finished("xyyABABABAB")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.VARCHAR.getKeyRange(Bytes.toBytes("j"), false, Bytes.toBytes("k"), true), }}, new int[]{0}, new SeekNext(Bytes.toBytes("a"), ByteUtil.nextKey(new byte[] {'j',QueryConstants.SEPARATOR_BYTE})), new Include("ja"), new Include("jz"), new Include("k"), new Finished("ka"))); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("aac"), true, Bytes.toBytes("aad"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true) }}, new int[]{3}, new SeekNext("aab", "aac"), new SeekNext("abb", "abc"), new Include("abc"), new Include("abe"), new Include("def"), new Finished("deg"))); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), true) }}, new int[]{3}, new SeekNext("aba", "abd"), new Include("abe"), new Include("def"), new Finished("deg"))); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), false) }}, new int[]{3}, new SeekNext("aba", "abd"), new Finished("def")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false), }}, new int[]{3}, new Include("def"), new SeekNext("deg", "dzz"), new Include("eee"), new Finished("xyz")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false), PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), }}, new int[]{3,2}, new Include("abcAB"), new SeekNext("abcAY","abcEB"), new Include("abcEF"), new SeekNext("abcPP","defAB"), new SeekNext("defEZ","defPO"), new Include("defPO"), new Finished("defPP") ) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false), PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true), }}, new int[]{2,3}, new Include("ABabc"), new SeekNext("ABdeg","ACabc"), new Include("AMabc"), new SeekNext("AYabc","EBabc"), new Include("EFabc"), new SeekNext("EZdef","POabc"), new SeekNext("POabd","POdef"), new Include("POdef"), new Finished("PPabc")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true), }}, new int[]{2,3}, new Include("POdef"), new Finished("POdeg")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PO"), true), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true), }}, new int[]{2,3}, new Include("POdef")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("AAA"), true, Bytes.toBytes("AAA"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false), PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), }}, new int[]{3,2}, new SeekNext("aaaAA", "abcAB"), new SeekNext("abcZZ", "abdAB"), new SeekNext("abdZZ", "abeAB"), new SeekNext(new byte[]{'d','e','a',(byte)0xFF,(byte)0xFF}, new byte[]{'d','e','b','A','B'}), new Include("defAB"), new Include("defAC"), new Include("defAW"), new Include("defAX"), new Include("defEB"), new Include("defPO"), new SeekNext("degAB", "dzzAB"), new Include("dzzAX"), new Include("dzzEY"), new SeekNext("dzzEZ", "dzzPO"), new Include("eeeAB"), new Include("eeeAC"), new SeekNext("eeeEA", "eeeEB"), new Include("eeeEF"), new SeekNext("eeeEZ","eeePO"), new Finished("xyzAA")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("dzz"), true, Bytes.toBytes("xyz"), false), }}, new int[]{3}, new SeekNext("abb", "abc"), new Include("abc"), new Include("abe"), new Finished("xyz")) ); testCases.addAll( foreach(new KeyRange[][]{{ PDataType.CHAR.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true), PDataType.CHAR.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false), PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), }, { PDataType.CHAR.getKeyRange(Bytes.toBytes("100"), true, Bytes.toBytes("250"), false), PDataType.CHAR.getKeyRange(Bytes.toBytes("700"), false, Bytes.toBytes("901"), false), }}, new int[]{3,2,3}, new SeekNext("abcEB700", "abcEB701"), new Include("abcEB701"), new SeekNext("dzzAB250", "dzzAB701"), new Finished("zzzAA000")) ); // TODO variable length columns // testCases.addAll( // foreach(new KeyRange[][]{{ // PDataType.CHAR.getKeyRange(Bytes.toBytes("apple"), true, Bytes.toBytes("lemon"), true), // PDataType.CHAR.getKeyRange(Bytes.toBytes("pear"), false, Bytes.toBytes("yam"), false), // }, // { // PDataType.CHAR.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true), // PDataType.CHAR.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false), // PDataType.CHAR.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false), // }, // { // PDataType.CHAR.getKeyRange(Bytes.toBytes("100"), true, Bytes.toBytes("250"), false), // PDataType.CHAR.getKeyRange(Bytes.toBytes("700"), false, Bytes.toBytes("901"), false), // }}, // new int[]{3,3}) // ); return testCases; } private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, Expectation... expectations) { List<List<KeyRange>> cnf = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); List<Object> ret = Lists.newArrayList(); ret.add(new Object[] {cnf, widths, Arrays.asList(expectations)} ); return ret; } private static final Function<KeyRange[], List<KeyRange>> ARRAY_TO_LIST = new Function<KeyRange[], List<KeyRange>>() { @Override public List<KeyRange> apply(KeyRange[] input) { return Lists.newArrayList(input); } }; static interface Expectation { void examine(SkipScanFilter skipper); } private static final class SeekNext implements Expectation { private final byte[] rowkey, hint; public SeekNext(String rowkey, String hint) { this.rowkey = Bytes.toBytes(rowkey); this.hint = Bytes.toBytes(hint); } public SeekNext(byte[] rowkey, byte[] hint) { this.rowkey = rowkey; this.hint = hint; } @Override public void examine(SkipScanFilter skipper) { KeyValue kv = KeyValue.createFirstOnRow(rowkey); skipper.reset(); assertFalse(skipper.filterAllRemaining()); assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, skipper.filterKeyValue(kv)); assertEquals(KeyValue.createFirstOnRow(hint), skipper.getNextKeyHint(kv)); } @Override public String toString() { return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected seek next using hint: " + Bytes.toStringBinary(hint); } } private static final class Include implements Expectation { private final byte[] rowkey; public Include(String rowkey) { this.rowkey = Bytes.toBytes(rowkey); } @Override public void examine(SkipScanFilter skipper) { KeyValue kv = KeyValue.createFirstOnRow(rowkey); skipper.reset(); assertFalse(skipper.filterAllRemaining()); assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); assertEquals(kv.toString(), ReturnCode.INCLUDE, skipper.filterKeyValue(kv)); } @Override public String toString() { return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected include"; } } private static final class Finished implements Expectation { private final byte[] rowkey; public Finished(String rowkey) { this.rowkey = Bytes.toBytes(rowkey); } @Override public void examine(SkipScanFilter skipper) { KeyValue kv = KeyValue.createFirstOnRow(rowkey); skipper.reset(); assertEquals(ReturnCode.NEXT_ROW,skipper.filterKeyValue(kv)); skipper.reset(); assertTrue(skipper.filterAllRemaining()); } @Override public String toString() { return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected finished"; } } }
{ "pile_set_name": "Github" }
define([ 'logger', 'backbone', 'injector', 'marionette', 'vent', '#qt_core/controllers/api', 'commands/index', 'controllers/nav', 'controllers/initdata', 'routers/router', 'views/layout', '#beans/beans', "modernizr", 'fastclick', '#qt_core/controllers/config' ], function (logger, Backbone, injector,Marionette,vent,api,commandoPool,NavController,InitDataController,Router, LayoutView, beans, Modernizr, FastClick,Cfg) { 'use strict'; //Done FastClick.attach(document.body); console.error('WARN : mainLocale done in code'); injector.set(injector.cfg.currentMainLocale,'fr'); injector.set(injector.cfg.currentSubLocales,[]); //A déplacer!!!! api.setInterceptEndCallFunction(function(res) { try { // Get CSRF Token value from Cookie using jQuery function getCookie(name) { var cookieValue = null; if (document.cookie && document.cookie !== '') { var cookies = document.cookie.split(';'); for (var i = 0; i < cookies.length; i++) { var cookie = jQuery.trim(cookies[i]); // Does this cookie string begin with the name we want? if (cookie.substring(0, name.length + 1) === (name + '=')) { cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); break; } } } return cookieValue; } var csrfToken = getCookie('csrftoken'); //var csrfToken = res.xhr.getResponseHeader("X-CSRF-TOKEN"); if (csrfToken) { //console.log("Ok CSRF : "+csrfToken); injector.set(injector.cfg.csrfToken,csrfToken); var apiController = injector.get(injector.cfg.apiController); apiController.removeHeader("X-CSRF-TOKEN"); apiController.addHeader("X-CSRF-TOKEN",csrfToken); } else { console.log('No CSRF Token?'); } } catch (e) { console.error("error while catching end call XHR : "+(e ? JSON.stringify(e) : "NULL?")); } }); //end déplacer var app = new Marionette.Application(); app.addInitializer(function () { this.commandoPool = commandoPool; //temp eric : trace when error this.commandoPool.commandError =function(error) { console.log('Error : '+error); if (error.stack) console.log(error.stack); }; window.quemaInjector = injector; window.quemaVent = vent; injector.set(injector.config['commando.pool'], commandoPool); this.apiController = api; injector.set(injector.config.api, api.api); injector.set(injector.config.apiController, api); }); return app; });
{ "pile_set_name": "Github" }
/** ****************************************************************************** * @file system_stm32f4xx.c * @author MCD Application Team * @version V1.4.0 * @date 04-August-2014 * @brief CMSIS Cortex-M4 Device Peripheral Access Layer System Source File. * This file contains the system clock configuration for STM32F4xx devices. * * 1. This file provides two functions and one global variable to be called from * user application: * - SystemInit(): Setups the system clock (System clock source, PLL Multiplier * and Divider factors, AHB/APBx prescalers and Flash settings), * depending on the configuration made in the clock xls tool. * This function is called at startup just after reset and * before branch to main program. This call is made inside * the "startup_stm32f4xx.s" file. * * - SystemCoreClock variable: Contains the core clock (HCLK), it can be used * by the user application to setup the SysTick * timer or configure other parameters. * * - SystemCoreClockUpdate(): Updates the variable SystemCoreClock and must * be called whenever the core clock is changed * during program execution. * * 2. After each device reset the HSI (16 MHz) is used as system clock source. * Then SystemInit() function is called, in "startup_stm32f4xx.s" file, to * configure the system clock before to branch to main program. * * 3. If the system clock source selected by user fails to startup, the SystemInit() * function will do nothing and HSI still used as system clock source. User can * add some code to deal with this issue inside the SetSysClock() function. * * 4. The default value of HSE crystal is set to 25MHz, refer to "HSE_VALUE" define * in "stm32f4xx.h" file. When HSE is used as system clock source, directly or * through PLL, and you are using different crystal you have to adapt the HSE * value to your own configuration. * * 5. This file configures the system clock as follows: *============================================================================= *============================================================================= * Supported STM32F40xxx/41xxx devices *----------------------------------------------------------------------------- * System Clock source | PLL (HSE) *----------------------------------------------------------------------------- * SYSCLK(Hz) | 168000000 *----------------------------------------------------------------------------- * HCLK(Hz) | 168000000 *----------------------------------------------------------------------------- * AHB Prescaler | 1 *----------------------------------------------------------------------------- * APB1 Prescaler | 4 *----------------------------------------------------------------------------- * APB2 Prescaler | 2 *----------------------------------------------------------------------------- * HSE Frequency(Hz) | 25000000 *----------------------------------------------------------------------------- * PLL_M | 25 *----------------------------------------------------------------------------- * PLL_N | 336 *----------------------------------------------------------------------------- * PLL_P | 2 *----------------------------------------------------------------------------- * PLL_Q | 7 *----------------------------------------------------------------------------- * PLLI2S_N | NA *----------------------------------------------------------------------------- * PLLI2S_R | NA *----------------------------------------------------------------------------- * I2S input clock | NA *----------------------------------------------------------------------------- * VDD(V) | 3.3 *----------------------------------------------------------------------------- * Main regulator output voltage | Scale1 mode *----------------------------------------------------------------------------- * Flash Latency(WS) | 5 *----------------------------------------------------------------------------- * Prefetch Buffer | ON *----------------------------------------------------------------------------- * Instruction cache | ON *----------------------------------------------------------------------------- * Data cache | ON *----------------------------------------------------------------------------- * Require 48MHz for USB OTG FS, | Disabled * SDIO and RNG clock | *----------------------------------------------------------------------------- *============================================================================= *============================================================================= * Supported STM32F42xxx/43xxx devices *----------------------------------------------------------------------------- * System Clock source | PLL (HSE) *----------------------------------------------------------------------------- * SYSCLK(Hz) | 180000000 *----------------------------------------------------------------------------- * HCLK(Hz) | 180000000 *----------------------------------------------------------------------------- * AHB Prescaler | 1 *----------------------------------------------------------------------------- * APB1 Prescaler | 4 *----------------------------------------------------------------------------- * APB2 Prescaler | 2 *----------------------------------------------------------------------------- * HSE Frequency(Hz) | 25000000 *----------------------------------------------------------------------------- * PLL_M | 25 *----------------------------------------------------------------------------- * PLL_N | 360 *----------------------------------------------------------------------------- * PLL_P | 2 *----------------------------------------------------------------------------- * PLL_Q | 7 *----------------------------------------------------------------------------- * PLLI2S_N | NA *----------------------------------------------------------------------------- * PLLI2S_R | NA *----------------------------------------------------------------------------- * I2S input clock | NA *----------------------------------------------------------------------------- * VDD(V) | 3.3 *----------------------------------------------------------------------------- * Main regulator output voltage | Scale1 mode *----------------------------------------------------------------------------- * Flash Latency(WS) | 5 *----------------------------------------------------------------------------- * Prefetch Buffer | ON *----------------------------------------------------------------------------- * Instruction cache | ON *----------------------------------------------------------------------------- * Data cache | ON *----------------------------------------------------------------------------- * Require 48MHz for USB OTG FS, | Disabled * SDIO and RNG clock | *----------------------------------------------------------------------------- *============================================================================= *============================================================================= * Supported STM32F401xx devices *----------------------------------------------------------------------------- * System Clock source | PLL (HSE) *----------------------------------------------------------------------------- * SYSCLK(Hz) | 84000000 *----------------------------------------------------------------------------- * HCLK(Hz) | 84000000 *----------------------------------------------------------------------------- * AHB Prescaler | 1 *----------------------------------------------------------------------------- * APB1 Prescaler | 2 *----------------------------------------------------------------------------- * APB2 Prescaler | 1 *----------------------------------------------------------------------------- * HSE Frequency(Hz) | 25000000 *----------------------------------------------------------------------------- * PLL_M | 25 *----------------------------------------------------------------------------- * PLL_N | 336 *----------------------------------------------------------------------------- * PLL_P | 4 *----------------------------------------------------------------------------- * PLL_Q | 7 *----------------------------------------------------------------------------- * PLLI2S_N | NA *----------------------------------------------------------------------------- * PLLI2S_R | NA *----------------------------------------------------------------------------- * I2S input clock | NA *----------------------------------------------------------------------------- * VDD(V) | 3.3 *----------------------------------------------------------------------------- * Main regulator output voltage | Scale1 mode *----------------------------------------------------------------------------- * Flash Latency(WS) | 2 *----------------------------------------------------------------------------- * Prefetch Buffer | ON *----------------------------------------------------------------------------- * Instruction cache | ON *----------------------------------------------------------------------------- * Data cache | ON *----------------------------------------------------------------------------- * Require 48MHz for USB OTG FS, | Disabled * SDIO and RNG clock | *----------------------------------------------------------------------------- *============================================================================= *============================================================================= * Supported STM32F411xx devices *----------------------------------------------------------------------------- * System Clock source | PLL (HSI) *----------------------------------------------------------------------------- * SYSCLK(Hz) | 100000000 *----------------------------------------------------------------------------- * HCLK(Hz) | 100000000 *----------------------------------------------------------------------------- * AHB Prescaler | 1 *----------------------------------------------------------------------------- * APB1 Prescaler | 2 *----------------------------------------------------------------------------- * APB2 Prescaler | 1 *----------------------------------------------------------------------------- * HSI Frequency(Hz) | 16000000 *----------------------------------------------------------------------------- * PLL_M | 16 *----------------------------------------------------------------------------- * PLL_N | 400 *----------------------------------------------------------------------------- * PLL_P | 4 *----------------------------------------------------------------------------- * PLL_Q | 7 *----------------------------------------------------------------------------- * PLLI2S_N | NA *----------------------------------------------------------------------------- * PLLI2S_R | NA *----------------------------------------------------------------------------- * I2S input clock | NA *----------------------------------------------------------------------------- * VDD(V) | 3.3 *----------------------------------------------------------------------------- * Main regulator output voltage | Scale1 mode *----------------------------------------------------------------------------- * Flash Latency(WS) | 3 *----------------------------------------------------------------------------- * Prefetch Buffer | ON *----------------------------------------------------------------------------- * Instruction cache | ON *----------------------------------------------------------------------------- * Data cache | ON *----------------------------------------------------------------------------- * Require 48MHz for USB OTG FS, | Disabled * SDIO and RNG clock | *----------------------------------------------------------------------------- *============================================================================= ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT 2013 STMicroelectronics</center></h2> * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.st.com/software_license_agreement_liberty_v2 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ****************************************************************************** */ /** @addtogroup CMSIS * @{ */ /** @addtogroup stm32f4xx_system * @{ */ /** @addtogroup STM32F4xx_System_Private_Includes * @{ */ #include "stm32f4xx.h" /** * @} */ /** @addtogroup STM32F4xx_System_Private_TypesDefinitions * @{ */ /** * @} */ /** @addtogroup STM32F4xx_System_Private_Defines * @{ */ /************************* Miscellaneous Configuration ************************/ /*!< Uncomment the following line if you need to use external SRAM or SDRAM mounted on STM324xG_EVAL/STM324x7I_EVAL/STM324x9I_EVAL boards as data memory */ #if defined (STM32F40_41xxx) || defined (STM32F427_437xx) || defined (STM32F429_439xx) /* #define DATA_IN_ExtSRAM */ #endif /* STM32F40_41xxx || STM32F427_437x || STM32F429_439xx */ #if defined (STM32F427_437xx) || defined (STM32F429_439xx) /* #define DATA_IN_ExtSDRAM */ #endif /* STM32F427_437x || STM32F429_439xx */ #if defined (STM32F411xE) /*!< Uncomment the following line if you need to clock the STM32F411xE by HSE Bypass through STLINK MCO pin of STM32F103 microcontroller. The frequency cannot be changed and is fixed at 8 MHz. Hardware configuration needed for Nucleo Board: – SB54, SB55 OFF – R35 removed – SB16, SB50 ON */ /* #define USE_HSE_BYPASS */ #if defined (USE_HSE_BYPASS) #define HSE_BYPASS_INPUT_FREQUENCY 8000000 #endif /* USE_HSE_BYPASS */ #endif /* STM32F411xE */ /*!< Uncomment the following line if you need to relocate your vector Table in Internal SRAM. */ /* #define VECT_TAB_SRAM */ #define VECT_TAB_OFFSET 0x00 /*!< Vector Table base offset field. This value must be a multiple of 0x200. */ /******************************************************************************/ /************************* PLL Parameters *************************************/ #if defined (STM32F40_41xxx) || defined (STM32F427_437xx) || defined (STM32F429_439xx) || defined (STM32F401xx) /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLL_M) * PLL_N */ #define PLL_M 8 #else /* STM32F411xE */ #if defined (USE_HSE_BYPASS) #define PLL_M 8 #else /* STM32F411xE */ #define PLL_M 16 #endif /* USE_HSE_BYPASS */ #endif /* STM32F40_41xxx || STM32F427_437xx || STM32F429_439xx || STM32F401xx */ /* USB OTG FS, SDIO and RNG Clock = PLL_VCO / PLLQ */ #define PLL_Q 7 #if defined (STM32F40_41xxx) #define PLL_N 336 /* SYSCLK = PLL_VCO / PLL_P */ #define PLL_P 2 #endif /* STM32F40_41xxx */ #if defined (STM32F427_437xx) || defined (STM32F429_439xx) #define PLL_N 360 /* SYSCLK = PLL_VCO / PLL_P */ #define PLL_P 2 #endif /* STM32F427_437x || STM32F429_439xx */ #if defined (STM32F401xx) #define PLL_N 336 /* SYSCLK = PLL_VCO / PLL_P */ #define PLL_P 4 #endif /* STM32F401xx */ #if defined (STM32F411xE) #define PLL_N 400 /* SYSCLK = PLL_VCO / PLL_P */ #define PLL_P 4 #endif /* STM32F411xx */ /******************************************************************************/ /** * @} */ /** @addtogroup STM32F4xx_System_Private_Macros * @{ */ /** * @} */ /** @addtogroup STM32F4xx_System_Private_Variables * @{ */ #if defined (STM32F40_41xxx) uint32_t SystemCoreClock = 168000000; #endif /* STM32F40_41xxx */ #if defined (STM32F427_437xx) || defined (STM32F429_439xx) uint32_t SystemCoreClock = 180000000; #endif /* STM32F427_437x || STM32F429_439xx */ #if defined (STM32F401xx) uint32_t SystemCoreClock = 84000000; #endif /* STM32F401xx */ #if defined (STM32F411xE) uint32_t SystemCoreClock = 100000000; #endif /* STM32F401xx */ __I uint8_t AHBPrescTable[16] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 6, 7, 8, 9}; /** * @} */ /** @addtogroup STM32F4xx_System_Private_FunctionPrototypes * @{ */ static void SetSysClock(void); #if defined (DATA_IN_ExtSRAM) || defined (DATA_IN_ExtSDRAM) static void SystemInit_ExtMemCtl(void); #endif /* DATA_IN_ExtSRAM || DATA_IN_ExtSDRAM */ /** * @} */ /** @addtogroup STM32F4xx_System_Private_Functions * @{ */ /** * @brief Setup the microcontroller system * Initialize the Embedded Flash Interface, the PLL and update the * SystemFrequency variable. * @param None * @retval None */ void SystemInit(void) { /* FPU settings ------------------------------------------------------------*/ #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) SCB->CPACR |= ((3UL << 10*2)|(3UL << 11*2)); /* set CP10 and CP11 Full Access */ #endif /* Reset the RCC clock configuration to the default reset state ------------*/ /* Set HSION bit */ RCC->CR |= (uint32_t)0x00000001; /* Reset CFGR register */ RCC->CFGR = 0x00000000; /* Reset HSEON, CSSON and PLLON bits */ RCC->CR &= (uint32_t)0xFEF6FFFF; /* Reset PLLCFGR register */ RCC->PLLCFGR = 0x24003010; /* Reset HSEBYP bit */ RCC->CR &= (uint32_t)0xFFFBFFFF; /* Disable all interrupts */ RCC->CIR = 0x00000000; #if defined (DATA_IN_ExtSRAM) || defined (DATA_IN_ExtSDRAM) SystemInit_ExtMemCtl(); #endif /* DATA_IN_ExtSRAM || DATA_IN_ExtSDRAM */ /* Configure the System clock source, PLL Multiplier and Divider factors, AHB/APBx prescalers and Flash settings ----------------------------------*/ SetSysClock(); /* Configure the Vector Table location add offset address ------------------*/ #ifdef VECT_TAB_SRAM SCB->VTOR = SRAM_BASE | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal SRAM */ #else SCB->VTOR = FLASH_BASE | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal FLASH */ #endif } /** * @brief Update SystemCoreClock variable according to Clock Register Values. * The SystemCoreClock variable contains the core clock (HCLK), it can * be used by the user application to setup the SysTick timer or configure * other parameters. * * @note Each time the core clock (HCLK) changes, this function must be called * to update SystemCoreClock variable value. Otherwise, any configuration * based on this variable will be incorrect. * * @note - The system frequency computed by this function is not the real * frequency in the chip. It is calculated based on the predefined * constant and the selected clock source: * * - If SYSCLK source is HSI, SystemCoreClock will contain the HSI_VALUE(*) * * - If SYSCLK source is HSE, SystemCoreClock will contain the HSE_VALUE(**) * * - If SYSCLK source is PLL, SystemCoreClock will contain the HSE_VALUE(**) * or HSI_VALUE(*) multiplied/divided by the PLL factors. * * (*) HSI_VALUE is a constant defined in stm32f4xx.h file (default value * 16 MHz) but the real value may vary depending on the variations * in voltage and temperature. * * (**) HSE_VALUE is a constant defined in stm32f4xx.h file (default value * 25 MHz), user has to ensure that HSE_VALUE is same as the real * frequency of the crystal used. Otherwise, this function may * have wrong result. * * - The result of this function could be not correct when using fractional * value for HSE crystal. * * @param None * @retval None */ void SystemCoreClockUpdate(void) { uint32_t tmp = 0, pllvco = 0, pllp = 2, pllsource = 0, pllm = 2; /* Get SYSCLK source -------------------------------------------------------*/ tmp = RCC->CFGR & RCC_CFGR_SWS; switch (tmp) { case 0x00: /* HSI used as system clock source */ SystemCoreClock = HSI_VALUE; break; case 0x04: /* HSE used as system clock source */ SystemCoreClock = HSE_VALUE; break; case 0x08: /* PLL used as system clock source */ /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLL_M) * PLL_N SYSCLK = PLL_VCO / PLL_P */ pllsource = (RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) >> 22; pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; #if defined (STM32F40_41xxx) || defined (STM32F427_437xx) || defined (STM32F429_439xx) || defined (STM32F401xx) if (pllsource != 0) { /* HSE used as PLL clock source */ pllvco = (HSE_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); } else { /* HSI used as PLL clock source */ pllvco = (HSI_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); } #elif defined (STM32F411xE) #if defined (USE_HSE_BYPASS) if (pllsource != 0) { /* HSE used as PLL clock source */ pllvco = (HSE_BYPASS_INPUT_FREQUENCY / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); } #else if (pllsource == 0) { /* HSI used as PLL clock source */ pllvco = (HSI_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); } #endif /* USE_HSE_BYPASS */ #endif /* STM32F40_41xxx || STM32F427_437xx || STM32F429_439xx || STM32F401xx */ pllp = (((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >>16) + 1 ) *2; SystemCoreClock = pllvco/pllp; break; default: SystemCoreClock = HSI_VALUE; break; } /* Compute HCLK frequency --------------------------------------------------*/ /* Get HCLK prescaler */ tmp = AHBPrescTable[((RCC->CFGR & RCC_CFGR_HPRE) >> 4)]; /* HCLK frequency */ SystemCoreClock >>= tmp; } /** * @brief Configures the System clock source, PLL Multiplier and Divider factors, * AHB/APBx prescalers and Flash settings * @Note This function should be called only once the RCC clock configuration * is reset to the default reset state (done in SystemInit() function). * @param None * @retval None */ static void SetSysClock(void) { #if defined (STM32F40_41xxx) || defined (STM32F427_437xx) || defined (STM32F429_439xx) || defined (STM32F401xx) /******************************************************************************/ /* PLL (clocked by HSE) used as System clock source */ /******************************************************************************/ __IO uint32_t StartUpCounter = 0, HSEStatus = 0; /* Enable HSE */ RCC->CR |= ((uint32_t)RCC_CR_HSEON); /* Wait till HSE is ready and if Time out is reached exit */ do { HSEStatus = RCC->CR & RCC_CR_HSERDY; StartUpCounter++; } while((HSEStatus == 0) && (StartUpCounter != HSE_STARTUP_TIMEOUT)); if ((RCC->CR & RCC_CR_HSERDY) != RESET) { HSEStatus = (uint32_t)0x01; } else { HSEStatus = (uint32_t)0x00; } if (HSEStatus == (uint32_t)0x01) { /* Select regulator voltage output Scale 1 mode */ RCC->APB1ENR |= RCC_APB1ENR_PWREN; PWR->CR |= PWR_CR_VOS; /* HCLK = SYSCLK / 1*/ RCC->CFGR |= RCC_CFGR_HPRE_DIV1; #if defined (STM32F40_41xxx) || defined (STM32F427_437xx) || defined (STM32F429_439xx) /* PCLK2 = HCLK / 2*/ RCC->CFGR |= RCC_CFGR_PPRE2_DIV2; /* PCLK1 = HCLK / 4*/ RCC->CFGR |= RCC_CFGR_PPRE1_DIV4; #endif /* STM32F40_41xxx || STM32F427_437x || STM32F429_439xx */ #if defined (STM32F401xx) /* PCLK2 = HCLK / 2*/ RCC->CFGR |= RCC_CFGR_PPRE2_DIV1; /* PCLK1 = HCLK / 4*/ RCC->CFGR |= RCC_CFGR_PPRE1_DIV2; #endif /* STM32F401xx */ /* Configure the main PLL */ RCC->PLLCFGR = PLL_M | (PLL_N << 6) | (((PLL_P >> 1) -1) << 16) | (RCC_PLLCFGR_PLLSRC_HSE) | (PLL_Q << 24); /* Enable the main PLL */ RCC->CR |= RCC_CR_PLLON; /* Wait till the main PLL is ready */ while((RCC->CR & RCC_CR_PLLRDY) == 0) { } #if defined (STM32F427_437xx) || defined (STM32F429_439xx) /* Enable the Over-drive to extend the clock frequency to 180 Mhz */ PWR->CR |= PWR_CR_ODEN; while((PWR->CSR & PWR_CSR_ODRDY) == 0) { } PWR->CR |= PWR_CR_ODSWEN; while((PWR->CSR & PWR_CSR_ODSWRDY) == 0) { } /* Configure Flash prefetch, Instruction cache, Data cache and wait state */ FLASH->ACR = FLASH_ACR_PRFTEN | FLASH_ACR_ICEN |FLASH_ACR_DCEN |FLASH_ACR_LATENCY_5WS; #endif /* STM32F427_437x || STM32F429_439xx */ #if defined (STM32F40_41xxx) /* Configure Flash prefetch, Instruction cache, Data cache and wait state */ FLASH->ACR = FLASH_ACR_PRFTEN | FLASH_ACR_ICEN |FLASH_ACR_DCEN |FLASH_ACR_LATENCY_5WS; #endif /* STM32F40_41xxx */ #if defined (STM32F401xx) /* Configure Flash prefetch, Instruction cache, Data cache and wait state */ FLASH->ACR = FLASH_ACR_PRFTEN | FLASH_ACR_ICEN |FLASH_ACR_DCEN |FLASH_ACR_LATENCY_2WS; #endif /* STM32F401xx */ /* Select the main PLL as system clock source */ RCC->CFGR &= (uint32_t)((uint32_t)~(RCC_CFGR_SW)); RCC->CFGR |= RCC_CFGR_SW_PLL; /* Wait till the main PLL is used as system clock source */ while ((RCC->CFGR & (uint32_t)RCC_CFGR_SWS ) != RCC_CFGR_SWS_PLL); { } } else { /* If HSE fails to start-up, the application will have wrong clock configuration. User can add here some code to deal with this error */ } #elif defined (STM32F411xE) #if defined (USE_HSE_BYPASS) /******************************************************************************/ /* PLL (clocked by HSE) used as System clock source */ /******************************************************************************/ __IO uint32_t StartUpCounter = 0, HSEStatus = 0; /* Enable HSE and HSE BYPASS */ RCC->CR |= ((uint32_t)RCC_CR_HSEON | RCC_CR_HSEBYP); /* Wait till HSE is ready and if Time out is reached exit */ do { HSEStatus = RCC->CR & RCC_CR_HSERDY; StartUpCounter++; } while((HSEStatus == 0) && (StartUpCounter != HSE_STARTUP_TIMEOUT)); if ((RCC->CR & RCC_CR_HSERDY) != RESET) { HSEStatus = (uint32_t)0x01; } else { HSEStatus = (uint32_t)0x00; } if (HSEStatus == (uint32_t)0x01) { /* Select regulator voltage output Scale 1 mode */ RCC->APB1ENR |= RCC_APB1ENR_PWREN; PWR->CR |= PWR_CR_VOS; /* HCLK = SYSCLK / 1*/ RCC->CFGR |= RCC_CFGR_HPRE_DIV1; /* PCLK2 = HCLK / 2*/ RCC->CFGR |= RCC_CFGR_PPRE2_DIV1; /* PCLK1 = HCLK / 4*/ RCC->CFGR |= RCC_CFGR_PPRE1_DIV2; /* Configure the main PLL */ RCC->PLLCFGR = PLL_M | (PLL_N << 6) | (((PLL_P >> 1) -1) << 16) | (RCC_PLLCFGR_PLLSRC_HSE) | (PLL_Q << 24); /* Enable the main PLL */ RCC->CR |= RCC_CR_PLLON; /* Wait till the main PLL is ready */ while((RCC->CR & RCC_CR_PLLRDY) == 0) { } /* Configure Flash prefetch, Instruction cache, Data cache and wait state */ FLASH->ACR = FLASH_ACR_PRFTEN | FLASH_ACR_ICEN |FLASH_ACR_DCEN |FLASH_ACR_LATENCY_2WS; /* Select the main PLL as system clock source */ RCC->CFGR &= (uint32_t)((uint32_t)~(RCC_CFGR_SW)); RCC->CFGR |= RCC_CFGR_SW_PLL; /* Wait till the main PLL is used as system clock source */ while ((RCC->CFGR & (uint32_t)RCC_CFGR_SWS ) != RCC_CFGR_SWS_PLL); { } } else { /* If HSE fails to start-up, the application will have wrong clock configuration. User can add here some code to deal with this error */ } #else /* HSI will be used as PLL clock source */ /* Select regulator voltage output Scale 1 mode */ RCC->APB1ENR |= RCC_APB1ENR_PWREN; PWR->CR |= PWR_CR_VOS; /* HCLK = SYSCLK / 1*/ RCC->CFGR |= RCC_CFGR_HPRE_DIV1; /* PCLK2 = HCLK / 2*/ RCC->CFGR |= RCC_CFGR_PPRE2_DIV1; /* PCLK1 = HCLK / 4*/ RCC->CFGR |= RCC_CFGR_PPRE1_DIV2; /* Configure the main PLL */ RCC->PLLCFGR = PLL_M | (PLL_N << 6) | (((PLL_P >> 1) -1) << 16) | (PLL_Q << 24); /* Enable the main PLL */ RCC->CR |= RCC_CR_PLLON; /* Wait till the main PLL is ready */ while((RCC->CR & RCC_CR_PLLRDY) == 0) { } /* Configure Flash prefetch, Instruction cache, Data cache and wait state */ FLASH->ACR = FLASH_ACR_PRFTEN | FLASH_ACR_ICEN |FLASH_ACR_DCEN |FLASH_ACR_LATENCY_2WS; /* Select the main PLL as system clock source */ RCC->CFGR &= (uint32_t)((uint32_t)~(RCC_CFGR_SW)); RCC->CFGR |= RCC_CFGR_SW_PLL; /* Wait till the main PLL is used as system clock source */ while ((RCC->CFGR & (uint32_t)RCC_CFGR_SWS ) != RCC_CFGR_SWS_PLL); { } #endif /* USE_HSE_BYPASS */ #endif /* STM32F40_41xxx || STM32F427_437xx || STM32F429_439xx || STM32F401xx */ } /** * @brief Setup the external memory controller. Called in startup_stm32f4xx.s * before jump to __main * @param None * @retval None */ #ifdef DATA_IN_ExtSRAM /** * @brief Setup the external memory controller. * Called in startup_stm32f4xx.s before jump to main. * This function configures the external SRAM mounted on STM324xG_EVAL/STM324x7I boards * This SRAM will be used as program data memory (including heap and stack). * @param None * @retval None */ void SystemInit_ExtMemCtl(void) { /*-- GPIOs Configuration -----------------------------------------------------*/ /* +-------------------+--------------------+------------------+--------------+ + SRAM pins assignment + +-------------------+--------------------+------------------+--------------+ | PD0 <-> FMC_D2 | PE0 <-> FMC_NBL0 | PF0 <-> FMC_A0 | PG0 <-> FMC_A10 | | PD1 <-> FMC_D3 | PE1 <-> FMC_NBL1 | PF1 <-> FMC_A1 | PG1 <-> FMC_A11 | | PD4 <-> FMC_NOE | PE3 <-> FMC_A19 | PF2 <-> FMC_A2 | PG2 <-> FMC_A12 | | PD5 <-> FMC_NWE | PE4 <-> FMC_A20 | PF3 <-> FMC_A3 | PG3 <-> FMC_A13 | | PD8 <-> FMC_D13 | PE7 <-> FMC_D4 | PF4 <-> FMC_A4 | PG4 <-> FMC_A14 | | PD9 <-> FMC_D14 | PE8 <-> FMC_D5 | PF5 <-> FMC_A5 | PG5 <-> FMC_A15 | | PD10 <-> FMC_D15 | PE9 <-> FMC_D6 | PF12 <-> FMC_A6 | PG9 <-> FMC_NE2 | | PD11 <-> FMC_A16 | PE10 <-> FMC_D7 | PF13 <-> FMC_A7 |-----------------+ | PD12 <-> FMC_A17 | PE11 <-> FMC_D8 | PF14 <-> FMC_A8 | | PD13 <-> FMC_A18 | PE12 <-> FMC_D9 | PF15 <-> FMC_A9 | | PD14 <-> FMC_D0 | PE13 <-> FMC_D10 |-----------------+ | PD15 <-> FMC_D1 | PE14 <-> FMC_D11 | | | PE15 <-> FMC_D12 | +------------------+------------------+ */ /* Enable GPIOD, GPIOE, GPIOF and GPIOG interface clock */ RCC->AHB1ENR |= 0x00000078; /* Connect PDx pins to FMC Alternate function */ GPIOD->AFR[0] = 0x00cc00cc; GPIOD->AFR[1] = 0xcccccccc; /* Configure PDx pins in Alternate function mode */ GPIOD->MODER = 0xaaaa0a0a; /* Configure PDx pins speed to 100 MHz */ GPIOD->OSPEEDR = 0xffff0f0f; /* Configure PDx pins Output type to push-pull */ GPIOD->OTYPER = 0x00000000; /* No pull-up, pull-down for PDx pins */ GPIOD->PUPDR = 0x00000000; /* Connect PEx pins to FMC Alternate function */ GPIOE->AFR[0] = 0xcccccccc; GPIOE->AFR[1] = 0xcccccccc; /* Configure PEx pins in Alternate function mode */ GPIOE->MODER = 0xaaaaaaaa; /* Configure PEx pins speed to 100 MHz */ GPIOE->OSPEEDR = 0xffffffff; /* Configure PEx pins Output type to push-pull */ GPIOE->OTYPER = 0x00000000; /* No pull-up, pull-down for PEx pins */ GPIOE->PUPDR = 0x00000000; /* Connect PFx pins to FMC Alternate function */ GPIOF->AFR[0] = 0x00cccccc; GPIOF->AFR[1] = 0xcccc0000; /* Configure PFx pins in Alternate function mode */ GPIOF->MODER = 0xaa000aaa; /* Configure PFx pins speed to 100 MHz */ GPIOF->OSPEEDR = 0xff000fff; /* Configure PFx pins Output type to push-pull */ GPIOF->OTYPER = 0x00000000; /* No pull-up, pull-down for PFx pins */ GPIOF->PUPDR = 0x00000000; /* Connect PGx pins to FMC Alternate function */ GPIOG->AFR[0] = 0x00cccccc; GPIOG->AFR[1] = 0x000000c0; /* Configure PGx pins in Alternate function mode */ GPIOG->MODER = 0x00080aaa; /* Configure PGx pins speed to 100 MHz */ GPIOG->OSPEEDR = 0x000c0fff; /* Configure PGx pins Output type to push-pull */ GPIOG->OTYPER = 0x00000000; /* No pull-up, pull-down for PGx pins */ GPIOG->PUPDR = 0x00000000; /*-- FMC Configuration ------------------------------------------------------*/ /* Enable the FMC/FSMC interface clock */ RCC->AHB3ENR |= 0x00000001; #if defined (STM32F427_437xx) || defined (STM32F429_439xx) /* Configure and enable Bank1_SRAM2 */ FMC_Bank1->BTCR[2] = 0x00001011; FMC_Bank1->BTCR[3] = 0x00000201; FMC_Bank1E->BWTR[2] = 0x0fffffff; #endif /* STM32F427_437xx || STM32F429_439xx */ #if defined (STM32F40_41xxx) /* Configure and enable Bank1_SRAM2 */ FSMC_Bank1->BTCR[2] = 0x00001011; FSMC_Bank1->BTCR[3] = 0x00000201; FSMC_Bank1E->BWTR[2] = 0x0fffffff; #endif /* STM32F40_41xxx */ /* Bank1_SRAM2 is configured as follow: In case of FSMC configuration NORSRAMTimingStructure.FSMC_AddressSetupTime = 1; NORSRAMTimingStructure.FSMC_AddressHoldTime = 0; NORSRAMTimingStructure.FSMC_DataSetupTime = 2; NORSRAMTimingStructure.FSMC_BusTurnAroundDuration = 0; NORSRAMTimingStructure.FSMC_CLKDivision = 0; NORSRAMTimingStructure.FSMC_DataLatency = 0; NORSRAMTimingStructure.FSMC_AccessMode = FMC_AccessMode_A; FSMC_NORSRAMInitStructure.FSMC_Bank = FSMC_Bank1_NORSRAM2; FSMC_NORSRAMInitStructure.FSMC_DataAddressMux = FSMC_DataAddressMux_Disable; FSMC_NORSRAMInitStructure.FSMC_MemoryType = FSMC_MemoryType_SRAM; FSMC_NORSRAMInitStructure.FSMC_MemoryDataWidth = FSMC_MemoryDataWidth_16b; FSMC_NORSRAMInitStructure.FSMC_BurstAccessMode = FSMC_BurstAccessMode_Disable; FSMC_NORSRAMInitStructure.FSMC_AsynchronousWait = FSMC_AsynchronousWait_Disable; FSMC_NORSRAMInitStructure.FSMC_WaitSignalPolarity = FSMC_WaitSignalPolarity_Low; FSMC_NORSRAMInitStructure.FSMC_WrapMode = FSMC_WrapMode_Disable; FSMC_NORSRAMInitStructure.FSMC_WaitSignalActive = FSMC_WaitSignalActive_BeforeWaitState; FSMC_NORSRAMInitStructure.FSMC_WriteOperation = FSMC_WriteOperation_Enable; FSMC_NORSRAMInitStructure.FSMC_WaitSignal = FSMC_WaitSignal_Disable; FSMC_NORSRAMInitStructure.FSMC_ExtendedMode = FSMC_ExtendedMode_Disable; FSMC_NORSRAMInitStructure.FSMC_WriteBurst = FSMC_WriteBurst_Disable; FSMC_NORSRAMInitStructure.FSMC_ReadWriteTimingStruct = &NORSRAMTimingStructure; FSMC_NORSRAMInitStructure.FSMC_WriteTimingStruct = &NORSRAMTimingStructure; In case of FMC configuration NORSRAMTimingStructure.FMC_AddressSetupTime = 1; NORSRAMTimingStructure.FMC_AddressHoldTime = 0; NORSRAMTimingStructure.FMC_DataSetupTime = 2; NORSRAMTimingStructure.FMC_BusTurnAroundDuration = 0; NORSRAMTimingStructure.FMC_CLKDivision = 0; NORSRAMTimingStructure.FMC_DataLatency = 0; NORSRAMTimingStructure.FMC_AccessMode = FMC_AccessMode_A; FMC_NORSRAMInitStructure.FMC_Bank = FMC_Bank1_NORSRAM2; FMC_NORSRAMInitStructure.FMC_DataAddressMux = FMC_DataAddressMux_Disable; FMC_NORSRAMInitStructure.FMC_MemoryType = FMC_MemoryType_SRAM; FMC_NORSRAMInitStructure.FMC_MemoryDataWidth = FMC_MemoryDataWidth_16b; FMC_NORSRAMInitStructure.FMC_BurstAccessMode = FMC_BurstAccessMode_Disable; FMC_NORSRAMInitStructure.FMC_AsynchronousWait = FMC_AsynchronousWait_Disable; FMC_NORSRAMInitStructure.FMC_WaitSignalPolarity = FMC_WaitSignalPolarity_Low; FMC_NORSRAMInitStructure.FMC_WrapMode = FMC_WrapMode_Disable; FMC_NORSRAMInitStructure.FMC_WaitSignalActive = FMC_WaitSignalActive_BeforeWaitState; FMC_NORSRAMInitStructure.FMC_WriteOperation = FMC_WriteOperation_Enable; FMC_NORSRAMInitStructure.FMC_WaitSignal = FMC_WaitSignal_Disable; FMC_NORSRAMInitStructure.FMC_ExtendedMode = FMC_ExtendedMode_Disable; FMC_NORSRAMInitStructure.FMC_WriteBurst = FMC_WriteBurst_Disable; FMC_NORSRAMInitStructure.FMC_ContinousClock = FMC_CClock_SyncOnly; FMC_NORSRAMInitStructure.FMC_ReadWriteTimingStruct = &NORSRAMTimingStructure; FMC_NORSRAMInitStructure.FMC_WriteTimingStruct = &NORSRAMTimingStructure; */ } #endif /* DATA_IN_ExtSRAM */ #ifdef DATA_IN_ExtSDRAM /** * @brief Setup the external memory controller. * Called in startup_stm32f4xx.s before jump to main. * This function configures the external SDRAM mounted on STM324x9I_EVAL board * This SDRAM will be used as program data memory (including heap and stack). * @param None * @retval None */ void SystemInit_ExtMemCtl(void) { register uint32_t tmpreg = 0, timeout = 0xFFFF; register uint32_t index; /* Enable GPIOC, GPIOD, GPIOE, GPIOF, GPIOG, GPIOH and GPIOI interface clock */ RCC->AHB1ENR |= 0x000001FC; /* Connect PCx pins to FMC Alternate function */ GPIOC->AFR[0] = 0x0000000c; GPIOC->AFR[1] = 0x00007700; /* Configure PCx pins in Alternate function mode */ GPIOC->MODER = 0x00a00002; /* Configure PCx pins speed to 50 MHz */ GPIOC->OSPEEDR = 0x00a00002; /* Configure PCx pins Output type to push-pull */ GPIOC->OTYPER = 0x00000000; /* No pull-up, pull-down for PCx pins */ GPIOC->PUPDR = 0x00500000; /* Connect PDx pins to FMC Alternate function */ GPIOD->AFR[0] = 0x000000CC; GPIOD->AFR[1] = 0xCC000CCC; /* Configure PDx pins in Alternate function mode */ GPIOD->MODER = 0xA02A000A; /* Configure PDx pins speed to 50 MHz */ GPIOD->OSPEEDR = 0xA02A000A; /* Configure PDx pins Output type to push-pull */ GPIOD->OTYPER = 0x00000000; /* No pull-up, pull-down for PDx pins */ GPIOD->PUPDR = 0x00000000; /* Connect PEx pins to FMC Alternate function */ GPIOE->AFR[0] = 0xC00000CC; GPIOE->AFR[1] = 0xCCCCCCCC; /* Configure PEx pins in Alternate function mode */ GPIOE->MODER = 0xAAAA800A; /* Configure PEx pins speed to 50 MHz */ GPIOE->OSPEEDR = 0xAAAA800A; /* Configure PEx pins Output type to push-pull */ GPIOE->OTYPER = 0x00000000; /* No pull-up, pull-down for PEx pins */ GPIOE->PUPDR = 0x00000000; /* Connect PFx pins to FMC Alternate function */ GPIOF->AFR[0] = 0xcccccccc; GPIOF->AFR[1] = 0xcccccccc; /* Configure PFx pins in Alternate function mode */ GPIOF->MODER = 0xAA800AAA; /* Configure PFx pins speed to 50 MHz */ GPIOF->OSPEEDR = 0xAA800AAA; /* Configure PFx pins Output type to push-pull */ GPIOF->OTYPER = 0x00000000; /* No pull-up, pull-down for PFx pins */ GPIOF->PUPDR = 0x00000000; /* Connect PGx pins to FMC Alternate function */ GPIOG->AFR[0] = 0xcccccccc; GPIOG->AFR[1] = 0xcccccccc; /* Configure PGx pins in Alternate function mode */ GPIOG->MODER = 0xaaaaaaaa; /* Configure PGx pins speed to 50 MHz */ GPIOG->OSPEEDR = 0xaaaaaaaa; /* Configure PGx pins Output type to push-pull */ GPIOG->OTYPER = 0x00000000; /* No pull-up, pull-down for PGx pins */ GPIOG->PUPDR = 0x00000000; /* Connect PHx pins to FMC Alternate function */ GPIOH->AFR[0] = 0x00C0CC00; GPIOH->AFR[1] = 0xCCCCCCCC; /* Configure PHx pins in Alternate function mode */ GPIOH->MODER = 0xAAAA08A0; /* Configure PHx pins speed to 50 MHz */ GPIOH->OSPEEDR = 0xAAAA08A0; /* Configure PHx pins Output type to push-pull */ GPIOH->OTYPER = 0x00000000; /* No pull-up, pull-down for PHx pins */ GPIOH->PUPDR = 0x00000000; /* Connect PIx pins to FMC Alternate function */ GPIOI->AFR[0] = 0xCCCCCCCC; GPIOI->AFR[1] = 0x00000CC0; /* Configure PIx pins in Alternate function mode */ GPIOI->MODER = 0x0028AAAA; /* Configure PIx pins speed to 50 MHz */ GPIOI->OSPEEDR = 0x0028AAAA; /* Configure PIx pins Output type to push-pull */ GPIOI->OTYPER = 0x00000000; /* No pull-up, pull-down for PIx pins */ GPIOI->PUPDR = 0x00000000; /*-- FMC Configuration ------------------------------------------------------*/ /* Enable the FMC interface clock */ RCC->AHB3ENR |= 0x00000001; /* Configure and enable SDRAM bank1 */ FMC_Bank5_6->SDCR[0] = 0x000039D0; FMC_Bank5_6->SDTR[0] = 0x01115351; /* SDRAM initialization sequence */ /* Clock enable command */ FMC_Bank5_6->SDCMR = 0x00000011; tmpreg = FMC_Bank5_6->SDSR & 0x00000020; while((tmpreg != 0) & (timeout-- > 0)) { tmpreg = FMC_Bank5_6->SDSR & 0x00000020; } /* Delay */ for (index = 0; index<1000; index++); /* PALL command */ FMC_Bank5_6->SDCMR = 0x00000012; timeout = 0xFFFF; while((tmpreg != 0) & (timeout-- > 0)) { tmpreg = FMC_Bank5_6->SDSR & 0x00000020; } /* Auto refresh command */ FMC_Bank5_6->SDCMR = 0x00000073; timeout = 0xFFFF; while((tmpreg != 0) & (timeout-- > 0)) { tmpreg = FMC_Bank5_6->SDSR & 0x00000020; } /* MRD register program */ FMC_Bank5_6->SDCMR = 0x00046014; timeout = 0xFFFF; while((tmpreg != 0) & (timeout-- > 0)) { tmpreg = FMC_Bank5_6->SDSR & 0x00000020; } /* Set refresh count */ tmpreg = FMC_Bank5_6->SDRTR; FMC_Bank5_6->SDRTR = (tmpreg | (0x0000027C<<1)); /* Disable write protection */ tmpreg = FMC_Bank5_6->SDCR[0]; FMC_Bank5_6->SDCR[0] = (tmpreg & 0xFFFFFDFF); /* Bank1_SDRAM is configured as follow: FMC_SDRAMTimingInitStructure.FMC_LoadToActiveDelay = 2; FMC_SDRAMTimingInitStructure.FMC_ExitSelfRefreshDelay = 6; FMC_SDRAMTimingInitStructure.FMC_SelfRefreshTime = 4; FMC_SDRAMTimingInitStructure.FMC_RowCycleDelay = 6; FMC_SDRAMTimingInitStructure.FMC_WriteRecoveryTime = 2; FMC_SDRAMTimingInitStructure.FMC_RPDelay = 2; FMC_SDRAMTimingInitStructure.FMC_RCDDelay = 2; FMC_SDRAMInitStructure.FMC_Bank = SDRAM_BANK; FMC_SDRAMInitStructure.FMC_ColumnBitsNumber = FMC_ColumnBits_Number_8b; FMC_SDRAMInitStructure.FMC_RowBitsNumber = FMC_RowBits_Number_11b; FMC_SDRAMInitStructure.FMC_SDMemoryDataWidth = FMC_SDMemory_Width_16b; FMC_SDRAMInitStructure.FMC_InternalBankNumber = FMC_InternalBank_Number_4; FMC_SDRAMInitStructure.FMC_CASLatency = FMC_CAS_Latency_3; FMC_SDRAMInitStructure.FMC_WriteProtection = FMC_Write_Protection_Disable; FMC_SDRAMInitStructure.FMC_SDClockPeriod = FMC_SDClock_Period_2; FMC_SDRAMInitStructure.FMC_ReadBurst = FMC_Read_Burst_disable; FMC_SDRAMInitStructure.FMC_ReadPipeDelay = FMC_ReadPipe_Delay_1; FMC_SDRAMInitStructure.FMC_SDRAMTimingStruct = &FMC_SDRAMTimingInitStructure; */ } #endif /* DATA_IN_ExtSDRAM */ /** * @} */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <rsp stat="ok"> <photo id="44070187" secret="5e5a50b675" server="32" farm="1" dateuploaded="1126980258" isfavorite="0" license="4" safety_level="0" rotation="0" originalsecret="5e5a50b675" originalformat="jpg" views="5031" media="photo"> <owner nsid="94571281@N00" username="jonrawlinson" realname="Jon Rawlinson" location="" iconserver="1" iconfarm="1"/> <title>$650,000 ride! Enzo, Ferrari</title> <description>ahhh, only in Italy! The incredible Enzo Ferrari!</description> <visibility ispublic="1" isfriend="0" isfamily="0"/> <dates posted="1126980258" taken="2005-09-17 22:49:09" takengranularity="0" lastupdate="1303625900"/> <editability cancomment="0" canaddmeta="0"/> <publiceditability cancomment="1" canaddmeta="0"/> <usage candownload="1" canblog="0" canprint="0" canshare="1"/> <comments>12</comments> <notes/> <people haspeople="0"/> <tags> <tag id="104308-44070187-2411732" author="94571281@N00" raw="jonrawlinson" machine_tag="0">jonrawlinson </tag> <tag id="104308-44070187-121" author="94571281@N00" raw="travel" machine_tag="0">travel</tag> <tag id="104308-44070187-4835592" author="94571281@N00" raw="radblog" machine_tag="0">radblog</tag> <tag id="104308-44070187-292353" author="94571281@N00" raw="rawlinson" machine_tag="0">rawlinson</tag> <tag id="104308-44070187-4771215" author="94571281@N00" raw="theradblog" machine_tag="0">theradblog</tag> <tag id="104308-44070187-1178648" author="94571281@N00" raw="enzoferrari" machine_tag="0">enzoferrari</tag> <tag id="104308-44070187-16596" author="94571281@N00" raw="enzo" machine_tag="0">enzo</tag> <tag id="104308-44070187-9505" author="94571281@N00" raw="ferrari" machine_tag="0">ferrari</tag> <tag id="104308-44070187-227" author="94571281@N00" raw="red" machine_tag="0">red</tag> <tag id="104308-44070187-501" author="94571281@N00" raw="hot" machine_tag="0">hot</tag> <tag id="104308-44070187-525" author="94571281@N00" raw="rome" machine_tag="0">rome</tag> <tag id="104308-44070187-297" author="94571281@N00" raw="italy" machine_tag="0">italy</tag> <tag id="104308-44070187-733200" author="94571281@N00" raw="650000" machine_tag="0">650000</tag> <tag id="104308-44070187-4009" author="94571281@N00" raw="ride" machine_tag="0">ride</tag> <tag id="104308-44070187-16971" author="94571281@N00" raw="whip" machine_tag="0">whip</tag> <tag id="104308-44070187-49721" author="94571281@N00" raw="smokin" machine_tag="0">smokin</tag> <tag id="104308-44070187-8117" author="94571281@N00" raw="fast" machine_tag="0">fast</tag> <tag id="104308-44070187-53450" author="94571281@N00" raw="supercar" machine_tag="0">supercar</tag> <tag id="104308-44070187-37194751" author="94571281@N00" raw="jonrawlinson.com" machine_tag="0"> jonrawlinsoncom </tag> <tag id="104308-44070187-41629469" author="94571281@N00" raw="theradblog.com" machine_tag="0"> theradblogcom </tag> </tags> <urls> <url type="photopage">http://www.flickr.com/photos/london/44070187/</url> </urls> </photo> </rsp>
{ "pile_set_name": "Github" }
{{- if .Values.compass.enabled }} apiVersion: v1 kind: Service metadata: labels: app: {{ .Values.compass.name }} service: {{ .Values.compass.name }} name: {{ .Values.compass.name }} namespace: {{ .Release.Namespace }} spec: ports: {{ range $i, $var := .Values.compass.service.ports -}} - name: {{ $var.name }} port: {{ $var.port }} targetPort: {{ $var.port }} {{ end }} selector: app: {{ .Values.compass.name }} type: {{ .Values.compass.service.type}} {{- end }}
{ "pile_set_name": "Github" }
// Copyright 2013 Google Inc. All Rights Reserved. // Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. // This code originated in the github.com/golang/glog package. package log import ( "testing" "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) // TestLogFilenameParsing ensures that logName and parseLogFilename work as // advertised. func TestLogFilenameParsing(t *testing.T) { testCases := []time.Time{ timeutil.Now(), timeutil.Now().AddDate(-10, 0, 0), timeutil.Now().AddDate(0, 0, -1), } for i, testCase := range testCases { filename, _ := logName(program, testCase) details, err := ParseLogFilename(filename) if err != nil { t.Fatal(err) } if a, e := timeutil.Unix(0, details.Time).Format(time.RFC3339), testCase.Format(time.RFC3339); a != e { t.Errorf("%d: Times do not match, expected:%s - actual:%s", i, e, a) } } } // TestSelectFiles checks that selectFiles correctly filters and orders // filesInfos. func TestSelectFiles(t *testing.T) { testFiles := []FileInfo{} year2000 := time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC) year2050 := time.Date(2050, time.January, 1, 1, 0, 0, 0, time.UTC) year2200 := time.Date(2200, time.January, 1, 1, 0, 0, 0, time.UTC) for i := 0; i < 100; i++ { fileTime := year2000.AddDate(i, 0, 0) name, _ := logName(program, fileTime) testfile := FileInfo{ Name: name, Details: FileDetails{ Time: fileTime.UnixNano(), }, } testFiles = append(testFiles, testfile) } testCases := []struct { EndTimestamp int64 ExpectedCount int }{ {year2200.UnixNano(), 100}, {year2050.UnixNano(), 51}, {year2000.UnixNano(), 1}, } for i, testCase := range testCases { actualFiles := selectFiles(testFiles, testCase.EndTimestamp) previousTimestamp := year2200.UnixNano() if len(actualFiles) != testCase.ExpectedCount { t.Errorf("%d: expected %d files, actual %d", i, testCase.ExpectedCount, len(actualFiles)) } for _, file := range actualFiles { if file.Details.Time > previousTimestamp { t.Errorf("%d: returned files are not in the correct order", i) } if file.Details.Time > testCase.EndTimestamp { t.Errorf("%d: did not filter by endTime", i) } previousTimestamp = file.Details.Time } } }
{ "pile_set_name": "Github" }
"use strict"; exports.__esModule = true; var _hasInstance = require("../core-js/symbol/has-instance"); var _hasInstance2 = _interopRequireDefault(_hasInstance); var _symbol = require("../core-js/symbol"); var _symbol2 = _interopRequireDefault(_symbol); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } exports.default = function (left, right) { if (right != null && typeof _symbol2.default !== "undefined" && right[_hasInstance2.default]) { return right[_hasInstance2.default](left); } else { return left instanceof right; } };
{ "pile_set_name": "Github" }
/* * Copyright (c) 2014, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "mdp5_kms.h" #include "mdp5_smp.h" struct mdp5_smp { struct drm_device *dev; uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */ int blk_cnt; int blk_size; /* register cache */ u32 alloc_w[22]; u32 alloc_r[22]; u32 pipe_reqprio_fifo_wm0[SSPP_MAX]; u32 pipe_reqprio_fifo_wm1[SSPP_MAX]; u32 pipe_reqprio_fifo_wm2[SSPP_MAX]; }; static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { struct msm_drm_private *priv = smp->dev->dev_private; return to_mdp5_kms(to_mdp_kms(priv->kms)); } static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) { #define CID_UNUSED 0 if (WARN_ON(plane >= pipe2nclients(pipe))) return CID_UNUSED; /* * Note on SMP clients: * For ViG pipes, fetch Y/Cr/Cb-components clients are always * consecutive, and in that order. * * e.g.: * if mdp5_cfg->smp.clients[SSPP_VIG0] = N, * Y plane's client ID is N * Cr plane's client ID is N + 1 * Cb plane's client ID is N + 2 */ return mdp5_cfg->smp.clients[pipe] + plane; } /* allocate blocks for the specified request: */ static int smp_request_block(struct mdp5_smp *smp, struct mdp5_smp_state *state, u32 cid, int nblks) { void *cs = state->client_state[cid]; int i, avail, cnt = smp->blk_cnt; uint8_t reserved; /* we shouldn't be requesting blocks for an in-use client: */ WARN_ON(bitmap_weight(cs, cnt) > 0); reserved = smp->reserved[cid]; if (reserved) { nblks = max(0, nblks - reserved); DBG("%d MMBs allocated (%d reserved)", nblks, reserved); } avail = cnt - bitmap_weight(state->state, cnt); if (nblks > avail) { dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n", nblks, avail); return -ENOSPC; } for (i = 0; i < nblks; i++) { int blk = find_first_zero_bit(state->state, cnt); set_bit(blk, cs); set_bit(blk, state->state); } return 0; } static void set_fifo_thresholds(struct mdp5_smp *smp, enum mdp5_pipe pipe, int nblks) { u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); u32 val; /* 1/4 of SMP pool that is being fetched */ val = (nblks * smp_entries_per_blk) / 4; smp->pipe_reqprio_fifo_wm0[pipe] = val * 1; smp->pipe_reqprio_fifo_wm1[pipe] = val * 2; smp->pipe_reqprio_fifo_wm2[pipe] = val * 3; } /* * NOTE: looks like if horizontal decimation is used (if we supported that) * then the width used to calculate SMP block requirements is the post- * decimated width. Ie. SMP buffering sits downstream of decimation (which * presumably happens during the dma from scanout buffer). */ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, const struct mdp_format *format, u32 width, bool hdecim) { struct mdp5_kms *mdp5_kms = get_kms(smp); int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines; u32 fmt = format->base.pixel_format; uint32_t blkcfg = 0; nplanes = drm_format_num_planes(fmt); hsub = drm_format_horz_chroma_subsampling(fmt); /* different if BWC (compressed framebuffer?) enabled: */ nlines = 2; /* Newer MDPs have split/packing logic, which fetches sub-sampled * U and V components (splits them from Y if necessary) and packs * them together, writes to SMP using a single client. */ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { fmt = DRM_FORMAT_NV24; nplanes = 2; /* if decimation is enabled, HW decimates less on the * sub sampled chroma components */ if (hdecim && (hsub > 1)) hsub = 1; } for (i = 0; i < nplanes; i++) { int n, fetch_stride, cpp; cpp = drm_format_plane_cpp(fmt, i); fetch_stride = width * cpp / (i ? hsub : 1); n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); /* for hw rev v1.00 */ if (rev == 0) n = roundup_pow_of_two(n); blkcfg |= (n << (8 * i)); } return blkcfg; } int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, enum mdp5_pipe pipe, uint32_t blkcfg) { struct mdp5_kms *mdp5_kms = get_kms(smp); struct drm_device *dev = mdp5_kms->dev; int i, ret; for (i = 0; i < pipe2nclients(pipe); i++) { u32 cid = pipe2client(pipe, i); int n = blkcfg & 0xff; if (!n) continue; DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); ret = smp_request_block(smp, state, cid, n); if (ret) { dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", n, ret); return ret; } blkcfg >>= 8; } state->assigned |= (1 << pipe); return 0; } /* Release SMP blocks for all clients of the pipe */ void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, enum mdp5_pipe pipe) { int i; int cnt = smp->blk_cnt; for (i = 0; i < pipe2nclients(pipe); i++) { u32 cid = pipe2client(pipe, i); void *cs = state->client_state[cid]; /* update global state: */ bitmap_andnot(state->state, state->state, cs, cnt); /* clear client's state */ bitmap_zero(cs, cnt); } state->released |= (1 << pipe); } /* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to * happen after scanout completes. */ static unsigned update_smp_state(struct mdp5_smp *smp, u32 cid, mdp5_smp_state_t *assigned) { int cnt = smp->blk_cnt; unsigned nblks = 0; u32 blk, val; for_each_set_bit(blk, *assigned, cnt) { int idx = blk / 3; int fld = blk % 3; val = smp->alloc_w[idx]; switch (fld) { case 0: val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); break; case 1: val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); break; case 2: val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); break; } smp->alloc_w[idx] = val; smp->alloc_r[idx] = val; nblks++; } return nblks; } static void write_smp_alloc_regs(struct mdp5_smp *smp) { struct mdp5_kms *mdp5_kms = get_kms(smp); int i, num_regs; num_regs = smp->blk_cnt / 3 + 1; for (i = 0; i < num_regs; i++) { mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i), smp->alloc_w[i]); mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i), smp->alloc_r[i]); } } static void write_smp_fifo_regs(struct mdp5_smp *smp) { struct mdp5_kms *mdp5_kms = get_kms(smp); int i; for (i = 0; i < mdp5_kms->num_hwpipes; i++) { struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; enum mdp5_pipe pipe = hwpipe->pipe; mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), smp->pipe_reqprio_fifo_wm0[pipe]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), smp->pipe_reqprio_fifo_wm1[pipe]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), smp->pipe_reqprio_fifo_wm2[pipe]); } } void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { enum mdp5_pipe pipe; for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { unsigned i, nblks = 0; for (i = 0; i < pipe2nclients(pipe); i++) { u32 cid = pipe2client(pipe, i); void *cs = state->client_state[cid]; nblks += update_smp_state(smp, cid, cs); DBG("assign %s:%u, %u blks", pipe2name(pipe), i, nblks); } set_fifo_thresholds(smp, pipe, nblks); } write_smp_alloc_regs(smp); write_smp_fifo_regs(smp); state->assigned = 0; } void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { enum mdp5_pipe pipe; for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { DBG("release %s", pipe2name(pipe)); set_fifo_thresholds(smp, pipe, 0); } write_smp_fifo_regs(smp); state->released = 0; } void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) { struct mdp5_kms *mdp5_kms = get_kms(smp); struct mdp5_hw_pipe_state *hwpstate; struct mdp5_smp_state *state; int total = 0, i, j; drm_printf(p, "name\tinuse\tplane\n"); drm_printf(p, "----\t-----\t-----\n"); if (drm_can_sleep()) drm_modeset_lock(&mdp5_kms->state_lock, NULL); /* grab these *after* we hold the state_lock */ hwpstate = &mdp5_kms->state->hwpipe; state = &mdp5_kms->state->smp; for (i = 0; i < mdp5_kms->num_hwpipes; i++) { struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; enum mdp5_pipe pipe = hwpipe->pipe; for (j = 0; j < pipe2nclients(pipe); j++) { u32 cid = pipe2client(pipe, j); void *cs = state->client_state[cid]; int inuse = bitmap_weight(cs, smp->blk_cnt); drm_printf(p, "%s:%d\t%d\t%s\n", pipe2name(pipe), j, inuse, plane ? plane->name : NULL); total += inuse; } } drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt); drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt - bitmap_weight(state->state, smp->blk_cnt)); if (drm_can_sleep()) drm_modeset_unlock(&mdp5_kms->state_lock); } void mdp5_smp_destroy(struct mdp5_smp *smp) { kfree(smp); } struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) { struct mdp5_smp_state *state = &mdp5_kms->state->smp; struct mdp5_smp *smp = NULL; int ret; smp = kzalloc(sizeof(*smp), GFP_KERNEL); if (unlikely(!smp)) { ret = -ENOMEM; goto fail; } smp->dev = mdp5_kms->dev; smp->blk_cnt = cfg->mmb_count; smp->blk_size = cfg->mmb_size; /* statically tied MMBs cannot be re-allocated: */ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt); memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); return smp; fail: if (smp) mdp5_smp_destroy(smp); return ERR_PTR(ret); }
{ "pile_set_name": "Github" }
/****************************************************************************** * Copyright 2017 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #pragma once #include "modules/drivers/canbus/can_comm/protocol_data.h" #include "modules/drivers/proto/conti_radar.pb.h" namespace apollo { namespace drivers { namespace conti_radar { using apollo::drivers::ContiRadar; class ClusterQualityInfo702 : public apollo::drivers::canbus::ProtocolData<ContiRadar> { public: static const uint32_t ID; ClusterQualityInfo702(); void Parse(const std::uint8_t* bytes, int32_t length, ContiRadar* conti_radar) const override; private: int target_id(const std::uint8_t* bytes, int32_t length) const; int longitude_dist_rms(const std::uint8_t* bytes, int32_t length) const; int lateral_dist_rms(const std::uint8_t* bytes, int32_t length) const; int longitude_vel_rms(const std::uint8_t* bytes, int32_t length) const; int pdh0(const std::uint8_t* bytes, int32_t length) const; int ambig_state(const std::uint8_t* bytes, int32_t length) const; int invalid_state(const std::uint8_t* bytes, int32_t length) const; int lateral_vel_rms(const std::uint8_t* bytes, int32_t length) const; }; } // namespace conti_radar } // namespace drivers } // namespace apollo
{ "pile_set_name": "Github" }
/* ** Copyright (C) 2006-2012 Erik de Castro Lopo <[email protected]> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <cstdio> #include <cstdlib> #include <cstring> #include <sndfile.hh> #include "utils.h" static short sbuffer [100] ; static int ibuffer [100] ; static float fbuffer [100] ; static double dbuffer [100] ; static void ceeplusplus_wchar_test (void) { #if 0 LPCWSTR filename = L"wchar_test.wav" ; print_test_name (__func__, "ceeplusplus_wchar_test.wav") ; /* Use this scope to make sure the created file is closed. */ { SndfileHandle file (filename, SFM_WRITE, SF_FORMAT_WAV | SF_FORMAT_PCM_16, 2, 44100) ; if (file.refCount () != 1) { printf ("\n\n%s %d : Error : Reference count (%d) should be 1.\n\n", __func__, __LINE__, file.refCount ()) ; exit (1) ; } ; /* This should check that the file did in fact get created with a ** wchar_t * filename. */ exit_if_true ( GetFileAttributesW (filename) == INVALID_FILE_ATTRIBUTES, "\n\nLine %d : GetFileAttributes failed.\n\n", __LINE__ ) ; } /* Use this because the file was created with CreateFileW. */ DeleteFileW (filename) ; puts ("ok") ; #endif } /* ceeplusplus_wchar_test */ static void create_file (const char * filename, int format) { SndfileHandle file ; if (file.refCount () != 0) { printf ("\n\n%s %d : Error : Reference count (%d) should be zero.\n\n", __func__, __LINE__, file.refCount ()) ; exit (1) ; } ; file = SndfileHandle (filename, SFM_WRITE, format, 2, 48000) ; if (file.refCount () != 1) { printf ("\n\n%s %d : Error : Reference count (%d) should be 1.\n\n", __func__, __LINE__, file.refCount ()) ; exit (1) ; } ; file.setString (SF_STR_TITLE, filename) ; /* Item write. */ file.write (sbuffer, ARRAY_LEN (sbuffer)) ; file.write (ibuffer, ARRAY_LEN (ibuffer)) ; file.write (fbuffer, ARRAY_LEN (fbuffer)) ; file.write (dbuffer, ARRAY_LEN (dbuffer)) ; /* Frame write. */ file.writef (sbuffer, ARRAY_LEN (sbuffer) / file.channels ()) ; file.writef (ibuffer, ARRAY_LEN (ibuffer) / file.channels ()) ; file.writef (fbuffer, ARRAY_LEN (fbuffer) / file.channels ()) ; file.writef (dbuffer, ARRAY_LEN (dbuffer) / file.channels ()) ; /* RAII takes care of the SndfileHandle. */ } /* create_file */ static void check_title (const SndfileHandle & file, const char * filename) { const char *title = NULL ; title = file.getString (SF_STR_TITLE) ; if (title == NULL) { printf ("\n\n%s %d : Error : No title.\n\n", __func__, __LINE__) ; exit (1) ; } ; if (strcmp (filename, title) != 0) { printf ("\n\n%s %d : Error : title '%s' should be '%s'\n\n", __func__, __LINE__, title, filename) ; exit (1) ; } ; return ; } /* check_title */ static void read_file (const char * filename, int format) { SndfileHandle file ; sf_count_t count ; if (file) { printf ("\n\n%s %d : Error : should not be here.\n\n", __func__, __LINE__) ; exit (1) ; } ; file = SndfileHandle (filename) ; if (1) { SndfileHandle file2 = file ; if (file.refCount () != 2 || file2.refCount () != 2) { printf ("\n\n%s %d : Error : Reference count (%d) should be two.\n\n", __func__, __LINE__, file.refCount ()) ; exit (1) ; } ; } ; if (file.refCount () != 1) { printf ("\n\n%s %d : Error : Reference count (%d) should be one.\n\n", __func__, __LINE__, file.refCount ()) ; exit (1) ; } ; if (! file) { printf ("\n\n%s %d : Error : should not be here.\n\n", __func__, __LINE__) ; exit (1) ; } ; if (file.format () != format) { printf ("\n\n%s %d : Error : format 0x%08x should be 0x%08x.\n\n", __func__, __LINE__, file.format (), format) ; exit (1) ; } ; if (file.channels () != 2) { printf ("\n\n%s %d : Error : channels %d should be 2.\n\n", __func__, __LINE__, file.channels ()) ; exit (1) ; } ; if (file.frames () != ARRAY_LEN (sbuffer) * 4) { printf ("\n\n%s %d : Error : frames %ld should be %lu.\n\n", __func__, __LINE__, (long) file.frames (), (long) ARRAY_LEN (sbuffer) * 4 / 2) ; exit (1) ; } ; switch (format & SF_FORMAT_TYPEMASK) { case SF_FORMAT_AU : break ; default : check_title (file, filename) ; break ; } ; /* Item read. */ file.read (sbuffer, ARRAY_LEN (sbuffer)) ; file.read (ibuffer, ARRAY_LEN (ibuffer)) ; file.read (fbuffer, ARRAY_LEN (fbuffer)) ; file.read (dbuffer, ARRAY_LEN (dbuffer)) ; /* Frame read. */ file.readf (sbuffer, ARRAY_LEN (sbuffer) / file.channels ()) ; file.readf (ibuffer, ARRAY_LEN (ibuffer) / file.channels ()) ; file.readf (fbuffer, ARRAY_LEN (fbuffer) / file.channels ()) ; file.readf (dbuffer, ARRAY_LEN (dbuffer) / file.channels ()) ; count = file.seek (file.frames () - 10, SEEK_SET) ; if (count != file.frames () - 10) { printf ("\n\n%s %d : Error : offset (%ld) should be %ld\n\n", __func__, __LINE__, (long) count, (long) (file.frames () - 10)) ; exit (1) ; } ; count = file.read (sbuffer, ARRAY_LEN (sbuffer)) ; if (count != 10 * file.channels ()) { printf ("\n\n%s %d : Error : count (%ld) should be %ld\n\n", __func__, __LINE__, (long) count, (long) (10 * file.channels ())) ; exit (1) ; } ; /* RAII takes care of the SndfileHandle. */ } /* read_file */ static void ceeplusplus_test (const char *filename, int format) { print_test_name ("ceeplusplus_test", filename) ; create_file (filename, format) ; read_file (filename, format) ; remove (filename) ; puts ("ok") ; } /* ceeplusplus_test */ static void ceeplusplus_extra_test (void) { SndfileHandle file ; const char * filename = "bad_file_name.wav" ; int error ; print_test_name ("ceeplusplus_extra_test", filename) ; file = SndfileHandle (filename) ; error = file.error () ; if (error == 0) { printf ("\n\n%s %d : error should not be zero.\n\n", __func__, __LINE__) ; exit (1) ; } ; if (file.strError () == NULL) { printf ("\n\n%s %d : strError should not return NULL.\n\n", __func__, __LINE__) ; exit (1) ; } ; if (file.seek (0, SEEK_SET) != 0) { printf ("\n\n%s %d : bad seek ().\n\n", __func__, __LINE__) ; exit (1) ; } ; puts ("ok") ; } /* ceeplusplus_extra_test */ static void ceeplusplus_rawhandle_test (const char *filename) { SNDFILE* handle ; { SndfileHandle file (filename) ; handle = file.rawHandle () ; sf_read_float (handle, fbuffer, ARRAY_LEN (fbuffer)) ; } } /* ceeplusplus_rawhandle_test */ static void ceeplusplus_takeOwnership_test (const char *filename) { SNDFILE* handle ; { SndfileHandle file (filename) ; handle = file.takeOwnership () ; } if (sf_read_float (handle, fbuffer, ARRAY_LEN (fbuffer)) <= 0) { printf ("\n\n%s %d : error when taking ownership of handle.\n\n", __func__, __LINE__) ; exit (1) ; } if (sf_close (handle) != 0) { printf ("\n\n%s %d : cannot close file.\n\n", __func__, __LINE__) ; exit (1) ; } SndfileHandle file (filename) ; SndfileHandle file2 (file) ; if (file2.takeOwnership ()) { printf ("\n\n%s %d : taking ownership of shared handle is not allowed.\n\n", __func__, __LINE__) ; exit (1) ; } } /* ceeplusplus_takeOwnership_test */ static void ceeplusplus_handle_test (const char *filename, int format) { print_test_name ("ceeplusplus_handle_test", filename) ; create_file (filename, format) ; if (0) ceeplusplus_rawhandle_test (filename) ; ceeplusplus_takeOwnership_test (filename) ; remove (filename) ; puts ("ok") ; } /* ceeplusplus_test */ int main (void) { ceeplusplus_test ("cpp_test.wav", SF_FORMAT_WAV | SF_FORMAT_PCM_16) ; ceeplusplus_test ("cpp_test.aiff", SF_FORMAT_AIFF | SF_FORMAT_PCM_S8) ; ceeplusplus_test ("cpp_test.au", SF_FORMAT_AU | SF_FORMAT_FLOAT) ; ceeplusplus_extra_test () ; ceeplusplus_handle_test ("cpp_test.wav", SF_FORMAT_WAV | SF_FORMAT_PCM_16) ; ceeplusplus_wchar_test () ; return 0 ; } /* main */
{ "pile_set_name": "Github" }
# Monolog - Logging for PHP [![Build Status](https://img.shields.io/travis/Seldaek/monolog.svg)](https://travis-ci.org/Seldaek/monolog) [![Total Downloads](https://img.shields.io/packagist/dt/monolog/monolog.svg)](https://packagist.org/packages/monolog/monolog) [![Latest Stable Version](https://img.shields.io/packagist/v/monolog/monolog.svg)](https://packagist.org/packages/monolog/monolog) [![Reference Status](https://www.versioneye.com/php/monolog:monolog/reference_badge.svg)](https://www.versioneye.com/php/monolog:monolog/references) Monolog sends your logs to files, sockets, inboxes, databases and various web services. See the complete list of handlers below. Special handlers allow you to build advanced logging strategies. This library implements the [PSR-3](https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-3-logger-interface.md) interface that you can type-hint against in your own libraries to keep a maximum of interoperability. You can also use it in your applications to make sure you can always use another compatible logger at a later time. As of 1.11.0 Monolog public APIs will also accept PSR-3 log levels. Internally Monolog still uses its own level scheme since it predates PSR-3. ## Installation Install the latest version with ```bash $ composer require monolog/monolog ``` ## Basic Usage ```php <?php use Monolog\Logger; use Monolog\Handler\StreamHandler; // create a log channel $log = new Logger('name'); $log->pushHandler(new StreamHandler('path/to/your.log', Logger::WARNING)); // add records to the log $log->addWarning('Foo'); $log->addError('Bar'); ``` ## Documentation - [Usage Instructions](doc/01-usage.md) - [Handlers, Formatters and Processors](doc/02-handlers-formatters-processors.md) - [Utility classes](doc/03-utilities.md) - [Extending Monolog](doc/04-extending.md) ## Third Party Packages Third party handlers, formatters and processors are [listed in the wiki](https://github.com/Seldaek/monolog/wiki/Third-Party-Packages). You can also add your own there if you publish one. ## About ### Requirements - Monolog works with PHP 5.3 or above, and is also tested to work with HHVM. ### Submitting bugs and feature requests Bugs and feature request are tracked on [GitHub](https://github.com/Seldaek/monolog/issues) ### Framework Integrations - Frameworks and libraries using [PSR-3](https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-3-logger-interface.md) can be used very easily with Monolog since it implements the interface. - [Symfony2](http://symfony.com) comes out of the box with Monolog. - [Silex](http://silex.sensiolabs.org/) comes out of the box with Monolog. - [Laravel 4 & 5](http://laravel.com/) come out of the box with Monolog. - [Lumen](http://lumen.laravel.com/) comes out of the box with Monolog. - [PPI](http://www.ppi.io/) comes out of the box with Monolog. - [CakePHP](http://cakephp.org/) is usable with Monolog via the [cakephp-monolog](https://github.com/jadb/cakephp-monolog) plugin. - [Slim](http://www.slimframework.com/) is usable with Monolog via the [Slim-Monolog](https://github.com/Flynsarmy/Slim-Monolog) log writer. - [XOOPS 2.6](http://xoops.org/) comes out of the box with Monolog. - [Aura.Web_Project](https://github.com/auraphp/Aura.Web_Project) comes out of the box with Monolog. - [Nette Framework](http://nette.org/en/) can be used with Monolog via [Kdyby/Monolog](https://github.com/Kdyby/Monolog) extension. - [Proton Micro Framework](https://github.com/alexbilbie/Proton) comes out of the box with Monolog. ### Author Jordi Boggiano - <[email protected]> - <http://twitter.com/seldaek><br /> See also the list of [contributors](https://github.com/Seldaek/monolog/contributors) which participated in this project. ### License Monolog is licensed under the MIT License - see the `LICENSE` file for details ### Acknowledgements This library is heavily inspired by Python's [Logbook](http://packages.python.org/Logbook/) library, although most concepts have been adjusted to fit to the PHP world.
{ "pile_set_name": "Github" }
// Targeted by JavaCPP version 1.5.4: DO NOT EDIT THIS FILE package org.bytedeco.arrow; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; import static org.bytedeco.javacpp.presets.javacpp.*; import static org.bytedeco.arrow.global.arrow.*; /** \brief Status outcome object (success or error) * * The Status object is an object holding the outcome of an operation. * The outcome is represented as a StatusCode, either success * (StatusCode::OK) or an error (any other of the StatusCode enumeration values). * * Additionally, if an error occurred, a specific error message is generally * attached. */ @Namespace("arrow") @NoOffset @Properties(inherit = org.bytedeco.arrow.presets.arrow.class) public class Status extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Status(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Status(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Status position(long position) { return (Status)super.position(position); } @Override public Status getPointer(long i) { return new Status(this).position(position + i); } // Create a success status. public Status() { super((Pointer)null); allocate(); } @NoException private native void allocate(); public Status(StatusCode code, @StdString String msg) { super((Pointer)null); allocate(code, msg); } private native void allocate(StatusCode code, @StdString String msg); public Status(@Cast("arrow::StatusCode") byte code, @StdString BytePointer msg) { super((Pointer)null); allocate(code, msg); } private native void allocate(@Cast("arrow::StatusCode") byte code, @StdString BytePointer msg); /** \brief Pluggable constructor for use by sub-systems. detail cannot be null. */ public Status(StatusCode code, @StdString String msg, @SharedPtr StatusDetail detail) { super((Pointer)null); allocate(code, msg, detail); } private native void allocate(StatusCode code, @StdString String msg, @SharedPtr StatusDetail detail); public Status(@Cast("arrow::StatusCode") byte code, @StdString BytePointer msg, @SharedPtr StatusDetail detail) { super((Pointer)null); allocate(code, msg, detail); } private native void allocate(@Cast("arrow::StatusCode") byte code, @StdString BytePointer msg, @SharedPtr StatusDetail detail); // Copy the specified status. public Status(@Const @ByRef Status s) { super((Pointer)null); allocate(s); } private native void allocate(@Const @ByRef Status s); public native @ByRef @Name("operator =") Status put(@Const @ByRef Status s); // Move the specified status. public native @Cast("bool") boolean Equals(@Const @ByRef Status s); // AND the statuses. public native @ByVal @Name("operator &") @NoException Status and(@Const @ByRef Status s); public native @ByRef @Name("operator &=") @NoException Status andPut(@Const @ByRef Status s); /** Return a success status */ public static native @ByVal Status OK(); /** Return an error status for out-of-memory conditions */ /** Return an error status for failed key lookups (e.g. column name in a table) */ /** Return an error status for type errors (such as mismatching data types) */ /** Return an error status for unknown errors */ /** Return an error status when an operation or a combination of operation and * data types is unimplemented */ /** Return an error status for invalid data (for example a string that fails parsing) */ /** Return an error status when an index is out of bounds */ /** Return an error status when a container's capacity would exceed its limits */ /** Return an error status when some IO-related operation failed */ /** Return an error status when some (de)serialization operation failed */ /** Return true iff the status indicates success. */ public native @Cast("bool") boolean ok(); /** Return true iff the status indicates an out-of-memory error. */ public native @Cast("bool") boolean IsOutOfMemory(); /** Return true iff the status indicates a key lookup error. */ public native @Cast("bool") boolean IsKeyError(); /** Return true iff the status indicates invalid data. */ public native @Cast("bool") boolean IsInvalid(); /** Return true iff the status indicates an IO-related failure. */ public native @Cast("bool") boolean IsIOError(); /** Return true iff the status indicates a container reaching capacity limits. */ public native @Cast("bool") boolean IsCapacityError(); /** Return true iff the status indicates an out of bounds index. */ public native @Cast("bool") boolean IsIndexError(); /** Return true iff the status indicates a type error. */ public native @Cast("bool") boolean IsTypeError(); /** Return true iff the status indicates an unknown error. */ public native @Cast("bool") boolean IsUnknownError(); /** Return true iff the status indicates an unimplemented operation. */ public native @Cast("bool") boolean IsNotImplemented(); /** Return true iff the status indicates a (de)serialization failure */ public native @Cast("bool") boolean IsSerializationError(); /** Return true iff the status indicates a R-originated error. */ public native @Cast("bool") boolean IsRError(); public native @Cast("bool") boolean IsCodeGenError(); public native @Cast("bool") boolean IsExpressionValidationError(); public native @Cast("bool") boolean IsExecutionError(); /// public native @Cast("bool") boolean IsAlreadyExists(); /** \brief Return a string representation of this status suitable for printing. * * The string "OK" is returned for success. */ public native @StdString String ToString(); /** \brief Return a string representation of the status code, without the message * text or POSIX code information. */ public native @StdString String CodeAsString(); public static native @StdString String CodeAsString(StatusCode arg0); public static native @StdString BytePointer CodeAsString(@Cast("arrow::StatusCode") byte arg0); /** \brief Return the StatusCode value attached to this status. */ public native StatusCode code(); /** \brief Return the specific error message attached to this status. */ public native @StdString String message(); /** \brief Return the status detail attached to this message. */ public native @SharedPtr StatusDetail detail(); /** \brief Return a new Status copying the existing status, but * updating with the existing detail. */ public native @ByVal Status WithDetail(@SharedPtr StatusDetail new_detail); /** \brief Return a new Status with changed message, copying the * existing status code and detail. */ public native void Abort(@StdString String message); public native void Abort(@StdString BytePointer message); }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <!-- ~ Copyright (c) 2019 Hai Zhang <[email protected]> ~ All Rights Reserved. --> <View xmlns:android="http://schemas.android.com/apk/res/android" android:id="@+id/swatch" android:layout_width="36dp" android:layout_height="36dp" android:background="@drawable/color_preference_widget_background" />
{ "pile_set_name": "Github" }
/** * @file wizwiki_w7500.c * @brief board ID for the WIZnet WIZwiki-W7500 board * * DAPLink Interface Firmware * Copyright (c) 2009-2016, ARM Limited, All Rights Reserved * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ const char *board_id = "2201";
{ "pile_set_name": "Github" }
########################################################### # # strace # ########################################################### # You must replace "strace" and "STRACE" with the lower case name and # upper case name of your new package. Some places below will say # "Do not change this" - that does not include this global change, # which must always be done to ensure we have unique names. # # STRACE_VERSION, STRACE_SITE and STRACE_SOURCE define # the upstream location of the source code for the package. # STRACE_DIR is the directory which is created when the source # archive is unpacked. # STRACE_UNZIP is the command used to unzip the source. # It is usually "zcat" (for .gz) or "bzcat" (for .bz2) # # You should change all these variables to suit your package. # STRACE_VERSION ?= 4.10 STRACE_IPK_VERSION ?= 1 STRACE_SITE=http://$(SOURCEFORGE_MIRROR)/sourceforge/strace STRACE_SOURCE?=strace-$(STRACE_VERSION).tar.xz STRACE_DIR=strace-$(STRACE_VERSION) STRACE_UNZIP?=xzcat STRACE_MAINTAINER=Christopher <[email protected]> STRACE_DESCRIPTION=Traces all system calls a program makes. STRACE_SECTION=utility STRACE_PRIORITY=optional STRACE_DEPENDS= STRACE_SUGGESTS= STRACE_CONFLICTS= # # STRACE_PATCHES should list any patches, in the the order in # which they should be applied to the source code. # # STRACE_PATCHES?=#$(STRACE_SOURCE_DIR)/CTL_PROC.patch # # If the compilation of the package requires additional # compilation or linking flags, then list them here. # STRACE_CPPFLAGS= STRACE_CPPFLAGS_PRE?=-I$(SOURCE_DIR)/strace/include STRACE_LDFLAGS= # # STRACE_BUILD_DIR is the directory in which the build is done. # STRACE_SOURCE_DIR is the directory which holds all the # patches and ipkg control files. # STRACE_IPK_DIR is the directory in which the ipk is built. # STRACE_IPK is the name of the resulting ipk files. # # You should not change any of these variables. # STRACE_BUILD_DIR=$(BUILD_DIR)/strace STRACE_SOURCE_DIR=$(SOURCE_DIR)/strace STRACE_IPK_DIR=$(BUILD_DIR)/strace-$(STRACE_VERSION)-ipk STRACE_IPK=$(BUILD_DIR)/strace_$(STRACE_VERSION)-$(STRACE_IPK_VERSION)_$(TARGET_ARCH).ipk .PHONY: strace-source strace-unpack strace strace-stage strace-ipk strace-clean strace-dirclean strace-check # # This is the dependency on the source code. If the source is missing, # then it will be fetched from the site using wget. # $(DL_DIR)/$(STRACE_SOURCE): $(WGET) -P $(@D) $(STRACE_SITE)/$(@F) || \ $(WGET) -P $(@D) $(SOURCES_NLO_SITE)/$(@F) # # The source code depends on it existing within the download directory. # This target will be called by the top level Makefile to download the # source code's archive (.tar.gz, .bz2, etc.) # strace-source: $(DL_DIR)/$(STRACE_SOURCE) $(STRACE_PATCHES) # # This target unpacks the source code in the build directory. # If the source archive is not .tar.gz or .tar.bz2, then you will need # to change the commands here. Patches to the source code are also # applied in this target as required. # # This target also configures the build within the build directory. # Flags such as LDFLAGS and CPPFLAGS should be passed into configure # and NOT $(MAKE) below. Passing it to configure causes configure to # correctly BUILD the Makefile with the right paths, where passing it # to Make causes it to override the default search paths of the compiler. # # If the compilation of the package requires other packages to be staged # first, then do that first (e.g. "$(MAKE) <bar>-stage <baz>-stage"). # $(STRACE_BUILD_DIR)/.configured: $(DL_DIR)/$(STRACE_SOURCE) $(STRACE_PATCHES) make/strace.mk rm -rf $(BUILD_DIR)/$(STRACE_DIR) $(@D) $(STRACE_UNZIP) $(DL_DIR)/$(STRACE_SOURCE) | tar -C $(BUILD_DIR) -xvf - if test -n "$(STRACE_PATCHES)" ; \ then cat $(STRACE_PATCHES) | \ $(PATCH) -d $(BUILD_DIR)/$(STRACE_DIR) -p1 ; \ fi mv $(BUILD_DIR)/$(STRACE_DIR) $(@D) (cd $(@D); \ $(TARGET_CONFIGURE_OPTS) \ CPPFLAGS="$(STRACE_CPPFLAGS_PRE) $(STAGING_CPPFLAGS) $(STRACE_CPPFLAGS)" \ LDFLAGS="$(STAGING_LDFLAGS) $(STRACE_LDFLAGS)" \ ./configure \ --build=$(GNU_HOST_NAME) \ --host=$(GNU_TARGET_NAME) \ --target=$(GNU_TARGET_NAME) \ --prefix=$(TARGET_PREFIX) \ --disable-nls \ --disable-static \ ) touch $@ strace-unpack: $(STRACE_BUILD_DIR)/.configured # # This builds the actual binary. You should change the target to refer # directly to the main binary which is built. # $(STRACE_BUILD_DIR)/.built: $(STRACE_BUILD_DIR)/.configured rm -f $@ $(MAKE) -C $(@D) touch $@ # # You should change the dependency to refer directly to the main binary # which is built. # strace: $(STRACE_BUILD_DIR)/.built # # If you are building a library, then you need to stage it too. # # # This rule creates a control file for ipkg. It is no longer # necessary to create a seperate control file under sources/strace # $(STRACE_IPK_DIR)/CONTROL/control: @$(INSTALL) -d $(@D) @rm -f $@ @echo "Package: strace" >>$@ @echo "Architecture: $(TARGET_ARCH)" >>$@ @echo "Priority: $(STRACE_PRIORITY)" >>$@ @echo "Section: $(STRACE_SECTION)" >>$@ @echo "Version: $(STRACE_VERSION)-$(STRACE_IPK_VERSION)" >>$@ @echo "Maintainer: $(STRACE_MAINTAINER)" >>$@ @echo "Source: $(STRACE_SITE)/$(STRACE_SOURCE)" >>$@ @echo "Description: $(STRACE_DESCRIPTION)" >>$@ @echo "Depends: $(STRACE_DEPENDS)" >>$@ @echo "Suggests: $(STRACE_SUGGESTS)" >>$@ @echo "Conflicts: $(STRACE_CONFLICTS)" >>$@ # # This builds the IPK file. # # Binaries should be installed into $(STRACE_IPK_DIR)$(TARGET_PREFIX)/sbin or $(STRACE_IPK_DIR)$(TARGET_PREFIX)/bin # (use the location in a well-known Linux distro as a guide for choosing sbin or bin). # Libraries and include files should be installed into $(STRACE_IPK_DIR)$(TARGET_PREFIX)/{lib,include} # Configuration files should be installed in $(STRACE_IPK_DIR)$(TARGET_PREFIX)/etc/strace/... # Documentation files should be installed in $(STRACE_IPK_DIR)$(TARGET_PREFIX)/doc/strace/... # Daemon startup scripts should be installed in $(STRACE_IPK_DIR)$(TARGET_PREFIX)/etc/init.d/S??strace # # You may need to patch your application to make it use these locations. # $(STRACE_IPK): $(STRACE_BUILD_DIR)/.built rm -rf $(STRACE_IPK_DIR) $(BUILD_DIR)/strace_*_$(TARGET_ARCH).ipk $(INSTALL) -d $(STRACE_IPK_DIR)$(TARGET_PREFIX)/bin $(STRIP_COMMAND) $(STRACE_BUILD_DIR)/strace -o $(STRACE_IPK_DIR)$(TARGET_PREFIX)/bin/strace # $(INSTALL) -d $(STRACE_IPK_DIR)/CONTROL $(MAKE) $(STRACE_IPK_DIR)/CONTROL/control # sed -e "s/@ARCH@/$(TARGET_ARCH)/" \ # -e "s/@VERSION@/$(STRACE_VERSION)/" \ # -e "s/@RELEASE@/$(STRACE_IPK_VERSION)/" \ # $(STRACE_SOURCE_DIR)/control > $(STRACE_IPK_DIR)/CONTROL/control cd $(BUILD_DIR); $(IPKG_BUILD) $(STRACE_IPK_DIR) # # This is called from the top level makefile to create the IPK file. # strace-ipk: $(STRACE_IPK) # # This is called from the top level makefile to clean all of the built files. # strace-clean: rm -f $(STRACE_BUILD_DIR)/.built -$(MAKE) -C $(STRACE_BUILD_DIR) clean # # This is called from the top level makefile to clean all dynamically created # directories. # strace-dirclean: rm -rf $(BUILD_DIR)/$(STRACE_DIR) $(STRACE_BUILD_DIR) $(STRACE_IPK_DIR) $(STRACE_IPK) # # Some sanity check for the package. # strace-check: $(STRACE_IPK) perl scripts/optware-check-package.pl --target=$(OPTWARE_TARGET) $^
{ "pile_set_name": "Github" }
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.ads.googleads.v4.resources; import "google/ads/googleads/v4/common/matching_function.proto"; import "google/ads/googleads/v4/enums/feed_link_status.proto"; import "google/ads/googleads/v4/enums/placeholder_type.proto"; import "google/api/field_behavior.proto"; import "google/api/resource.proto"; import "google/protobuf/wrappers.proto"; import "google/api/annotations.proto"; option csharp_namespace = "Google.Ads.GoogleAds.V4.Resources"; option go_package = "google.golang.org/genproto/googleapis/ads/googleads/v4/resources;resources"; option java_multiple_files = true; option java_outer_classname = "AdGroupFeedProto"; option java_package = "com.google.ads.googleads.v4.resources"; option objc_class_prefix = "GAA"; option php_namespace = "Google\\Ads\\GoogleAds\\V4\\Resources"; option ruby_package = "Google::Ads::GoogleAds::V4::Resources"; // Proto file describing the AdGroupFeed resource. // An ad group feed. message AdGroupFeed { option (google.api.resource) = { type: "googleads.googleapis.com/AdGroupFeed" pattern: "customers/{customer}/adGroupFeeds/{ad_group_feed}" }; // Immutable. The resource name of the ad group feed. // Ad group feed resource names have the form: // // `customers/{customer_id}/adGroupFeeds/{ad_group_id}~{feed_id} string resource_name = 1 [ (google.api.field_behavior) = IMMUTABLE, (google.api.resource_reference) = { type: "googleads.googleapis.com/AdGroupFeed" } ]; // Immutable. The feed being linked to the ad group. google.protobuf.StringValue feed = 2 [ (google.api.field_behavior) = IMMUTABLE, (google.api.resource_reference) = { type: "googleads.googleapis.com/Feed" } ]; // Immutable. The ad group being linked to the feed. google.protobuf.StringValue ad_group = 3 [ (google.api.field_behavior) = IMMUTABLE, (google.api.resource_reference) = { type: "googleads.googleapis.com/AdGroup" } ]; // Indicates which placeholder types the feed may populate under the connected // ad group. Required. repeated google.ads.googleads.v4.enums.PlaceholderTypeEnum.PlaceholderType placeholder_types = 4; // Matching function associated with the AdGroupFeed. // The matching function is used to filter the set of feed items selected. // Required. google.ads.googleads.v4.common.MatchingFunction matching_function = 5; // Output only. Status of the ad group feed. // This field is read-only. google.ads.googleads.v4.enums.FeedLinkStatusEnum.FeedLinkStatus status = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; }
{ "pile_set_name": "Github" }
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # org.apache.ws.security.crypto.provider=org.apache.ws.security.components.crypto.Merlin org.apache.ws.security.crypto.merlin.keystore.type=jks org.apache.ws.security.crypto.merlin.keystore.password=password org.apache.ws.security.crypto.merlin.keystore.alias=bob org.apache.ws.security.crypto.merlin.keystore.file=keys/bob.jks
{ "pile_set_name": "Github" }
using System; using System.Web.Mvc; using Glimpse.Mvc.Model; using Glimpse.Test.Common; using Xunit; using Xunit.Extensions; namespace Glimpse.Test.Mvc.Model { public class ViewModelSummaryShould { [Theory, AutoMock] public void SetModelType(ViewDataDictionary viewData, TempDataDictionary tempData, string displayMode, Type displayModeType) { var sut = new ViewModelSummary(viewData, tempData, typeof(ViewModelSummary), true, displayMode, displayModeType); Assert.Equal(true, sut.IsValid); Assert.Equal(typeof(ViewModelSummary), sut.ModelType); } [Theory, AutoMock] public void ReturnViewDataKeys(TempDataDictionary tempData, string displayMode, Type displayModeType) { var viewData = new ViewDataDictionary { { "A", 1 }, { "B", 2 }, { "C", 3 } }; var sut = new ViewModelSummary(viewData, tempData, typeof(ViewModelSummary), true, displayMode, displayModeType); Assert.Contains("A", sut.ViewDataKeys); Assert.Contains("B", sut.ViewDataKeys); Assert.Contains("C", sut.ViewDataKeys); } [Theory, AutoMock] public void ReturnTempDataKeys(ViewDataDictionary viewData, string displayMode, Type displayModeType) { var tempData = new TempDataDictionary { { "A", 1 }, { "B", 2 }, { "C", 3 } }; var sut = new ViewModelSummary(viewData, tempData, typeof(ViewModelSummary), true, displayMode, displayModeType); Assert.Contains("A", sut.TempDataKeys); Assert.Contains("B", sut.TempDataKeys); Assert.Contains("C", sut.TempDataKeys); } [Theory, AutoMock] public void SetDisplayMode(ViewDataDictionary viewData, TempDataDictionary tempData, string displayMode, Type displayModeType) { var sut = new ViewModelSummary(viewData, tempData, typeof(ViewModelSummary), true, displayMode, displayModeType); Assert.Equal(displayMode, sut.DisplayModeId); } [Theory, AutoMock] public void SetDisplayModeType(ViewDataDictionary viewData, TempDataDictionary tempData, string displayMode, Type displayModeType) { var sut = new ViewModelSummary(viewData, tempData, typeof(ViewModelSummary), true, displayMode, displayModeType); Assert.Equal(displayModeType, sut.DisplayModeType); Assert.True(sut.HasDisplayMode); } } }
{ "pile_set_name": "Github" }
class Empty(Exception): """Exception for requesting data from an empty collection""" pass
{ "pile_set_name": "Github" }
/* * \brief Protective MBR partition table definitions * \author Josef Soentgen * \date 2018-05-03 */ /* * Copyright (C) 2018 Genode Labs GmbH * * This file is part of the Genode OS framework, which is distributed * under the terms of the GNU Affero General Public License version 3. */ #ifndef _PMBR_H_ #define _PMBR_H_ /* Genode includes */ #include <base/fixed_stdint.h> namespace Protective_mbr { enum { TYPE_PROTECTIVE = 0xEE, }; /** * Partition table entry format */ struct Partition { Genode::uint8_t unused[4] { }; Genode::uint8_t type { }; Genode::uint8_t unused2[3] { }; Genode::uint32_t lba { }; Genode::uint32_t sectors { }; } __attribute__((packed)); /** * Master boot record header */ struct Header { Genode::uint8_t unused[446] { }; Partition partitions[4] { }; Genode::uint16_t magic { 0xaa55 }; } __attribute__((packed)); } #endif /* _PMBR_H_ */
{ "pile_set_name": "Github" }
-- Copyright 2020 Stanford University -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. import "regent" local c = regentlib.c struct ret { v : int, id : uint64, } __demand(__inline) task inc1(x : int) : int return x + 1 end __demand(__inline) task dec1(x : int) : ret return ret { v = x - 1, id = c.legion_context_get_unique_id(__context()) } end __demand(__inline) task f(x : int) : ret return dec1(inc1(x + 5)) end __forbid(__inline) task g(x : int) : ret return ret { v = x + 5, id = c.legion_context_get_unique_id(__context()) } end __demand(__inline) task h() regentlib.c.printf("called h\n") return c.legion_context_get_unique_id(__context()) end task main() var id_main = c.legion_context_get_unique_id(__context()) var id_h = h() regentlib.assert(id_h == id_main, "test failed") for i = 0, 10 do var ret_f, ret_g = f(i), g(i) regentlib.assert(ret_f.v == ret_g.v, "test failed") regentlib.assert(id_main == ret_f.id, "test failed") regentlib.assert(id_main ~= ret_g.id, "test failed") end end regentlib.start(main)
{ "pile_set_name": "Github" }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( authorizationapi "k8s.io/api/authorization/v1beta1" ) type LocalSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) } func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { result = &authorizationapi.LocalSubjectAccessReview{} err = c.client.Post(). Namespace(c.ns). Resource("localsubjectaccessreviews"). Body(sar). Do(). Into(result) return }
{ "pile_set_name": "Github" }
<!DOCTYPE HTML> <!-- Any copyright is dedicated to the Public Domain. http://creativecommons.org/publicdomain/zero/1.0/ --> <html class="reftest-wait"><head> <meta charset="utf-8"> <title>CSS Grid Test: test 027 dynamic remove/insert second item</title> <link rel="author" title="Mats Palmgren" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1144096"> <link rel="help" href="https://drafts.csswg.org/css-grid/#pagination"> <link rel="match" href="grid-fragmentation-027-ref.html"> <script src="support/dyn.js"></script> <script> function runTest(text) { document.body.innerHTML = text; dyn3('.grid'); document.documentElement.removeAttribute("class"); } </script> </head> <body onload='dynamicTest("grid-fragmentation-027.html", runTest)'></body> </html>
{ "pile_set_name": "Github" }
/* $Id$ * * Copyright (c) 2010 Anders Wallin (anders.e.e.wallin "at" gmail.com). * * This file is part of OpenCAMlib * (see https://github.com/aewallin/opencamlib). * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <sstream> #include <cmath> // uncomment to disable assert() calls // #define NDEBUG #include <cassert> #include "point.hpp" #include "ellipseposition.hpp" #include "ellipse.hpp" #include "numeric.hpp" namespace ocl { //******** EllipsePosition ********************** */ EllipsePosition::EllipsePosition() { diangle = 0.0; setD(); } void EllipsePosition::setDiangle(double dia) { assert( !std::isnan(dia) ); diangle = dia; setD(); } void EllipsePosition::setD() { // set (s,t) to angle corresponding to diangle // see: http://www.freesteel.co.uk/wpblog/2009/06/encoding-2d-angles-without-trigonometry/ // see: http://www.anderswallin.net/2010/07/radians-vs-diamondangle/ // return P2( (a < 2 ? 1-a : a-3), // (a < 3 ? ((a > 1) ? 2-a : a) : a-4) double d = diangle; assert( !std::isnan(d) ); while ( d > 4.0 ) // make d a diangle in [0,4] d -= 4.0; while ( d < 0.0) d+=4.0; assert( d >= 0.0 && d <= 4.0 ); // now we should be in [0,4] Point p( (d < 2 ? 1-d : d-3) , (d < 3 ? ((d > 1) ? 2-d : d) : d-4) ); // now we have a vector pointing in the right direction // but it is not normalized p.normalize(); s = p.x; t = p.y; assert( this->isValid() ); } // check that s and t values are OK bool EllipsePosition::isValid() const { if ( isZero_tol( square(s) + square(t) - 1.0 ) ) return true; else { std::cout << " EllipsePosition=" << *this << "\n"; std::cout << " square(s) + square(t) - 1.0 = " << square(s) + square(t) - 1.0 << " !!\n"; return false; } } EllipsePosition& EllipsePosition::operator=(const EllipsePosition &pos) { s = pos.s; t = pos.t; diangle = pos.diangle; return *this; } std::string EllipsePosition::str() const { std::ostringstream o; o << *this; return o.str(); } std::ostream& operator<<(std::ostream &stream, EllipsePosition pos) { stream << "("<< pos.s <<" ," << pos.t << ")"; return stream; } }//end namespace //end file ellipseposition.cpp
{ "pile_set_name": "Github" }
program where_01 implicit none real :: a(10), b(10) where (a >= 0) b = 1 else where b = 0 end where where (a >= 0) b = 1 elsewhere b = 0 end where where (a >= 0) b = 1 elsewhere b = 0 endwhere end program
{ "pile_set_name": "Github" }
//===-- ARMBuildAttributes.h - ARM Build Attributes -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains enumerations and support routines for ARM build attributes // as defined in ARM ABI addenda document (ABI release 2.08). // // ELF for the ARM Architecture r2.09 - November 30, 2012 // // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H #define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H namespace llvm { class StringRef; namespace ARMBuildAttrs { enum SpecialAttr { // This is for the .cpu asm attr. It translates into one or more // AttrType (below) entries in the .ARM.attributes section in the ELF. SEL_CPU }; enum AttrType { // Rest correspond to ELF/.ARM.attributes File = 1, CPU_raw_name = 4, CPU_name = 5, CPU_arch = 6, CPU_arch_profile = 7, ARM_ISA_use = 8, THUMB_ISA_use = 9, FP_arch = 10, WMMX_arch = 11, Advanced_SIMD_arch = 12, PCS_config = 13, ABI_PCS_R9_use = 14, ABI_PCS_RW_data = 15, ABI_PCS_RO_data = 16, ABI_PCS_GOT_use = 17, ABI_PCS_wchar_t = 18, ABI_FP_rounding = 19, ABI_FP_denormal = 20, ABI_FP_exceptions = 21, ABI_FP_user_exceptions = 22, ABI_FP_number_model = 23, ABI_align_needed = 24, ABI_align_preserved = 25, ABI_enum_size = 26, ABI_HardFP_use = 27, ABI_VFP_args = 28, ABI_WMMX_args = 29, ABI_optimization_goals = 30, ABI_FP_optimization_goals = 31, compatibility = 32, CPU_unaligned_access = 34, FP_HP_extension = 36, ABI_FP_16bit_format = 38, MPextension_use = 42, // recoded from 70 (ABI r2.08) DIV_use = 44, also_compatible_with = 65, conformance = 67, Virtualization_use = 68, /// Legacy Tags Section = 2, // deprecated (ABI r2.09) Symbol = 3, // deprecated (ABI r2.09) ABI_align8_needed = 24, // renamed to ABI_align_needed (ABI r2.09) ABI_align8_preserved = 25, // renamed to ABI_align_preserved (ABI r2.09) nodefaults = 64, // deprecated (ABI r2.09) T2EE_use = 66, // deprecated (ABI r2.09) MPextension_use_old = 70 // recoded to MPextension_use (ABI r2.08) }; StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix = true); StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix = true); int AttrTypeFromString(StringRef Tag); // Magic numbers for .ARM.attributes enum AttrMagic { Format_Version = 0x41 }; // Legal Values for CPU_arch, (=6), uleb128 enum CPUArch { Pre_v4 = 0, v4 = 1, // e.g. SA110 v4T = 2, // e.g. ARM7TDMI v5T = 3, // e.g. ARM9TDMI v5TE = 4, // e.g. ARM946E_S v5TEJ = 5, // e.g. ARM926EJ_S v6 = 6, // e.g. ARM1136J_S v6KZ = 7, // e.g. ARM1176JZ_S v6T2 = 8, // e.g. ARM1156T2F_S v6K = 9, // e.g. ARM1136J_S v7 = 10, // e.g. Cortex A8, Cortex M3 v6_M = 11, // e.g. Cortex M1 v6S_M = 12, // v6_M with the System extensions v7E_M = 13, // v7_M with DSP extensions v8 = 14 // v8, AArch32 }; enum CPUArchProfile { // (=7), uleb128 Not_Applicable = 0, // pre v7, or cross-profile code ApplicationProfile = (0x41), // 'A' (e.g. for Cortex A8) RealTimeProfile = (0x52), // 'R' (e.g. for Cortex R4) MicroControllerProfile = (0x4D), // 'M' (e.g. for Cortex M3) SystemProfile = (0x53) // 'S' Application or real-time profile }; // The following have a lot of common use cases enum { Not_Allowed = 0, Allowed = 1, // Tag_ARM_ISA_use (=8), uleb128 // Tag_THUMB_ISA_use, (=9), uleb128 AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions) // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10) AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA) AllowFPv3A = 3, // v3 FP ISA permitted (implies use of the v2 FP ISA) AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31 AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA) AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31 AllowFPARMv8A = 7, // Use of the ARM v8-A FP ISA was permitted AllowFPARMv8B = 8, // Use of the ARM v8-A FP ISA was permitted, but only // D0-D15, S0-S31 // Tag_WMMX_arch, (=11), uleb128 AllowWMMXv1 = 1, // The user permitted this entity to use WMMX v1 AllowWMMXv2 = 2, // The user permitted this entity to use WMMX v2 // Tag_Advanced_SIMD_arch, (=12), uleb128 AllowNeon = 1, // SIMDv1 was permitted AllowNeon2 = 2, // SIMDv2 was permitted (Half-precision FP, MAC operations) AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted // Tag_ABI_PCS_R9_use, (=14), uleb128 R9IsGPR = 0, // R9 used as v6 (just another callee-saved register) R9IsSB = 1, // R9 used as a global static base rgister R9IsTLSPointer = 2, // R9 used as a thread local storage pointer R9Reserved = 3, // R9 not used by code associated with attributed entity // Tag_ABI_PCS_RW_data, (=15), uleb128 AddressRWPCRel = 1, // Address RW static data PC-relative AddressRWSBRel = 2, // Address RW static data SB-relative AddressRWNone = 3, // No RW static data permitted // Tag_ABI_PCS_RO_data, (=14), uleb128 AddressROPCRel = 1, // Address RO static data PC-relative AddressRONone = 2, // No RO static data permitted // Tag_ABI_PCS_GOT_use, (=17), uleb128 AddressDirect = 1, // Address imported data directly AddressGOT = 2, // Address imported data indirectly (via GOT) // Tag_ABI_PCS_wchar_t, (=18), uleb128 WCharProhibited = 0, // wchar_t is not used WCharWidth2Bytes = 2, // sizeof(wchar_t) == 2 WCharWidth4Bytes = 4, // sizeof(wchar_t) == 4 // Tag_ABI_FP_denormal, (=20), uleb128 PositiveZero = 0, IEEEDenormals = 1, PreserveFPSign = 2, // sign when flushed-to-zero is preserved // Tag_ABI_FP_number_model, (=23), uleb128 AllowRTABI = 2, // numbers, infinities, and one quiet NaN (see [RTABI]) AllowIEE754 = 3, // this code to use all the IEEE 754-defined FP encodings // Tag_ABI_enum_size, (=26), uleb128 EnumProhibited = 0, // The user prohibited the use of enums when building // this entity. EnumSmallest = 1, // Enum is smallest container big enough to hold all // values. Enum32Bit = 2, // Enum is at least 32 bits. Enum32BitABI = 3, // Every enumeration visible across an ABI-complying // interface contains a value needing 32 bits to encode // it; other enums can be containerized. // Tag_ABI_HardFP_use, (=27), uleb128 HardFPImplied = 0, // FP use should be implied by Tag_FP_arch HardFPSinglePrecision = 1, // Single-precision only // Tag_ABI_VFP_args, (=28), uleb128 BaseAAPCS = 0, HardFPAAPCS = 1, // Tag_FP_HP_extension, (=36), uleb128 AllowHPFP = 1, // Allow use of Half Precision FP // Tag_FP_16bit_format, (=38), uleb128 FP16FormatIEEE = 1, // Tag_MPextension_use, (=42), uleb128 AllowMP = 1, // Allow use of MP extensions // Tag_DIV_use, (=44), uleb128 // Note: AllowDIVExt must be emitted if and only if the permission to use // hardware divide cannot be conveyed using AllowDIVIfExists or DisallowDIV AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no // info exists. DisallowDIV = 1, // Hardware divide explicitly disallowed. AllowDIVExt = 2, // Allow hardware divide as optional architecture // extension above the base arch specified by // Tag_CPU_arch and Tag_CPU_arch_profile. // Tag_Virtualization_use, (=68), uleb128 AllowTZ = 1, AllowVirtualization = 2, AllowTZVirtualization = 3 }; } // namespace ARMBuildAttrs } // namespace llvm #endif
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="ISO-8859-1"?> <!-- - - This file is part of the OpenLink Software Virtuoso Open-Source (VOS) - project. - - Copyright (C) 1998-2020 OpenLink Software - - This project is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the - Free Software Foundation; only version 2 of the License, dated June 1991. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - --> <refentry id="fn_udt_defines_field"> <refmeta> <refentrytitle>udt_defines_field</refentrytitle> <refmiscinfo>type</refmiscinfo> </refmeta> <refnamediv> <refname>udt_defines_field</refname> <refpurpose>Determines whether a user defined type contains a specified member.</refpurpose> </refnamediv> <refsynopsisdiv> <funcsynopsis id="fsyn_udt_defines_field"> <funcprototype id="fproto_udt_defines_field"> <funcdef>integer <function>udt_defines_field</function></funcdef> <paramdef>in <parameter>udt</parameter> any</paramdef> <paramdef>in <parameter>member_name</parameter> varchar</paramdef> </funcprototype> </funcsynopsis> </refsynopsisdiv> <refsect1 id="desc_udt_defines_field"> <title>Description</title> <para>This function is used to determine whether the supplied member_name is a member contained by the supplied udt.</para> </refsect1> <refsect1 id="params_udt_defines_field"> <title>Parameters</title> <refsect2><title>udt</title> <para>A user defined type name as varchar or type instance.</para> </refsect2> <refsect2><title>member_name</title> <para>The requested member name.</para> </refsect2> </refsect1> <refsect1 id="ret_udt_defines_field"><title>Return Types</title> <para>This function returns either 1 (true) or 0 (false). 1 (true) is returned if the udt contains a member whose name is equal to the value of member_name, or 0 otherwise.</para> </refsect1> <!-- <refsect1 id="errors_udt_defines_field"> <title>Errors</title> <para>This function can generate the following errors:</para> <errorcode></errorcode> </refsect1> --> <refsect1 id="examples_udt_defines_field"> <title>Examples</title> <example id="ex_udt_defines_field"><title>Simple Use</title> <screen><![CDATA[ select udt_defines_field (new SER_UDT(), 'A'); ]]></screen> <para>returns 1</para> <screen><![CDATA[ select udt_defines_field (new SER_UDT_SUB(), 'A'); ]]></screen> <para>returns 1</para> <screen><![CDATA[ select udt_defines_field (new SER_UDT(), 'B'); ]]></screen> <para>returns 0;</para> </example> </refsect1> <refsect1 id="seealso_udt_defines_field"> <title>See Also</title> <para><link linkend="fn_udt_get"><function>udt_get()</function></link></para> <para><link linkend="fn_udt_implements_method"><function>udt_implements_method()</function></link></para> <para><link linkend="fn_udt_instance_of"><function>udt_instance_of()</function></link></para> <para><link linkend="fn_udt_set"><function>udt_set()</function></link></para> </refsect1> </refentry>
{ "pile_set_name": "Github" }
/** * Mupen64 - tlb.c * Copyright (C) 2002 Hacktarux * * Mupen64 homepage: http://mupen64.emulation64.com * email address: [email protected] * * If you want to contribute to the project please contact * me first (maybe someone is already making what you are * planning to do). * * * This program is free software; you can redistribute it and/ * or modify it under the terms of the GNU General Public Li- * cence as published by the Free Software Foundation; either * version 2 of the Licence, or any later version. * * This program is distributed in the hope that it will be use- * ful, but WITHOUT ANY WARRANTY; without even the implied war- * ranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public Licence for more details. * * You should have received a copy of the GNU General Public * Licence along with this program; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, * USA. * **/ #include "r4300.h" #include "macros.h" #include "ops.h" #include "recomph.h" #include "interupt.h" #include "Invalid_Code.h" #include "../main/md5.h" #include "../gc_memory/memory.h" #include "../gc_memory/TLB-Cache.h" #include "ARAM-blocks.h" #include <zlib.h> uLong ZEXPORT adler32(uLong adler, const Bytef *buf, uInt len); void TLBR() { int index; index = Index & 0x1F; PageMask = tlb_e[index].mask << 13; EntryHi = ((tlb_e[index].vpn2 << 13) | tlb_e[index].asid); EntryLo0 = (tlb_e[index].pfn_even << 6) | (tlb_e[index].c_even << 3) | (tlb_e[index].d_even << 2) | (tlb_e[index].v_even << 1) | tlb_e[index].g; EntryLo1 = (tlb_e[index].pfn_odd << 6) | (tlb_e[index].c_odd << 3) | (tlb_e[index].d_odd << 2) | (tlb_e[index].v_odd << 1) | tlb_e[index].g; PC++; } void TLBWI() { unsigned int i; PowerPC_block* temp_block; if (tlb_e[Index&0x3F].v_even) { for (i=tlb_e[Index&0x3F].start_even>>12; i<=tlb_e[Index&0x3F].end_even>>12; i++) { temp_block = blocks_get(i); #ifdef USE_TLB_CACHE unsigned long paddr = TLBCache_get_r(i); if(!invalid_code_get(i) && (invalid_code_get(paddr>>12) || invalid_code_get((paddr>>12)+0x20000))) #else if(!invalid_code_get(i) &&(invalid_code_get(tlb_LUT_r[i]>>12) || invalid_code_get((tlb_LUT_r[i]>>12)+0x20000))) #endif invalid_code_set(i, 1); if (!invalid_code_get(i)) { /*int j; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) blocks[i]->md5[j] = digest[j];*/ #ifdef USE_TLB_CACHE temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(paddr&0x7FF000)/4], 0x1000); #else temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); #endif invalid_code_set(i, 1); } else if (temp_block) { /*int j; for (j=0; j<16; j++) blocks[i]->md5[j] = 0;*/ temp_block->adler32 = 0; } #ifdef USE_TLB_CACHE TLBCache_set_r(i, 0); #else tlb_LUT_r[i] = 0; #endif } if (tlb_e[Index&0x3F].d_even) for (i=tlb_e[Index&0x3F].start_even>>12; i<=tlb_e[Index&0x3F].end_even>>12; i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i, 0); #else tlb_LUT_w[i] = 0; #endif } if (tlb_e[Index&0x3F].v_odd) { for (i=tlb_e[Index&0x3F].start_odd>>12; i<=tlb_e[Index&0x3F].end_odd>>12; i++) { temp_block = blocks_get(i); #ifdef USE_TLB_CACHE unsigned long paddr = TLBCache_get_r(i); if(!invalid_code_get(i) && (invalid_code_get(paddr>>12) || invalid_code_get((paddr>>12)+0x20000))) #else if(!invalid_code_get(i) &&(invalid_code_get(tlb_LUT_r[i]>>12) || invalid_code_get((tlb_LUT_r[i]>>12)+0x20000))) #endif invalid_code_set(i, 1); if (!invalid_code_get(i)) { /*int j; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) blocks[i]->md5[j] = digest[j];*/ #ifdef USE_TLB_CACHE temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(paddr&0x7FF000)/4], 0x1000); #else temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); #endif invalid_code_set(i, 1); } else if (temp_block) { /*int j; for (j=0; j<16; j++) blocks[i]->md5[j] = 0;*/ temp_block->adler32 = 0; } #ifdef USE_TLB_CACHE TLBCache_set_r(i, 0); #else tlb_LUT_r[i] = 0; #endif } if (tlb_e[Index&0x3F].d_odd) for (i=tlb_e[Index&0x3F].start_odd>>12; i<=tlb_e[Index&0x3F].end_odd>>12; i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i, 0); #else tlb_LUT_w[i] = 0; #endif } tlb_e[Index&0x3F].g = (EntryLo0 & EntryLo1 & 1); tlb_e[Index&0x3F].pfn_even = (EntryLo0 & 0x3FFFFFC0) >> 6; tlb_e[Index&0x3F].pfn_odd = (EntryLo1 & 0x3FFFFFC0) >> 6; tlb_e[Index&0x3F].c_even = (EntryLo0 & 0x38) >> 3; tlb_e[Index&0x3F].c_odd = (EntryLo1 & 0x38) >> 3; tlb_e[Index&0x3F].d_even = (EntryLo0 & 0x4) >> 2; tlb_e[Index&0x3F].d_odd = (EntryLo1 & 0x4) >> 2; tlb_e[Index&0x3F].v_even = (EntryLo0 & 0x2) >> 1; tlb_e[Index&0x3F].v_odd = (EntryLo1 & 0x2) >> 1; tlb_e[Index&0x3F].asid = (EntryHi & 0xFF); tlb_e[Index&0x3F].vpn2 = (EntryHi & 0xFFFFE000) >> 13; //tlb_e[Index&0x3F].r = (EntryHi & 0xC000000000000000LL) >> 62; tlb_e[Index&0x3F].mask = (PageMask & 0x1FFE000) >> 13; tlb_e[Index&0x3F].start_even = tlb_e[Index&0x3F].vpn2 << 13; tlb_e[Index&0x3F].end_even = tlb_e[Index&0x3F].start_even+ (tlb_e[Index&0x3F].mask << 12) + 0xFFF; tlb_e[Index&0x3F].phys_even = tlb_e[Index&0x3F].pfn_even << 12; if (tlb_e[Index&0x3F].v_even) { if (tlb_e[Index&0x3F].start_even < tlb_e[Index&0x3F].end_even && !(tlb_e[Index&0x3F].start_even >= 0x80000000 && tlb_e[Index&0x3F].end_even < 0xC0000000) && tlb_e[Index&0x3F].phys_even < 0x20000000) { for (i=tlb_e[Index&0x3F].start_even;i<tlb_e[Index&0x3F].end_even;i++){ #ifdef USE_TLB_CACHE TLBCache_set_r(i>>12, 0x80000000 | (tlb_e[Index&0x3F].phys_even + (i - tlb_e[Index&0x3F].start_even))); #else tlb_LUT_r[i>>12] = 0x80000000 | (tlb_e[Index&0x3F].phys_even + (i - tlb_e[Index&0x3F].start_even)); #endif } if (tlb_e[Index&0x3F].d_even) for (i=tlb_e[Index&0x3F].start_even;i<tlb_e[Index&0x3F].end_even;i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i>>12, 0x80000000 | (tlb_e[Index&0x3F].phys_even + (i - tlb_e[Index&0x3F].start_even))); #else tlb_LUT_w[i>>12] = 0x80000000 | (tlb_e[Index&0x3F].phys_even + (i - tlb_e[Index&0x3F].start_even)); #endif } for (i=tlb_e[Index&0x3F].start_even>>12; i<=tlb_e[Index&0x3F].end_even>>12; i++) { temp_block = blocks_get(i); /*if (blocks[i] && (blocks[i]->md5[0] || blocks[i]->md5[1] || blocks[i]->md5[2] || blocks[i]->md5[3])) { int j; int equal = 1; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) if (digest[j] != blocks[i]->md5[j]) equal = 0; if (equal) invalid_code_set(i, 0); }*/ if(temp_block && temp_block->adler32) { #ifdef USE_TLB_CACHE unsigned long paddr = TLBCache_get_r(i); if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(paddr&0x7FF000)/4],0x1000)) #else if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4],0x1000)) #endif invalid_code_set(i, 0); } } } tlb_e[Index&0x3F].start_odd = tlb_e[Index&0x3F].end_even+1; tlb_e[Index&0x3F].end_odd = tlb_e[Index&0x3F].start_odd+ (tlb_e[Index&0x3F].mask << 12) + 0xFFF; tlb_e[Index&0x3F].phys_odd = tlb_e[Index&0x3F].pfn_odd << 12; if (tlb_e[Index&0x3F].v_odd) { if (tlb_e[Index&0x3F].start_odd < tlb_e[Index&0x3F].end_odd && !(tlb_e[Index&0x3F].start_odd >= 0x80000000 && tlb_e[Index&0x3F].end_odd < 0xC0000000) && tlb_e[Index&0x3F].phys_odd < 0x20000000) { for (i=tlb_e[Index&0x3F].start_odd;i<tlb_e[Index&0x3F].end_odd;i++) #ifdef USE_TLB_CACHE TLBCache_set_r(i>>12, 0x80000000 | (tlb_e[Index&0x3F].phys_odd + (i - tlb_e[Index&0x3F].start_odd))); #else tlb_LUT_r[i>>12] = 0x80000000 | (tlb_e[Index&0x3F].phys_odd + (i - tlb_e[Index&0x3F].start_odd)); #endif if (tlb_e[Index&0x3F].d_odd) for (i=tlb_e[Index&0x3F].start_odd;i<tlb_e[Index&0x3F].end_odd;i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i>>12, 0x80000000 | (tlb_e[Index&0x3F].phys_odd + (i - tlb_e[Index&0x3F].start_odd))); #else tlb_LUT_w[i>>12] = 0x80000000 | (tlb_e[Index&0x3F].phys_odd + (i - tlb_e[Index&0x3F].start_odd)); #endif } for (i=tlb_e[Index&0x3F].start_odd>>12; i<=tlb_e[Index&0x3F].end_odd>>12; i++) { temp_block = blocks_get(i); /*if (blocks[i] && (blocks[i]->md5[0] || blocks[i]->md5[1] || blocks[i]->md5[2] || blocks[i]->md5[3])) { int j; int equal = 1; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) if (digest[j] != blocks[i]->md5[j]) equal = 0; if (equal) invalid_code_set(i, 0); }*/ if(temp_block && temp_block->adler32) { #ifdef USE_TLB_CACHE if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(TLBCache_get_r(i)&0x7FF000)/4],0x1000)) #else if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4],0x1000)) #endif invalid_code_set(i, 0); } } } PC++; } void TLBWR() { unsigned int i; update_count(); PowerPC_block* temp_block; Random = (Count/2 % (32 - Wired)) + Wired; if (tlb_e[Random].v_even) { for (i=tlb_e[Random].start_even>>12; i<=tlb_e[Random].end_even>>12; i++) { temp_block = blocks_get(i); #ifdef USE_TLB_CACHE unsigned long paddr = TLBCache_get_r(i); if(!invalid_code_get(i) && (invalid_code_get(paddr>>12) || invalid_code_get((paddr>>12)+0x20000))) #else if(!invalid_code_get(i) &&(invalid_code_get(tlb_LUT_r[i]>>12) || invalid_code_get((tlb_LUT_r[i]>>12)+0x20000))) #endif invalid_code_set(i, 1); if (!invalid_code_get(i)) { /*int j; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) blocks[i]->md5[j] = digest[j];*/ #ifdef USE_TLB_CACHE temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(paddr&0x7FF000)/4], 0x1000); #else temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); #endif invalid_code_set(i, 1); } else if (temp_block) { /*int j; for (j=0; j<16; j++) blocks[i]->md5[j] = 0;*/ temp_block->adler32 = 0; } #ifdef USE_TLB_CACHE TLBCache_set_r(i, 0); #else tlb_LUT_r[i] = 0; #endif } if (tlb_e[Random].d_even) for (i=tlb_e[Random].start_even>>12; i<=tlb_e[Random].end_even>>12; i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i, 0); #else tlb_LUT_w[i] = 0; #endif } if (tlb_e[Random].v_odd) { for (i=tlb_e[Random].start_odd>>12; i<=tlb_e[Random].end_odd>>12; i++) { temp_block = blocks_get(i); #ifdef USE_TLB_CACHE unsigned long paddr = TLBCache_get_r(i); if(!invalid_code_get(i) && (invalid_code_get(paddr>>12) || invalid_code_get((paddr>>12)+0x20000))) #else if(!invalid_code_get(i) &&(invalid_code_get(tlb_LUT_r[i]>>12) || invalid_code_get((tlb_LUT_r[i]>>12)+0x20000))) #endif invalid_code_set(i, 1); if (!invalid_code_get(i)) { /*int j; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) blocks[i]->md5[j] = digest[j];*/ #ifdef USE_TLB_CACHE temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(paddr&0x7FF000)/4], 0x1000); #else temp_block->adler32 = adler32(0, (const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); #endif invalid_code_set(i, 1); } else if (temp_block) { /*int j; for (j=0; j<16; j++) blocks[i]->md5[j] = 0;*/ temp_block->adler32 = 0; } #ifdef USE_TLB_CACHE TLBCache_set_r(i, 0); #else tlb_LUT_r[i] = 0; #endif } if (tlb_e[Random].d_odd) for (i=tlb_e[Random].start_odd>>12; i<=tlb_e[Random].end_odd>>12; i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i, 0); #else tlb_LUT_w[i] = 0; #endif } tlb_e[Random].g = (EntryLo0 & EntryLo1 & 1); tlb_e[Random].pfn_even = (EntryLo0 & 0x3FFFFFC0) >> 6; tlb_e[Random].pfn_odd = (EntryLo1 & 0x3FFFFFC0) >> 6; tlb_e[Random].c_even = (EntryLo0 & 0x38) >> 3; tlb_e[Random].c_odd = (EntryLo1 & 0x38) >> 3; tlb_e[Random].d_even = (EntryLo0 & 0x4) >> 2; tlb_e[Random].d_odd = (EntryLo1 & 0x4) >> 2; tlb_e[Random].v_even = (EntryLo0 & 0x2) >> 1; tlb_e[Random].v_odd = (EntryLo1 & 0x2) >> 1; tlb_e[Random].asid = (EntryHi & 0xFF); tlb_e[Random].vpn2 = (EntryHi & 0xFFFFE000) >> 13; //tlb_e[Random].r = (EntryHi & 0xC000000000000000LL) >> 62; tlb_e[Random].mask = (PageMask & 0x1FFE000) >> 13; tlb_e[Random].start_even = tlb_e[Random].vpn2 << 13; tlb_e[Random].end_even = tlb_e[Random].start_even+ (tlb_e[Random].mask << 12) + 0xFFF; tlb_e[Random].phys_even = tlb_e[Random].pfn_even << 12; if (tlb_e[Random].v_even) { if (tlb_e[Random].start_even < tlb_e[Random].end_even && !(tlb_e[Random].start_even >= 0x80000000 && tlb_e[Random].end_even < 0xC0000000) && tlb_e[Random].phys_even < 0x20000000) { for (i=tlb_e[Random].start_even;i<tlb_e[Random].end_even;i++) #ifdef USE_TLB_CACHE TLBCache_set_r(i>>12, 0x80000000 | (tlb_e[Random].phys_even + (i - tlb_e[Random].start_even))); #else tlb_LUT_r[i>>12] = 0x80000000 | (tlb_e[Random].phys_even + (i - tlb_e[Random].start_even)); #endif if (tlb_e[Random].d_even) for (i=tlb_e[Random].start_even;i<tlb_e[Random].end_even;i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i>>12, 0x80000000 | (tlb_e[Random].phys_even + (i - tlb_e[Random].start_even))); #else tlb_LUT_w[i>>12] = 0x80000000 | (tlb_e[Random].phys_even + (i - tlb_e[Random].start_even)); #endif } for (i=tlb_e[Random].start_even>>12; i<=tlb_e[Random].end_even>>12; i++) { temp_block = blocks_get(i); /*if (blocks[i] && (blocks[i]->md5[0] || blocks[i]->md5[1] || blocks[i]->md5[2] || blocks[i]->md5[3])) { int j; int equal = 1; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) if (digest[j] != blocks[i]->md5[j]) equal = 0; if (equal) invalid_code_set(i, 0); }*/ if(temp_block && temp_block->adler32) { #ifdef USE_TLB_CACHE if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(TLBCache_get_r(i)&0x7FF000)/4],0x1000)) #else if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4],0x1000)) #endif invalid_code_set(i, 0); } } } tlb_e[Random].start_odd = tlb_e[Random].end_even+1; tlb_e[Random].end_odd = tlb_e[Random].start_odd+ (tlb_e[Random].mask << 12) + 0xFFF; tlb_e[Random].phys_odd = tlb_e[Random].pfn_odd << 12; if (tlb_e[Random].v_odd) { if (tlb_e[Random].start_odd < tlb_e[Random].end_odd && !(tlb_e[Random].start_odd >= 0x80000000 && tlb_e[Random].end_odd < 0xC0000000) && tlb_e[Random].phys_odd < 0x20000000) { for (i=tlb_e[Random].start_odd;i<tlb_e[Random].end_odd;i++) #ifdef USE_TLB_CACHE TLBCache_set_r(i>>12, 0x80000000 | (tlb_e[Random].phys_odd + (i - tlb_e[Random].start_odd))); #else tlb_LUT_r[i>>12] = 0x80000000 | (tlb_e[Random].phys_odd + (i - tlb_e[Random].start_odd)); #endif if (tlb_e[Random].d_odd) for (i=tlb_e[Random].start_odd;i<tlb_e[Random].end_odd;i++) #ifdef USE_TLB_CACHE TLBCache_set_w(i>>12, 0x80000000 | (tlb_e[Random].phys_odd + (i - tlb_e[Random].start_odd))); #else tlb_LUT_w[i>>12] = 0x80000000 | (tlb_e[Random].phys_odd + (i - tlb_e[Random].start_odd)); #endif } for (i=tlb_e[Random].start_odd>>12; i<=tlb_e[Random].end_odd>>12; i++) { temp_block = blocks_get(i); /*if (blocks[i] && (blocks[i]->md5[0] || blocks[i]->md5[1] || blocks[i]->md5[2] || blocks[i]->md5[3])) { int j; int equal = 1; md5_state_t state; md5_byte_t digest[16]; md5_init(&state); md5_append(&state, (const md5_byte_t*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4], 0x1000); md5_finish(&state, digest); for (j=0; j<16; j++) if (digest[j] != blocks[i]->md5[j]) equal = 0; if (equal) invalid_code_set(i, 0); }*/ if(temp_block && temp_block->adler32) { #ifdef USE_TLB_CACHE if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(TLBCache_get_r(i)&0x7FF000)/4],0x1000)) #else if(temp_block->adler32 == adler32(0,(const Bytef*)&rdram[(tlb_LUT_r[i]&0x7FF000)/4],0x1000)) #endif invalid_code_set(i, 0); } } } PC++; } void TLBP() { int i; Index |= 0x80000000; for (i=0; i<32; i++) { if (((tlb_e[i].vpn2 & (~tlb_e[i].mask)) == (((EntryHi & 0xFFFFE000) >> 13) & (~tlb_e[i].mask))) && ((tlb_e[i].g) || (tlb_e[i].asid == (EntryHi & 0xFF)))) { Index = i; break; } } PC++; } int jump_marker = 0; void ERET() { update_count(); if (Status & 0x4) { printf ("erreur dans ERET\n"); stop=1; } else { Status &= 0xFFFFFFFD; jump_to(EPC); } llbit = 0; check_interupt(); last_addr = PC->addr; if (next_interupt <= Count) gen_interupt(); }
{ "pile_set_name": "Github" }
package org.dcache.services.info.gathers; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Required; import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; import dmg.cells.nucleus.CellLifeCycleAware; import dmg.cells.nucleus.EnvironmentAware; import dmg.cells.nucleus.UOID; import org.dcache.util.NDC; import org.dcache.services.info.base.StateExhibitor; import org.dcache.services.info.base.StateUpdateManager; import static com.google.common.base.Preconditions.checkState; /** * This thread is responsible for scheduling various data-gathering activity. * Multiple DataGatheringActivity instances can be registered, each will operate * independently. The frequency at which they trigger, or even whether they are * periodic, is completely under the control of the DGA. * <p> * These DataGatheringActivities can (in principle) do anything when * triggered, but will typically send one or more messages to dCache. * * @author Paul Millar <[email protected]> */ public class DataGatheringScheduler implements Runnable, EnvironmentAware, CellLifeCycleAware { private static final long FIVE_MINUTES = 5*60*1000; private static final Logger LOGGER_SCHED = LoggerFactory.getLogger(DataGatheringScheduler.class); private static final Logger LOGGER_RA = LoggerFactory.getLogger(RegisteredActivity.class); private boolean _timeToQuit; private final List<RegisteredActivity> _activity = new ArrayList<>(); private Map<String,Object> _environment; private Iterable<DgaFactoryService> _factories; private StateUpdateManager _sum; private StateExhibitor _exhibitor; private MessageSender _sender; private MessageMetadataRepository<UOID> _repository; private Thread _thread; /** * Class holding a periodically repeated DataGatheringActivity * @author Paul Millar <[email protected]> */ private static class RegisteredActivity { /** Min. delay (in ms). We prevent Schedulables from triggering more frequently than this */ private static final long MINIMUM_DGA_DELAY = 50; private final Schedulable _dga; /** The delay until this DataGatheringActivity should be next triggered */ private Date _nextTriggered; /** Whether we should include this activity when scheduling next activity */ private boolean _enabled = true; /** * Create a new PeriodicActvity, with specified DataGatheringActivity, that * is triggered with a fixed period. The initial delay is a randomly chosen * fraction of the period. * @param dga the DataGatheringActivity to be triggered periodically * @param period the period between successive triggering in milliseconds. */ RegisteredActivity(Schedulable dga) { _dga = dga; updateNextTrigger(); } /** * Try to make sure we don't hit the system with lots of queries at the same * time * @param period */ private void updateNextTrigger() { Date nextTrigger = _dga.shouldNextBeTriggered(); if (nextTrigger == null) { LOGGER_RA.error("registered dga returned null Date"); nextTrigger = new Date(System.currentTimeMillis() + FIVE_MINUTES); } else { // Safety! Check we wont trigger too quickly if (nextTrigger.getTime() - System.currentTimeMillis() < MINIMUM_DGA_DELAY) { LOGGER_RA.warn("DGA {} triggering too quickly ({}ms): engaging safety.", _dga, nextTrigger.getTime() - System.currentTimeMillis()); nextTrigger = new Date (System.currentTimeMillis() + MINIMUM_DGA_DELAY); } } _nextTriggered = nextTrigger; } /** * Update this PeriodicActivity so it's trigger time is <i>now</i>. */ public void shouldTriggerNow() { _nextTriggered = new Date(); } /** * Check the status of this activity. If the time has elapsed, * this will cause the DataGatheringActivity to be triggered * and the timer to be reset. * @return true if the DataGatheringActivity was triggered. */ boolean checkAndTrigger(Date now) { if (!_enabled) { return false; } if (now.before(_nextTriggered)) { return false; } NDC.push(_dga.toString()); _dga.trigger(); NDC.pop(); updateNextTrigger(); return true; } /** * Calculate the duration until the event has triggered. * @return duration, in milliseconds, until event or zero if it * should have been triggered already. */ long getDelay() { long delay = _nextTriggered.getTime() - System.currentTimeMillis(); return delay > 0 ? delay : 0; } /** * Return the time this will be next triggered. * @return */ long getNextTriggered() { return _nextTriggered.getTime(); } boolean isEnabled() { return _enabled; } void disable() { _enabled = false; } /** * Enable a periodic activity. */ void enable() { if (!_enabled) { _enabled = true; updateNextTrigger(); } } /** * A human-understandable name for this DGA * @return the underlying DGA's name */ @Override public String toString() { return _dga.toString(); } /** * Render current status into a human-understandable form. * @return single-line String describing current status. */ public String getStatus() { StringBuilder sb = new StringBuilder(); sb.append(this.toString()); sb.append(" ["); sb.append(_enabled ? "enabled" : "disabled"); if (_enabled) { sb.append(String .format(", next %1$.1fs", getDelay() / 1000.0)); } sb.append("]"); return sb.toString(); } } @Override public synchronized void afterStart() { checkState(_thread == null, "DataGatheringScheduler already started"); for (DgaFactoryService factory : _factories) { if (factory instanceof EnvironmentAware) { ((EnvironmentAware)factory).setEnvironment(_environment); } for (Schedulable dga : factory.createDgas(_exhibitor, _sender, _sum, _repository)) { _activity.add(new RegisteredActivity(dga)); } } _thread = new Thread(this); _thread.setName("DGA-Scheduler"); _thread.start(); } @Override public void setEnvironment(Map<String,Object> environment) { _environment = environment; } @Required public void setDgaFactories(Iterable<DgaFactoryService> factories) { _factories = factories; } @Required public void setStateUpdateManager(StateUpdateManager sum) { _sum = sum; } @Required public void setStateExhibitor(StateExhibitor exhibitor) { _exhibitor = exhibitor; } @Required public void setMessageSender(MessageSender sender) { _sender = sender; } @Required public void setMessageMetadataRepository(MessageMetadataRepository<UOID> repository) { _repository = repository; } /** * Main loop for this thread triggering DataGatheringActivity. */ @Override public void run() { long delay; Date now = new Date(); LOGGER_SCHED.debug("DGA Scheduler thread starting."); synchronized (_activity) { do { now.setTime(System.currentTimeMillis()); for (RegisteredActivity pa : _activity) { pa.checkAndTrigger(now); } delay = getWaitTimeout(); try { _activity.wait(delay); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } while (!_timeToQuit); } LOGGER_SCHED.debug("DGA Scheduler thread shutting down."); } /** * Search through out list of activity and find the one that matches this name. * <p> * This method assumes that the current thread already owns the _allActivity * monitor * @param name the name of the activity to fine * @return the corresponding PeriodicActivity object, or null if not found. */ private RegisteredActivity findActivity(String name) { RegisteredActivity foundPA = null; for (RegisteredActivity pa : _activity) { if (pa.toString().equals(name)) { foundPA = pa; break; } } return foundPA; } /** * Enable a data-gathering activity, based on a human-readable name. * @param name - name of the DGA. * @return null if successful or an error message if there was a problem. */ public String enableActivity(String name) { RegisteredActivity pa; boolean haveEnabled = false; synchronized (_activity) { pa = findActivity(name); if (pa != null && !pa._enabled) { pa.enable(); _activity.notify(); haveEnabled = true; } } return haveEnabled ? null : pa == null ? "Unknown DGA " + name : "DGA " + name + " already enabled"; } /** * Disabled a data-gathering activity, based on a human-readable name. * @param name - name of the DGA. * @return null if successful or an error message if there was a problem. */ public String disableActivity(String name) { RegisteredActivity pa; boolean haveDisabled = false; synchronized (_activity) { pa = findActivity(name); if (pa != null && pa._enabled) { pa.disable(); _activity.notify(); haveDisabled = true; } } return haveDisabled ? null : pa == null ? "Unknown DGA " + name : "DGA " + name + " already disabled"; } /** * Trigger a periodic activity right now. * @param name the PeriodicActivity to trigger * @return null if successful, an error message if there was a problem. */ public String triggerActivity(String name) { RegisteredActivity pa; synchronized (_activity) { pa = findActivity(name); if (pa != null) { pa.shouldTriggerNow(); _activity.notify(); } } return pa != null ? null : "Unknown DGA " + name; } /** * Request that this thread sends no more requests * for data. */ @Override public void beforeStop() { LOGGER_SCHED.debug("Requesting DGA Scheduler to shutdown."); synchronized (_activity) { _timeToQuit = true; _activity.notify(); } } /** * Calculate the delay, in milliseconds, until the next * PeriodicActivity is to be triggered, or 0 if there is * no registered Schedulable objects. * <p> * <i>NB</i> we assume that the current thread has already obtained the monitor for * _allActivity! * @return delay, in milliseconds, until next trigger or zero if there * is no recorded delay. */ private long getWaitTimeout() { long earliestTrig=0; synchronized (_activity) { for (RegisteredActivity thisPa : _activity) { if (!thisPa.isEnabled()) { continue; } long thisTrig = thisPa.getNextTriggered(); if (thisTrig < earliestTrig || earliestTrig == 0) { earliestTrig = thisTrig; } } } long delay = 0; if (earliestTrig > 0) { delay = earliestTrig - System.currentTimeMillis(); delay = delay < 1 ? 1 : delay; // enforce >1 to distinguish between "should trigger now" and "no registered activity". } return delay; } /** * Return a human-readable list of known activity. * @return */ public List<String> listActivity() { List<String> activityList = new ArrayList<>(); synchronized (_activity) { for (RegisteredActivity thisRa : _activity) { activityList.add(thisRa.getStatus()); } } return activityList; } }
{ "pile_set_name": "Github" }
{ "@context": "https://linkedsoftwaredependencies.org/bundles/npm/@comunica/actor-rdf-parse-html-script/^1.0.0/components/context.jsonld", "@id": "npmd:@comunica/actor-rdf-parse-html-script", "@type": "Module", "requireName": "@comunica/actor-rdf-parse-html-script", "import": [ "files-carphs:components/Actor/RdfParse/HtmlScript.jsonld" ] }
{ "pile_set_name": "Github" }
/* [auto_generated] boost/numeric/odeint/integrate/detail/integrate_n_steps.hpp [begin_description] integrate steps implementation [end_description] Copyright 2012-2015 Mario Mulansky Copyright 2012 Christoph Koke Copyright 2012 Karsten Ahnert Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED #define BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED #include <boost/numeric/odeint/util/unwrap_reference.hpp> #include <boost/numeric/odeint/stepper/stepper_categories.hpp> #include <boost/numeric/odeint/integrate/detail/integrate_adaptive.hpp> #include <boost/numeric/odeint/util/unit_helper.hpp> #include <boost/numeric/odeint/util/detail/less_with_sign.hpp> namespace boost { namespace numeric { namespace odeint { namespace detail { // forward declaration template< class Stepper , class System , class State , class Time , class Observer > size_t integrate_adaptive_checked( Stepper stepper , System system , State &start_state , Time &start_time , Time end_time , Time &dt , Observer observer, controlled_stepper_tag ); /* basic version */ template< class Stepper , class System , class State , class Time , class Observer> Time integrate_n_steps( Stepper stepper , System system , State &start_state , Time start_time , Time dt , size_t num_of_steps , Observer observer , stepper_tag ) { typename odeint::unwrap_reference< Observer >::type &obs = observer; typename odeint::unwrap_reference< Stepper >::type &st = stepper; Time time = start_time; for( size_t step = 0; step < num_of_steps ; ++step ) { obs( start_state , time ); st.do_step( system , start_state , time , dt ); // direct computation of the time avoids error propagation happening when using time += dt // we need clumsy type analysis to get boost units working here time = start_time + static_cast< typename unit_value_type<Time>::type >( step+1 ) * dt; } obs( start_state , time ); return time; } /* controlled version */ template< class Stepper , class System , class State , class Time , class Observer > Time integrate_n_steps( Stepper stepper , System system , State &start_state , Time start_time , Time dt , size_t num_of_steps , Observer observer , controlled_stepper_tag ) { typename odeint::unwrap_reference< Observer >::type &obs = observer; Time time = start_time; Time time_step = dt; for( size_t step = 0; step < num_of_steps ; ++step ) { obs( start_state , time ); // integrate_adaptive_checked uses the given checker to throw if an overflow occurs detail::integrate_adaptive(stepper, system, start_state, time, static_cast<Time>(time + time_step), dt, null_observer(), controlled_stepper_tag()); // direct computation of the time avoids error propagation happening when using time += dt // we need clumsy type analysis to get boost units working here time = start_time + static_cast< typename unit_value_type<Time>::type >(step+1) * time_step; } obs( start_state , time ); return time; } /* dense output version */ template< class Stepper , class System , class State , class Time , class Observer > Time integrate_n_steps( Stepper stepper , System system , State &start_state , Time start_time , Time dt , size_t num_of_steps , Observer observer , dense_output_stepper_tag ) { typename odeint::unwrap_reference< Observer >::type &obs = observer; typename odeint::unwrap_reference< Stepper >::type &st = stepper; Time time = start_time; const Time end_time = start_time + static_cast< typename unit_value_type<Time>::type >(num_of_steps) * dt; st.initialize( start_state , time , dt ); size_t step = 0; while( step < num_of_steps ) { while( less_with_sign( time , st.current_time() , st.current_time_step() ) ) { st.calc_state( time , start_state ); obs( start_state , time ); ++step; // direct computation of the time avoids error propagation happening when using time += dt // we need clumsy type analysis to get boost units working here time = start_time + static_cast< typename unit_value_type<Time>::type >(step) * dt; } // we have not reached the end, do another real step if( less_with_sign( static_cast<Time>(st.current_time()+st.current_time_step()) , end_time , st.current_time_step() ) ) { st.do_step( system ); } else if( less_with_sign( st.current_time() , end_time , st.current_time_step() ) ) { // do the last step ending exactly on the end point st.initialize( st.current_state() , st.current_time() , static_cast<Time>(end_time - st.current_time()) ); st.do_step( system ); } } // make sure we really end exactly where we should end while( st.current_time() < end_time ) { if( less_with_sign( end_time , static_cast<Time>(st.current_time()+st.current_time_step()) , st.current_time_step() ) ) st.initialize( st.current_state() , st.current_time() , static_cast<Time>(end_time - st.current_time()) ); st.do_step( system ); } // observation at final point obs( st.current_state() , end_time ); return time; } } } } } #endif /* BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED */
{ "pile_set_name": "Github" }
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt # Makefile for utility work on coverage.py. default: @echo "* No default action *" clean: -rm -f *.pyd */*.pyd -rm -f *.so */*.so -PYTHONPATH=. python tests/test_farm.py clean -rm -rf build coverage.egg-info dist htmlcov -rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc -rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo -rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak -rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class -rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__ -rm -f coverage/*,cover -rm -f MANIFEST -rm -f .coverage .coverage.* coverage.xml .metacov* .noseids -rm -f tests/zipmods.zip -rm -rf tests/eggsrc/build tests/eggsrc/dist tests/eggsrc/*.egg-info -rm -f setuptools-*.egg distribute-*.egg distribute-*.tar.gz -rm -rf doc/_build doc/_spell sterile: clean -rm -rf .tox* LINTABLE = coverage igor.py setup.py tests ci/*.py lint: -pylint $(LINTABLE) python -m tabnanny $(LINTABLE) python igor.py check_eol spell: -pylint --disable=all --enable=spelling $(LINTABLE) pep8: pep8 --filename=*.py --repeat $(LINTABLE) test: tox -e py27,py34 $(ARGS) metacov: COVERAGE_COVERAGE=yes tox $(ARGS) metahtml: python igor.py combine_html # Kitting kit: python setup.py sdist --formats=gztar,zip wheel: tox -c tox_wheels.ini $(ARGS) kit_upload: twine upload dist/* kit_local: cp -v dist/* `awk -F "=" '/find-links/ {print $$2}' ~/.pip/pip.conf` # pip caches wheels of things it has installed. Clean them out so we # don't go crazy trying to figure out why our new code isn't installing. find ~/Library/Caches/pip/wheels -name 'coverage-*' -delete download_appveyor: python ci/download_appveyor.py nedbat/coveragepy pypi: python setup.py register build_ext: python setup.py build_ext install: python setup.py install uninstall: -rm -rf $(PYHOME)/lib/site-packages/coverage* -rm -rf $(PYHOME)/scripts/coverage* # Documentation SPHINXBUILD = sphinx-build SPHINXOPTS = -a -E doc WEBHOME = ~/web/stellated/ WEBSAMPLE = $(WEBHOME)/files/sample_coverage_html WEBSAMPLEBETA = $(WEBHOME)/files/sample_coverage_html_beta docreqs: pip install -r doc/requirements.pip dochtml: $(SPHINXBUILD) -b html $(SPHINXOPTS) doc/_build/html @echo @echo "Build finished. The HTML pages are in doc/_build/html." docspell: $(SPHINXBUILD) -b spelling $(SPHINXOPTS) doc/_spell publish: rm -f $(WEBSAMPLE)/*.* mkdir -p $(WEBSAMPLE) cp doc/sample_html/*.* $(WEBSAMPLE) publishbeta: rm -f $(WEBSAMPLEBETA)/*.* mkdir -p $(WEBSAMPLEBETA) cp doc/sample_html_beta/*.* $(WEBSAMPLEBETA)
{ "pile_set_name": "Github" }
using CubeWorld.Tiles; using CubeWorld.Utils; using CubeWorld.Serialization; namespace CubeWorld.Tiles.Rules { public class TileRuleConditionNearTypeAmout : TileRuleCondition { public int minValue; public byte tileType; public TileRuleConditionNearTypeAmout() { } public TileRuleConditionNearTypeAmout(int minValue, byte tileType) { this.minValue = minValue; this.tileType = tileType; } public override bool Validate(TileManager tileManager, Tile tile, TilePosition pos) { tileManager.world.stats.checkedConditions++; int amount = 0; foreach (TilePosition delta in Manhattan.GetTilesAtDistance(1)) if (tileManager.IsValidTile(pos + delta) && tileManager.GetTileType(pos + delta) == tileType) amount++; return amount >= minValue; } public override void Serialize(Serializer serializer) { base.Serialize(serializer); serializer.Serialize(ref minValue, "minValue"); serializer.Serialize(ref tileType, "tileType"); } } }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="12118" systemVersion="16E195" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" colorMatched="YES"> <device id="retina4_7" orientation="portrait"> <adaptation id="fullscreen"/> </device> <dependencies> <deployment identifier="iOS"/> <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="12086"/> <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/> </dependencies> <objects> <placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner" customClass="ReferenceViewController"> <connections> <outlet property="view" destination="i5M-Pr-FkT" id="sfx-zR-JGt"/> </connections> </placeholder> <placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/> <view clearsContextBeforeDrawing="NO" contentMode="scaleToFill" id="i5M-Pr-FkT"> <rect key="frame" x="0.0" y="0.0" width="375" height="667"/> <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/> <subviews> <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="游戏介绍" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="tqv-hb-vlQ"> <rect key="frame" x="149" y="66" width="77.5" height="21"/> <constraints> <constraint firstAttribute="width" constant="77.5" id="HJM-XC-37x"/> <constraint firstAttribute="height" constant="21" id="Nk8-ng-QBO"/> </constraints> <fontDescription key="fontDescription" type="system" pointSize="19"/> <color key="textColor" white="0.33333333333333331" alpha="1" colorSpace="calibratedWhite"/> <nil key="highlightedColor"/> </label> <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text=" 通过滑动手势上下左右控制蛇的方向,寻找吃的东西,每吃一口就能得到一定的积分,而且蛇的身子会越吃越长,身子越长玩的难度就越大,不能碰墙,不能咬到自己的身体,更不能咬自己的尾巴。

 本游戏不设关卡。
 向经典致敬。" lineBreakMode="clip" numberOfLines="0" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="NhL-aB-nim"> <rect key="frame" x="37.5" y="113" width="300" height="169"/> <constraints> <constraint firstAttribute="width" constant="300" id="f4p-rl-MAS"/> <constraint firstAttribute="height" constant="169" id="kKy-UU-JgT"/> </constraints> <fontDescription key="fontDescription" name=".AppleSystemUIFont" family=".AppleSystemUIFont" pointSize="16"/> <color key="textColor" white="0.33333333333333331" alpha="1" colorSpace="calibratedWhite"/> <nil key="highlightedColor"/> </label> <button opaque="NO" contentMode="scaleToFill" contentHorizontalAlignment="center" contentVerticalAlignment="center" lineBreakMode="middleTruncation" translatesAutoresizingMaskIntoConstraints="NO" id="svQ-qY-LSe"> <rect key="frame" x="170" y="582" width="35" height="35"/> <constraints> <constraint firstAttribute="height" constant="35" id="kI3-00-VnM"/> <constraint firstAttribute="width" constant="35" id="ndr-Nd-dhj"/> </constraints> <state key="normal" image="close.png"/> <connections> <action selector="dismissAction:" destination="-1" eventType="touchUpInside" id="2NQ-1K-oHJ"/> </connections> </button> </subviews> <color key="backgroundColor" red="1" green="0.80784313730000001" blue="0.94509803920000002" alpha="1" colorSpace="custom" customColorSpace="sRGB"/> <constraints> <constraint firstItem="NhL-aB-nim" firstAttribute="centerX" secondItem="i5M-Pr-FkT" secondAttribute="centerX" id="T6t-UM-7IG"/> <constraint firstItem="svQ-qY-LSe" firstAttribute="centerX" secondItem="i5M-Pr-FkT" secondAttribute="centerX" id="e9U-hE-vo0"/> <constraint firstItem="NhL-aB-nim" firstAttribute="top" secondItem="tqv-hb-vlQ" secondAttribute="bottom" constant="26" id="eB4-5B-lM7"/> <constraint firstAttribute="bottom" secondItem="svQ-qY-LSe" secondAttribute="bottom" constant="50" id="ue0-Js-ds6"/> <constraint firstItem="tqv-hb-vlQ" firstAttribute="centerX" secondItem="i5M-Pr-FkT" secondAttribute="centerX" id="ujf-i4-RS3"/> <constraint firstItem="tqv-hb-vlQ" firstAttribute="top" secondItem="i5M-Pr-FkT" secondAttribute="top" constant="66" id="ule-HV-pZR"/> </constraints> <point key="canvasLocation" x="234.5" y="41.5"/> </view> </objects> <resources> <image name="close.png" width="128" height="128"/> </resources> </document>
{ "pile_set_name": "Github" }
loopback_users.guest = false listeners.tcp.default = 5672 management.listener.port = 15672 management.listener.ssl = false auth_backends.1 = http ## This configures rabbitmq_auth_backend_cache that delegates to ## the HTTP backend. If using this, make sure to comment the ## auth_backends.1 line above. ## # auth_backends.1 = cache # # auth_cache.cached_backend = http # auth_cache.cache_ttl = 5000 auth_http.http_method = get auth_http.user_path = http://auth-backend:8000/auth/user auth_http.vhost_path = http://auth-backend:8000/auth/vhost auth_http.resource_path = http://auth-backend:8000/auth/resource auth_http.topic_path = http://auth-backend:8000/auth/topic
{ "pile_set_name": "Github" }
#include <stdbool.h> #include <uv.h> #include "queue.h" // client message #define INPUT '0' #define RESIZE_TERMINAL '1' #define JSON_DATA '{' // server message #define OUTPUT '0' #define SET_WINDOW_TITLE '1' #define SET_PREFERENCES '2' // url paths struct endpoints { char *ws; char *index; char *token; char *parent; }; extern volatile bool force_exit; extern struct lws_context *context; extern struct server *server; extern struct endpoints endpoints; typedef enum { STATE_INIT, STATE_KILL, STATE_EXIT } proc_state; struct pss_http { char path[128]; char *buffer; char *ptr; size_t len; }; struct pty_proc { char **args; int argc; pid_t pid; int status; proc_state state; int pty; char *pty_buffer; ssize_t pty_len; uv_pipe_t pipe; LIST_ENTRY(pty_proc) entry; }; struct pss_tty { bool initialized; int initial_cmd_index; bool authenticated; char address[50]; char path[20]; struct lws *wsi; char *buffer; size_t len; struct pty_proc *proc; }; struct server { int client_count; // client count char *prefs_json; // client preferences char *credential; // encoded basic auth credential char *index; // custom index.html char *command; // full command line char **argv; // command with arguments int argc; // command + arguments count int sig_code; // close signal char sig_name[20]; // human readable signal string bool url_arg; // allow client to send cli arguments in URL bool readonly; // whether not allow clients to write to the TTY bool check_origin; // whether allow websocket connection from different origin int max_clients; // maximum clients to support bool once; // whether accept only one client and exit on disconnection char socket_path[255]; // UNIX domain socket path char terminal_type[30]; // terminal type to report uv_loop_t *loop; // the libuv event loop uv_signal_t watcher; // SIGCHLD watcher LIST_HEAD(proc, pty_proc) procs; // started process list };
{ "pile_set_name": "Github" }
# Makefile for the dss1_divert ISDN module # Each configuration option enables a list of files. obj-$(CONFIG_ISDN_DIVERSION) += dss1_divert.o # Multipart objects. dss1_divert-y := isdn_divert.o divert_procfs.o divert_init.o
{ "pile_set_name": "Github" }
{ "domain": "ci", "tags": [ "country", "geo" ], "whoisServer": "whois.nic.ci", "nameServers": [ "any.nic.ci", "censvrns0001.ird.fr", "ci.hosting.nic.fr", "ns-ci.afrinic.net", "ns.nic.ci", "phloem.uoregon.edu" ], "policies": [ { "type": "idn-disallowed" } ] }
{ "pile_set_name": "Github" }
/* * CAAM/SEC 4.x transport/backend driver * JobR backend functionality * * Copyright 2008-2012 Freescale Semiconductor, Inc. */ #include <linux/of_irq.h> #include <linux/of_address.h> #include "compat.h" #include "ctrl.h" #include "regs.h" #include "jr.h" #include "desc.h" #include "intern.h" struct jr_driver_data { /* List of Physical JobR's with the Driver */ struct list_head jr_list; spinlock_t jr_alloc_lock; /* jr_list lock */ } ____cacheline_aligned; static struct jr_driver_data driver_data; static int caam_reset_hw_jr(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); unsigned int timeout = 100000; /* * mask interrupts since we are going to poll * for reset completion status */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); /* initiate flush (required prior to reset) */ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == JRINT_ERR_HALT_INPROGRESS) && --timeout) cpu_relax(); if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != JRINT_ERR_HALT_COMPLETE || timeout == 0) { dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); return -EIO; } /* initiate reset */ timeout = 100000; wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) cpu_relax(); if (timeout == 0) { dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); return -EIO; } /* unmask interrupts */ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); return 0; } /* * Shutdown JobR independent of platform property code */ static int caam_jr_shutdown(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); dma_addr_t inpbusaddr, outbusaddr; int ret; ret = caam_reset_hw_jr(dev); tasklet_kill(&jrp->irqtask); /* Release interrupt */ free_irq(jrp->irq, dev); /* Free rings */ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); outbusaddr = rd_reg64(&jrp->rregs->outring_base); dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, jrp->inpring, inpbusaddr); dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, jrp->outring, outbusaddr); kfree(jrp->entinfo); return ret; } static int caam_jr_remove(struct platform_device *pdev) { int ret; struct device *jrdev; struct caam_drv_private_jr *jrpriv; jrdev = &pdev->dev; jrpriv = dev_get_drvdata(jrdev); /* * Return EBUSY if job ring already allocated. */ if (atomic_read(&jrpriv->tfm_count)) { dev_err(jrdev, "Device is busy\n"); return -EBUSY; } /* Remove the node from Physical JobR list maintained by driver */ spin_lock(&driver_data.jr_alloc_lock); list_del(&jrpriv->list_node); spin_unlock(&driver_data.jr_alloc_lock); /* Release ring */ ret = caam_jr_shutdown(jrdev); if (ret) dev_err(jrdev, "Failed to shut down job ring\n"); irq_dispose_mapping(jrpriv->irq); return ret; } /* Main per-ring interrupt handler */ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) { struct device *dev = st_dev; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); u32 irqstate; /* * Check the output ring for ready responses, kick * tasklet if jobs done. */ irqstate = rd_reg32(&jrp->rregs->jrintstatus); if (!irqstate) return IRQ_NONE; /* * If JobR error, we got more development work to do * Flag a bug now, but we really need to shut down and * restart the queue (and fix code). */ if (irqstate & JRINT_JR_ERROR) { dev_err(dev, "job ring error: irqstate: %08x\n", irqstate); BUG(); } /* mask valid interrupts */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); /* Have valid interrupt at this point, just ACK and trigger */ wr_reg32(&jrp->rregs->jrintstatus, irqstate); preempt_disable(); tasklet_schedule(&jrp->irqtask); preempt_enable(); return IRQ_HANDLED; } /* Deferred service handler, run as interrupt-fired tasklet */ static void caam_jr_dequeue(unsigned long devarg) { int hw_idx, sw_idx, i, head, tail; struct device *dev = (struct device *)devarg; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); u32 *userdesc, userstatus; void *userarg; while (rd_reg32(&jrp->rregs->outring_used)) { head = ACCESS_ONCE(jrp->head); spin_lock(&jrp->outlock); sw_idx = tail = jrp->tail; hw_idx = jrp->out_ring_read_index; for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { sw_idx = (tail + i) & (JOBR_DEPTH - 1); if (jrp->outring[hw_idx].desc == caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) break; /* found */ } /* we should never fail to find a matching descriptor */ BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); /* Unmap just-run descriptor so we can post-process */ dma_unmap_single(dev, caam_dma_to_cpu(jrp->outring[hw_idx].desc), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); /* mark completed, avoid matching on a recycled desc addr */ jrp->entinfo[sw_idx].desc_addr_dma = 0; /* Stash callback params for use outside of lock */ usercall = jrp->entinfo[sw_idx].callbk; userarg = jrp->entinfo[sw_idx].cbkarg; userdesc = jrp->entinfo[sw_idx].desc_addr_virt; userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus); /* * Make sure all information from the job has been obtained * before telling CAAM that the job has been removed from the * output ring. */ mb(); /* set done */ wr_reg32(&jrp->rregs->outring_rmvd, 1); jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & (JOBR_DEPTH - 1); /* * if this job completed out-of-order, do not increment * the tail. Otherwise, increment tail by 1 plus the * number of subsequent jobs already completed out-of-order */ if (sw_idx == tail) { do { tail = (tail + 1) & (JOBR_DEPTH - 1); } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && jrp->entinfo[tail].desc_addr_dma == 0); jrp->tail = tail; } spin_unlock(&jrp->outlock); /* Finally, execute user's callback */ usercall(dev, userdesc, userstatus, userarg); } /* reenable / unmask IRQs */ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); } /** * caam_jr_alloc() - Alloc a job ring for someone to use as needed. * * returns : pointer to the newly allocated physical * JobR dev can be written to if successful. **/ struct device *caam_jr_alloc(void) { struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; struct device *dev = ERR_PTR(-ENODEV); int min_tfm_cnt = INT_MAX; int tfm_cnt; spin_lock(&driver_data.jr_alloc_lock); if (list_empty(&driver_data.jr_list)) { spin_unlock(&driver_data.jr_alloc_lock); return ERR_PTR(-ENODEV); } list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { tfm_cnt = atomic_read(&jrpriv->tfm_count); if (tfm_cnt < min_tfm_cnt) { min_tfm_cnt = tfm_cnt; min_jrpriv = jrpriv; } if (!min_tfm_cnt) break; } if (min_jrpriv) { atomic_inc(&min_jrpriv->tfm_count); dev = min_jrpriv->dev; } spin_unlock(&driver_data.jr_alloc_lock); return dev; } EXPORT_SYMBOL(caam_jr_alloc); /** * caam_jr_free() - Free the Job Ring * @rdev - points to the dev that identifies the Job ring to * be released. **/ void caam_jr_free(struct device *rdev) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); atomic_dec(&jrpriv->tfm_count); } EXPORT_SYMBOL(caam_jr_free); /** * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, * -EBUSY if the queue is full, -EIO if it cannot map the caller's * descriptor. * @dev: device of the job ring to be used. This device should have * been assigned prior by caam_jr_register(). * @desc: points to a job descriptor that execute our request. All * descriptors (and all referenced data) must be in a DMAable * region, and all data references must be physical addresses * accessible to CAAM (i.e. within a PAMU window granted * to it). * @cbk: pointer to a callback function to be invoked upon completion * of this request. This has the form: * callback(struct device *dev, u32 *desc, u32 stat, void *arg) * where: * @dev: contains the job ring device that processed this * response. * @desc: descriptor that initiated the request, same as * "desc" being argued to caam_jr_enqueue(). * @status: untranslated status received from CAAM. See the * reference manual for a detailed description of * error meaning, or see the JRSTA definitions in the * register header file * @areq: optional pointer to an argument passed with the * original request * @areq: optional pointer to a user argument for use at callback * time. **/ int caam_jr_enqueue(struct device *dev, u32 *desc, void (*cbk)(struct device *dev, u32 *desc, u32 status, void *areq), void *areq) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct caam_jrentry_info *head_entry; int head, tail, desc_size; dma_addr_t desc_dma; desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32); desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, desc_dma)) { dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); return -EIO; } spin_lock_bh(&jrp->inplock); head = jrp->head; tail = ACCESS_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { spin_unlock_bh(&jrp->inplock); dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); return -EBUSY; } head_entry = &jrp->entinfo[head]; head_entry->desc_addr_virt = desc; head_entry->desc_size = desc_size; head_entry->callbk = (void *)cbk; head_entry->cbkarg = areq; head_entry->desc_addr_dma = desc_dma; jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma); /* * Guarantee that the descriptor's DMA address has been written to * the next slot in the ring before the write index is updated, since * other cores may update this index independently. */ smp_wmb(); jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & (JOBR_DEPTH - 1); jrp->head = (head + 1) & (JOBR_DEPTH - 1); /* * Ensure that all job information has been written before * notifying CAAM that a new job was added to the input ring. */ wmb(); wr_reg32(&jrp->rregs->inpring_jobadd, 1); spin_unlock_bh(&jrp->inplock); return 0; } EXPORT_SYMBOL(caam_jr_enqueue); /* * Init JobR independent of platform property detection */ static int caam_jr_init(struct device *dev) { struct caam_drv_private_jr *jrp; dma_addr_t inpbusaddr, outbusaddr; int i, error; jrp = dev_get_drvdata(dev); tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); /* Connect job ring interrupt handler. */ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, dev_name(dev), dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); goto out_kill_deq; } error = caam_reset_hw_jr(dev); if (error) goto out_free_irq; error = -ENOMEM; jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); if (!jrp->inpring) goto out_free_irq; jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * JOBR_DEPTH, &outbusaddr, GFP_KERNEL); if (!jrp->outring) goto out_free_inpring; jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); if (!jrp->entinfo) goto out_free_outring; for (i = 0; i < JOBR_DEPTH; i++) jrp->entinfo[i].desc_addr_dma = !0; /* Setup rings */ jrp->inp_ring_write_index = 0; jrp->out_ring_read_index = 0; jrp->head = 0; jrp->tail = 0; wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); wr_reg64(&jrp->rregs->outring_base, outbusaddr); wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); jrp->ringsize = JOBR_DEPTH; spin_lock_init(&jrp->inplock); spin_lock_init(&jrp->outlock); /* Select interrupt coalescing parameters */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); return 0; out_free_outring: dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, jrp->outring, outbusaddr); out_free_inpring: dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, jrp->inpring, inpbusaddr); dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); out_free_irq: free_irq(jrp->irq, dev); out_kill_deq: tasklet_kill(&jrp->irqtask); return error; } /* * Probe routine for each detected JobR subsystem. */ static int caam_jr_probe(struct platform_device *pdev) { struct device *jrdev; struct device_node *nprop; struct caam_job_ring __iomem *ctrl; struct caam_drv_private_jr *jrpriv; static int total_jobrs; int error; jrdev = &pdev->dev; jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL); if (!jrpriv) return -ENOMEM; dev_set_drvdata(jrdev, jrpriv); /* save ring identity relative to detection */ jrpriv->ridx = total_jobrs++; nprop = pdev->dev.of_node; /* Get configuration properties from device tree */ /* First, get register page */ ctrl = of_iomap(nprop, 0); if (!ctrl) { dev_err(jrdev, "of_iomap() failed\n"); return -ENOMEM; } jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; if (sizeof(dma_addr_t) == sizeof(u64)) { if (caam_dpaa2) error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(49)); else if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); else error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); } else { error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); } if (error) { dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", error); iounmap(ctrl); return error; } /* Identify the interrupt */ jrpriv->irq = irq_of_parse_and_map(nprop, 0); /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ if (error) { irq_dispose_mapping(jrpriv->irq); iounmap(ctrl); return error; } jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); list_add_tail(&jrpriv->list_node, &driver_data.jr_list); spin_unlock(&driver_data.jr_alloc_lock); atomic_set(&jrpriv->tfm_count, 0); return 0; } static const struct of_device_id caam_jr_match[] = { { .compatible = "fsl,sec-v4.0-job-ring", }, { .compatible = "fsl,sec4.0-job-ring", }, {}, }; MODULE_DEVICE_TABLE(of, caam_jr_match); static struct platform_driver caam_jr_driver = { .driver = { .name = "caam_jr", .of_match_table = caam_jr_match, }, .probe = caam_jr_probe, .remove = caam_jr_remove, }; static int __init jr_driver_init(void) { spin_lock_init(&driver_data.jr_alloc_lock); INIT_LIST_HEAD(&driver_data.jr_list); return platform_driver_register(&caam_jr_driver); } static void __exit jr_driver_exit(void) { platform_driver_unregister(&caam_jr_driver); } module_init(jr_driver_init); module_exit(jr_driver_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FSL CAAM JR request backend"); MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
{ "pile_set_name": "Github" }