prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from . import models<|fim▁hole|><|fim▁end|> | from .hooks import set_default_map_settings |
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
export default (store) => ({
path: 'admin/positions/add',
/* Async getComponent is only invoked when route matches */
getComponent (nextState, cb) {
/* Webpack - use 'require.ensure' to create a split point
and embed an async module loader (jsonp) when bundling */
require.ensure([], (require) => {
/* Webpack - use require callback to define
dependencies for bundling */
const AddPosition = require('./AddPosition.component').default
// const reducer = null;
/* Add the reducer to the store on key 'counter' */
// injectReducer(store, { key: 'addPositions', reducer })
/* Return getComponent */
cb(null, AddPosition)
/* Webpack named bundle */
}, 'addPositions')
}
})<|fim▁end|> | import { injectReducer } from '../../../../store/reducers' |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"flag"
"fmt"
"net/http"
. "gopkg.in/go-on/lib.v2/types"
. "gopkg.in/go-on/lib.v2/html"
"gopkg.in/go-on/lib.v2/internal/bootstrap/bs3"
"gopkg.in/go-on/cdncache.v1"
)
var (
port = flag.Int("port", 8083, "port of the http server")
mountPoint = flag.String("mountpoint", "", "mount point for the cached files (leave empty to prevent caching)")
)
func main() {
flag.Parse()
cdn := cdncache.CDN(*mountPoint)
http.Handle("/",<|fim▁hole|> CharsetUtf8(),
HttpEquiv("X-UA-Compatible", "IE=edge"),
Viewport("width=device-width, initial-scale=1"),
TITLE("Starter Template for Bootstrap"),
CssHref(cdn(bs3.CDN_3_1_1_min)),
CssHref(cdn(bs3.CDN_3_1_1_theme_min)),
CssHref(cdn("http://getbootstrap.com/examples/starter-template/starter-template.css")),
HTMLString(fmt.Sprintf(`
<!--[if lt IE 9]>
<script src="%s"></script>
<script src="%s"></script>
<![endif]-->
`,
cdn("https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"),
cdn("https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"),
),
),
),
BODY(
DIV(bs3.Navbar, bs3.Navbar_inverse, bs3.Navbar_fixed_top,
Attrs_("role", "navigation"),
DIV(bs3.Container,
DIV(bs3.Navbar_header,
BUTTON(bs3.Navbar_toggle,
Type_("button"),
Attrs_("data-toggle", "collapse", "data-target", ".navbar-collapse"),
SPAN(bs3.Sr_only, "Toogle navigation"),
SPAN(bs3.Icon_bar),
SPAN(bs3.Icon_bar),
SPAN(bs3.Icon_bar),
),
AHref("#", bs3.Navbar_brand, "Project name"),
),
DIV(bs3.Collapse, bs3.Navbar_collapse,
UL(bs3.Nav, bs3.Navbar_nav,
LI(bs3.Active, AHref("#", "Home")),
LI(AHref("#about", "About")),
LI(AHref("#contact", "Contact")),
),
),
),
),
DIV(bs3.Container,
DIV(Class("starter-template"),
H1("Bootstrap starter template"),
P(bs3.Lead,
"Use this document as a way to quickly start any new project.",
BR(),
"All you get is this text and a mostly barebones HTML document.",
),
),
),
JsSrc(cdn("//code.jquery.com/jquery-1.11.0.min.js")),
JsSrc(cdn(bs3.CDN_3_1_1_js_min)),
),
),
)
fmt.Printf("listening on localhost: %d\n", *port)
http.ListenAndServe(fmt.Sprintf(":%d", *port), nil)
}<|fim▁end|> | HTML5(
Lang_("en"),
HEAD( |
<|file_name|>GameRoom.java<|end_file_name|><|fim▁begin|>package work.notech.poker.room;
import work.notech.poker.logic.Cards;
import java.util.List;
public class GameRoom {
private Integer roomIds;
private List<Integer> clientIds;
private Cards cards;
<|fim▁hole|>
public void setRoomIds(Integer roomIds) {
this.roomIds = roomIds;
}
public List<Integer> getClientIds() {
return clientIds;
}
public void setClientIds(List<Integer> clientIds) {
this.clientIds = clientIds;
}
public Cards getCards() {
return cards;
}
public void setCards(Cards cards) {
this.cards = cards;
}
}<|fim▁end|> | public Integer getRoomIds() {
return roomIds;
} |
<|file_name|>index-loader-syntax.js<|end_file_name|><|fim▁begin|>import one from './index-loader-syntax.css';
import two from 'button.modules.css!=!./index-loader-syntax-sass.css';
// Hash should be different
import three from './button.module.scss!=!./base64-loader?LmZvbyB7IGNvbG9yOiByZWQ7IH0=!./simple.js?foo=bar';
import four from './other.module.scss!=!./base64-loader?LmZvbyB7IGNvbG9yOiByZWQ7IH0=!./simple.js?foo=baz';<|fim▁hole|>
export default [...one, ...two, ...three, ...four];<|fim▁end|> |
__export__ = [...one, ...two, ...three, ...four]; |
<|file_name|>issue-3477.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let _p: char = 100; //~ ERROR mismatched types: expected `char`, found<|fim▁hole|>}<|fim▁end|> | |
<|file_name|>number_of_lines_between_tokens.py<|end_file_name|><|fim▁begin|>from vsg.rule_group import length
from vsg import violation
class number_of_lines_between_tokens(length.Rule):
'''
Checks the number of lines between tokens do not exceed a specified number
Parameters
----------
name : string
The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
'''
def __init__(self, name, identifier, oLeftToken, oRightToken, iLines):
length.Rule.__init__(self, name=name, identifier=identifier)
self.length = iLines
self.oLeftToken = oLeftToken
self.oRightToken = oRightToken
def _get_tokens_of_interest(self, oFile):
return oFile.get_line_count_between_tokens(self.oLeftToken, self.oRightToken)
def _analyze(self, lToi):
for oToi in lToi:<|fim▁hole|> sSolution = 'Reduce process to less than ' + str(self.length) + ' lines'
oViolation = violation.New(oToi.get_line_number(), None, sSolution)
self.add_violation(oViolation)<|fim▁end|> | if oToi.get_token_value() > self.length: |
<|file_name|>CursusController.java<|end_file_name|><|fim▁begin|>/*
* Copyright © Région Nord Pas de Calais-Picardie.
*
* This file is part of OPEN ENT NG. OPEN ENT NG is a versatile ENT Project based on the JVM and ENT Core Project.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation (version 3 of the License).
*
* For the sake of explanation, any module that communicate over native
* Web protocols, such as HTTP, with OPEN ENT NG is outside the scope of this
* license and could be license under its own terms. This is merely considered
* normal use of OPEN ENT NG, and does not fall under the heading of "covered work".
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
package org.entcore.cursus.controllers;
import java.net.URL;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Map;
import io.vertx.core.http.*;
import org.entcore.common.http.filter.ResourceFilter;
import org.entcore.common.user.UserInfos;
import org.entcore.common.user.UserUtils;
import org.entcore.common.utils.MapFactory;
import org.entcore.cursus.filters.CursusFilter;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import org.vertx.java.core.http.RouteMatcher;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import fr.wseduc.rs.*;
import fr.wseduc.security.ActionType;
import fr.wseduc.security.SecuredAction;
import fr.wseduc.webutils.Either;
import fr.wseduc.webutils.http.BaseController;<|fim▁hole|>public class CursusController extends BaseController {
//Service
private final CursusService service = new CursusService();
//Webservice client & endpoint
private HttpClient cursusClient;
private final URL wsEndpoint;
//Webservice auth request conf
private final JsonObject authConf;
//Auth reply data & wallets list
private Map<String, String> cursusMap;
@Override
public void init(Vertx vertx, JsonObject config, RouteMatcher rm,
Map<String, fr.wseduc.webutils.security.SecuredAction> securedActions) {
super.init(vertx, config, rm, securedActions);
HttpClientOptions cursusClientOptions = new HttpClientOptions()
.setDefaultHost(wsEndpoint.getHost());
if("https".equals(wsEndpoint.getProtocol())){
cursusClientOptions
.setSsl(true)
.setTrustAll(true)
.setDefaultPort(443);
} else {
cursusClientOptions
.setDefaultPort(wsEndpoint.getPort() == -1 ? 80 : wsEndpoint.getPort());
}
cursusClient = vertx.createHttpClient(cursusClientOptions);
cursusMap = MapFactory.getSyncClusterMap("cursusMap", vertx, false);
/*
service.refreshToken(new Handler<Boolean>() {
public void handle(Boolean res) {
if(!res)
log.error("[Cursus][refreshToken] Error while retrieving the Token.");
else
log.info("[Cursus][refreshToken] Token refreshed.");
}
});
*/
if(cursusMap.containsKey("wallets"))
return;
service.refreshWallets(new Handler<Boolean>() {
public void handle(Boolean res) {
if(!res)
log.error("[Cursus][refreshWallets] Error while retrieving the wallets list.");
else
log.info("[Cursus][refreshWallets] Wallets list refreshed.");
}
});
}
public CursusController(URL endpoint, final JsonObject conf){
wsEndpoint = endpoint;
authConf = conf;
}
@Put("/refreshToken")
@SecuredAction(value = "", type = ActionType.RESOURCE)
@ResourceFilter(CursusFilter.class)
public void refreshToken(final HttpServerRequest request){
service.refreshToken(new Handler<Boolean>() {
public void handle(Boolean success) {
if(success){
ok(request);
} else {
badRequest(request);
}
}
});
}
@Put("/refreshWallets")
@SecuredAction(value = "", type = ActionType.RESOURCE)
@ResourceFilter(CursusFilter.class)
public void refreshWallets(final HttpServerRequest request){
service.refreshWallets(new Handler<Boolean>() {
public void handle(Boolean success) {
if(success){
ok(request);
} else {
badRequest(request);
}
}
});
}
@Get("/sales")
@SecuredAction(value = "", type = ActionType.AUTHENTICATED)
public void getSales(final HttpServerRequest request){
final String cardNb = request.params().get("cardNb");
if(cardNb == null){
badRequest(request);
return;
}
service.getUserInfo(cardNb, new Handler<Either<String,JsonArray>>() {
public void handle(Either<String, JsonArray> result) {
if(result.isLeft()){
badRequest(request);
return;
}
final String id = result.right().getValue().getJsonObject(0).getInteger("id").toString();
String birthDateEncoded = result.right().getValue().getJsonObject(0).getString("dateNaissance");
try {
birthDateEncoded = birthDateEncoded.replace("/Date(", "");
birthDateEncoded = birthDateEncoded.substring(0, birthDateEncoded.indexOf("+"));
final Date birthDate = new Date(Long.parseLong(birthDateEncoded));
UserUtils.getUserInfos(eb, request, new Handler<UserInfos>() {
public void handle(UserInfos infos) {
DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
try {
Date sessionBirthDate = format.parse(infos.getBirthDate());
if(sessionBirthDate.compareTo(birthDate) == 0){
service.getSales(id, cardNb, new Handler<Either<String,JsonArray>>() {
public void handle(Either<String, JsonArray> result) {
if(result.isLeft()){
badRequest(request);
return;
}
JsonObject finalResult = new JsonObject()
.put("wallets", new JsonArray(cursusMap.get("wallets")))
.put("sales", result.right().getValue());
renderJson(request, finalResult);
}
});
} else {
badRequest(request);
}
} catch (ParseException e) {
badRequest(request);
return;
}
}
});
} catch(Exception e){
badRequest(request);
}
}
});
}
/**
* Inner service class.
*/
private class CursusService{
public void authWrapper(final Handler<Boolean> handler){
JsonObject authObject = new JsonObject();
if(cursusMap.get("auth") != null)
authObject = new JsonObject(cursusMap.get("auth"));
Long currentDate = Calendar.getInstance().getTimeInMillis();
Long expirationDate = 0l;
if(authObject != null)
expirationDate = authObject.getLong("tokenInit", 0l) + authConf.getLong("tokenDelay", 1800000l);
if(expirationDate < currentDate){
log.info("[Cursus] Token seems to have expired.");
refreshToken(handler);
} else {
handler.handle(true);
}
}
public void refreshToken(final Handler<Boolean> handler){
HttpClientRequest req = cursusClient.post(wsEndpoint.getPath() + "/AuthentificationImpl.svc/json/AuthentificationExtranet", new Handler<HttpClientResponse>() {
public void handle(HttpClientResponse response) {
if(response.statusCode() >= 300){
handler.handle(false);
log.error(response.statusMessage());
return;
}
response.bodyHandler(new Handler<Buffer>() {
public void handle(Buffer body) {
log.info("[Cursus][refreshToken] Token refreshed.");
JsonObject authData = new JsonObject(body.toString());
authData.put("tokenInit", new Date().getTime());
cursusMap.put("auth", authData.encode());
handler.handle(true);
}
});
}
});
req.putHeader(HttpHeaders.ACCEPT, "application/json; charset=UTF-8")
.putHeader(HttpHeaders.CONTENT_TYPE, "application/json");
req.end(authConf.encode());
}
public void refreshWallets(final Handler<Boolean> handler){
authWrapper(new Handler<Boolean>() {
public void handle(Boolean gotToken) {
if(!gotToken){
handler.handle(false);
return;
}
int schoolYear = Calendar.getInstance().get(Calendar.MONTH) < 8 ?
Calendar.getInstance().get(Calendar.YEAR) - 1 :
Calendar.getInstance().get(Calendar.YEAR);
/* JSON */
JsonObject reqBody = new JsonObject();
reqBody
.put("numSite", authConf.getString("numSite"))
.put("tokenId", new JsonObject(cursusMap.get("auth")).getString("tokenId"))
.put("typeListes", new JsonArray()
.add(new JsonObject()
.put("typeListe", "LST_PORTEMONNAIE")
.put("param1", schoolYear + "-" + (schoolYear + 1))
)
);
/* */
/* XML /
String reqBody =
"<tem:GetListes xmlns:tem=\"http://tempuri.org/\" xmlns:wcf=\"http://schemas.datacontract.org/2004/07/WcfExtranetChequeBL.POCO.Parametres\">" +
"<tem:numSite>"+ authConf.getString("numSite") +"</tem:numSite>" +
"<tem:typeListes>" +
"<wcf:RechercheTypeListe>" +
"<wcf:typeListe>LST_PORTEMONNAIE</wcf:typeListe>" +
"<wcf:param1>"+ schoolYear + "-" + (schoolYear + 1) +"</wcf:param1>" +
"</wcf:RechercheTypeListe>" +
"</tem:typeListes>" +
"<tem:tokenId>"+ authData.getString("tokenId") +"</tem:tokenId>" +
"</tem:GetListes>";
/* */
HttpClientRequest req = cursusClient.post(wsEndpoint.getPath() + "/GeneralImpl.svc/json/GetListes", new Handler<HttpClientResponse>() {
public void handle(HttpClientResponse response) {
if(response.statusCode() >= 300){
handler.handle(false);
log.error(response.statusMessage());
return;
}
response.bodyHandler(new Handler<Buffer>() {
public void handle(Buffer body) {
try{
cursusMap.put("wallets", new JsonArray(body.toString()).getJsonObject(0)
.getJsonArray("parametres").encode());
handler.handle(true);
} catch(Exception e){
handler.handle(false);
}
}
});
}
});
req.putHeader(HttpHeaders.ACCEPT, "application/json; charset=UTF-8")
.putHeader(HttpHeaders.CONTENT_TYPE, "application/json");
req.end(reqBody.encode());
}
});
};
public void getUserInfo(final String cardNb, final Handler<Either<String, JsonArray>> handler){
authWrapper(new Handler<Boolean>() {
public void handle(Boolean gotToken) {
if(!gotToken){
handler.handle(new Either.Left<String, JsonArray>("[Cursus][getUserInfo] Issue while retrieving token."));
return;
}
JsonObject reqBody = new JsonObject();
reqBody
.put("numSite", authConf.getString("numSite"))
.put("tokenId", new JsonObject(cursusMap.get("auth")).getString("tokenId"))
.put("filtres", new JsonObject()
.put("numeroCarte", cardNb));
HttpClientRequest req = cursusClient.post(wsEndpoint.getPath() + "/BeneficiaireImpl.svc/json/GetListeBeneficiaire", new Handler<HttpClientResponse>() {
public void handle(HttpClientResponse response) {
if(response.statusCode() >= 300){
handler.handle(new Either.Left<String, JsonArray>("invalid.status.code"));
return;
}
response.bodyHandler(new Handler<Buffer>() {
public void handle(Buffer body) {
handler.handle(new Either.Right<String, JsonArray>(new JsonArray(body.toString())));
}
});
}
});
req.putHeader(HttpHeaders.ACCEPT, "application/json; charset=UTF-8")
.putHeader(HttpHeaders.CONTENT_TYPE, "application/json");
req.end(reqBody.encode());
}
});
}
public void getSales(final String numeroDossier, final String cardNb, final Handler<Either<String, JsonArray>> handler){
authWrapper(new Handler<Boolean>() {
public void handle(Boolean gotToken) {
if(!gotToken){
handler.handle(new Either.Left<String, JsonArray>("[Cursus][getSales] Issue while retrieving token."));
return;
}
JsonObject reqBody = new JsonObject();
reqBody
.put("numeroSite", authConf.getString("numSite"))
.put("tokenId", new JsonObject(cursusMap.get("auth")).getString("tokenId"))
.put("filtresSoldesBeneficiaire", new JsonObject()
.put("numeroDossier", numeroDossier)
.put("numeroCarte", cardNb));
HttpClientRequest req = cursusClient.post(wsEndpoint.getPath() + "/BeneficiaireImpl.svc/json/GetSoldesBeneficiaire", new Handler<HttpClientResponse>() {
public void handle(HttpClientResponse response) {
if(response.statusCode() >= 300){
handler.handle(new Either.Left<String, JsonArray>("invalid.status.code"));
return;
}
response.bodyHandler(new Handler<Buffer>() {
public void handle(Buffer body) {
handler.handle(new Either.Right<String, JsonArray>(new JsonArray(body.toString())));
}
});
}
});
req.putHeader(HttpHeaders.ACCEPT, "application/json; charset=UTF-8")
.putHeader(HttpHeaders.CONTENT_TYPE, "application/json");
req.end(reqBody.encode());
}
});
}
}
}<|fim▁end|> | |
<|file_name|>treeDnd.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import * as _ from 'vs/base/parts/tree/browser/tree';
import * as Mouse from 'vs/base/browser/mouseEvent';
export class ElementsDragAndDropData implements _.IDragAndDropData {
private elements: any[];
constructor(elements: any[]) {
this.elements = elements;
}
public update(event: Mouse.DragMouseEvent): void {
// no-op
}
public getData(): any {
return this.elements;
}
}
export class ExternalElementsDragAndDropData implements _.IDragAndDropData {
private elements: any[];
constructor(elements: any[]) {
this.elements = elements;
}
public update(event: Mouse.DragMouseEvent): void {
// no-op
}
public getData(): any {<|fim▁hole|>
export class DesktopDragAndDropData implements _.IDragAndDropData {
private types: any[];
private files: any[];
constructor() {
this.types = [];
this.files = [];
}
public update(event: Mouse.DragMouseEvent): void {
if (event.dataTransfer.types) {
this.types = [];
Array.prototype.push.apply(this.types, event.dataTransfer.types);
}
if (event.dataTransfer.files) {
this.files = [];
Array.prototype.push.apply(this.files, event.dataTransfer.files);
this.files = this.files.filter(f => f.size || f.type);
}
}
public getData(): any {
return {
types: this.types,
files: this.files
};
}
}<|fim▁end|> | return this.elements;
}
} |
<|file_name|>RequestQueueManager.js<|end_file_name|><|fim▁begin|>"use strict";
const Utils = require("../../core/Utils");
const RequestQueue = require("./RequestQueue");
const ChainedBucket = require("./ChainedBucket");
class RequestQueueGroup {
constructor(bucketFactory) {
this.queues = {};
this.bucketFactory = bucketFactory || null;
if (!(bucketFactory instanceof BucketFactory)) {
throw new TypeError(
"Param 'bucketFactory' is not an instance of BucketFactory"
);
}
}
get(id) {
if (!this.queues[id]) {
const bucket = (this.bucketFactory && this.bucketFactory.get(id));
this.queues[id] = new RequestQueue(bucket);
}
this.queues[id].id = id;
return this.queues[id];
}
delete(id) {
if (this.bucketFactory)
this.bucketFactory.delete(id);
delete this.queues[id];
}
deleteContaining(id) {
var keys = Object.keys(this.queues);
for (var i = 0, len = keys.length; i < len; i++) {
var key = keys[i];
if (!key || key.indexOf(id) < 0) continue;
this.delete(key);
}
}
}
class BucketFactory {
constructor(manager, size, duration, name, parent) {
this.manager = manager;
this.size = size;
this.duration = duration;
this.name = name;
this.parent = parent || null;
if (!(manager instanceof RequestQueueManager))
throw new TypeError("Param 'manager' is invalid");
if (typeof size !== "number")
throw new TypeError("Param 'size' is not a number");
if (typeof duration !== "number")
throw new TypeError("Param 'duration' is not a number");
if (typeof name !== "string")
throw new TypeError("Param 'name' is not a string");
}
makeName(id) {
return this.name + ":" + id;
}
get(id) {
const parent =
this.parent instanceof BucketFactory ?
this.parent.get(id) :
this.parent;
return this.manager._createBucket(
this.size, this.duration, this.makeName(id), parent
);
}
delete(id) {
delete this.manager.buckets[this.makeName(id)];
}
}
class RequestQueueManager {
constructor(discordie) {
this._discordie = discordie;
this.isBot = true;
this.disabled = false;
this.buckets = {};
this.bucketFactories = {};
// whole API, bucket blocks when client gets a HTTP 429 with global flag
const _bot_global =
this._createBucket(Infinity, 1000, "bot:global");
this.globalBucket = _bot_global;
// msg 10/10s
const _msg =
this._createBucket(10, 10000, "msg", _bot_global);
this.userMessageQueue = new RequestQueue(_msg);
// per-channel bot:msg:dm 10/10s
const _bot_msg_dm =
this._createBucketFactory(10, 10000, "bot:msg:dm", _bot_global);
this.botDirectMessageQueues = new RequestQueueGroup(_bot_msg_dm);
// per-guild bot:msg:server 10/10s
const _bot_msg_server =
this._createBucketFactory(10, 10000, "bot:msg:server", _bot_global);
this.botMessageQueues = new RequestQueueGroup(_bot_msg_server);
// per-guild dmsg 5/1s
const _dmsg =
this._createBucketFactory(5, 1000, "dmsg", _bot_global);
this.messageDeleteQueues = new RequestQueueGroup(_dmsg);
// per-guild bdmsg 1/1s
const _bdmsg =
this._createBucketFactory(1, 1000, "bdmsg", _bot_global);
this.messageBulkDeleteQueues = new RequestQueueGroup(_bdmsg);
// per-guild guild_member 10/10s
const _guild_member =
this._createBucketFactory(10, 10000, "guild_member", _bot_global);
this.guildMemberPatchQueues = new RequestQueueGroup(_guild_member);
// per-guild guild_member_nick 1/1s
const _guild_member_nick =
this._createBucketFactory(1, 1000, "guild_member_nick", _bot_global);
this.guildMemberNickQueues = new RequestQueueGroup(_guild_member_nick);
// all other requests go here with route as key
// bucket size should be set by HTTP headers
const _bot_generic =
this._createBucketFactory(Infinity, 5000, "bot:generic", _bot_global);
this.genericRequestQueues = new RequestQueueGroup(_bot_generic);
discordie.Dispatcher.on("GATEWAY_DISPATCH", e => {
if (!e.data) return;
if (e.type === "READY") {
if (!e.data.user) return;
this.isBot = e.data.user.bot || false;
}
if (e.type === "GUILD_DELETE") {
this._deleteGuildQueues(e.data.id);
}
if (e.type === "CHANNEL_DELETE") {
this._deleteChannelQueues(e.data.id);
}
});
Utils.privatify(this);
}
_reset() {
Object.keys(this.buckets).forEach(k => this.buckets[k].refill());
}
_createBucket(size, duration, name, parent) {
if (!this.buckets[name]) {
this.buckets[name] = new ChainedBucket(size, duration, name, parent);
} else {
this.buckets[name].refill();
}
return this.buckets[name];
}
_createBucketFactory(size, duration, name, parent) {
if (!this.bucketFactories[name]) {
this.bucketFactories[name] =
new BucketFactory(this, size, duration, name, parent);
}
return this.bucketFactories[name];
}
put(request, sendCallback) {
const route = request.path
.replace(/\/\d+$/g, "")
.replace(/\d+/g, ":id");
// convert to route: <- /api/guilds/:guild_id/bans/:user_id
// -> /api/guilds/:id/bans
// <- /api/channels/:channel_id
// -> /api/channels/
const queue = this.genericRequestQueues.get(route);
this._enqueueTo(queue, request, sendCallback);
}
putToRoute(request, route, sendCallback) {
const queue = this.genericRequestQueues.get(route);
this._enqueueTo(queue, request, sendCallback);
}
putMessage(request, channelId, sendCallback) {
const channel = this._discordie._channels.get(channelId);
var queue = this.userMessageQueue;
if (this.isBot && channel) {
if (channel.is_private || !channel.guild_id) {
queue = this.botDirectMessageQueues.get(channelId);
} else {
queue = this.botMessageQueues.get(channel.guild_id);
}
}
this._enqueueTo(queue, request, sendCallback);
}
putDeleteMessage(request, channelId, sendCallback) {
const group = this.messageDeleteQueues;
this._enqueueToGroup(group, request, channelId, sendCallback);
}
putBulkDeleteMessage(request, channelId, sendCallback) {
const group = this.messageBulkDeleteQueues;
this._enqueueToGroup(group, request, channelId, sendCallback);
}
putGuildMemberPatch(request, guildId, sendCallback) {
const queue = this.guildMemberPatchQueues.get(guildId);
this._enqueueTo(queue, request, sendCallback);<|fim▁hole|> }
putGuildMemberNick(request, guildId, sendCallback) {
const queue = this.guildMemberNickQueues.get(guildId);
this._enqueueTo(queue, request, sendCallback);
}
_enqueueToGroup(group, request, channelId, sendCallback) {
const channel = this._discordie._channels.get(channelId);
const guildId = (channel && channel.guild_id) || null;
this._enqueueTo(group.get(guildId), request, sendCallback);
}
_enqueueTo(queue, request, sendCallback) {
if (this.disabled) {
return request.send(sendCallback);
}
queue.enqueue(request, sendCallback);
}
_deleteGuildQueues(guildId) {
const groups = [
this.botMessageQueues,
this.messageDeleteQueues,
this.messageBulkDeleteQueues,
this.guildMemberPatchQueues
];
groups.forEach(g => g.delete(guildId));
this.genericRequestQueues.deleteContaining(guildId);
}
_deleteChannelQueues(channelId) {
this.botDirectMessageQueues.delete(channelId);
this.genericRequestQueues.deleteContaining(channelId);
}
}
module.exports = RequestQueueManager;<|fim▁end|> | |
<|file_name|>KW.py<|end_file_name|><|fim▁begin|># news key word catch
import os
import random
import time
import tushare as ts
import math
import pandas
import threading
from MYSORT import *
from programdiary import *
import Stock_config_kit as Skit
import ForgeModel
COLLECTORSHOWNUM=5
fgt={'a':0.01,'a_2':0.01,'lam':0.01}
DIARYNAME='DIARY_Ver.0.1_ty2_0.01_0.01'
def readsinatime(timestr):
if timestr:
try:
Year=int(time.strftime('%Y',time.gmtime()) )
[F,L]=timestr.split(' ')
[mon,day]=F.split('-')
[hour,minus]=L.split(':')
mon=int(mon)
day=int(day)
hour=int(hour)
minus=int(minus)
# in time.mktime the #6, #7's value do not matter (#0--#8)
except:
print('the timestr is not enough for unpacking process :%s'%timestr)
[Year,mon,day,hour,minus]=[2016,0,0,0,0]
else:
[Year,mon,day,hour,minus]=[2016,0,0,0,0]
return time.mktime( (Year,mon,day,hour,minus,0,0,0,0) )
class loopobj(threading.Thread):
loopflag=0 #1 means stop
def __init__(self):
threading.Thread.__init__(self)
def setlooppara(self,span,funchandle,*funcpara):
self.span=span
self.funch=funchandle
self.funcpara=funcpara
def stoploop(self):
self.loopflag=1
def timeloop(self):
print(self.name+ ': loop start')
while self.loopflag==0:
time.sleep(self.span)
if self.funcpara[0]==None:
self.funch()
else:
#self.funch(self.funcpara)
#print('stock')
parastr=''
for i in range(len( self.funcpara)):
parastr+='self.funcpara[%d]'% (i)
eval('self.funch(%s)'%parastr)
print(self.name+ ' timeloop end')
def run(self):
self.timeloop()
class Keyword:
wordset=None
Counter=0
distribution=None
weight=None
show_dis_flag=False
def __init__(self,wordset,weight):
if(isinstance(wordset,list)):
self.wordset=wordset
self.distribution=[0 for n in range(len( wordset) )] ######################## good!
self.weight=weight
self._sortkeyword()
def modify_keyword(self,mode,M_KW):
if not isinstance(mode,1):
if(mode==1):
# delte keyword from KW
# delte the relavant distri from KW
for KW in M_KW:
I=self.index(KW)
self.wordset.remove(KW)
self.distribution.remove(self.distribution[I])
self.weight.remove(self.weight[I])
# or del self.distribution[I]
elif(mode==2):
for KW in M_KW:
self.wordset.append(KW)
self.distribution.append(0)
self.weight.append(1)
self._sortkeyword(self)
# creat%
# creat$
else:
print('Unknown Mode Number. mode=1 for subtracting keywords,mode=2 for adding keywords')
def modify_distri(self,prey):
ind=0
for ele in prey:
self.distribution[ind]+=ele
ind+=1
def show_distri(self):
# print like 'word': times 'word2': times
i=0
output=''
while i<len(self.distribution):
output+='%s: %s '%(self.wordset[i],self.distribution[i])
i+=1
print(output)
def hunt(self,preystr):
i=0
dis=[0 for n in range(len( self.wordset))]
c_flag=False
for key in self.wordset:
findcounter=False
findhead=-1
for j in range(len(self.wordset)):
findhead=preystr.find( key , findhead+1 )
if findhead == -1:
break
findcounter+=int(bool(1+findhead))
c_flag=True
dis[i]+=findcounter
#self.distribution[i]+=findcounter
i+=1
self.modify_distri(dis)
if c_flag==True and self.show_dis_flag==True:
self.show_distri()
def _sortkeyword(self):
try:
List_one=self.wordset
Capital_L_one=[]
for element in List_one:
try:
Capital_L_one.append( ord( element[0] ) )
except:
print(self.name,' :',element )
[self.wordset,self.distribution,self.weight]=mysort(Capital_L_one,self.wordset,self.distribution,self.weight)
except:
print(self.name,' ',self.wordset)
class stock_info(Keyword,loopobj,ForgeModel.Forge):
name=''
code=''
browsetime=''
Name_W=1
Code_W=1
Area_W=1
Ind_W=2
Con_W=2
def __init__(self,name='',code='',area=[],industry=[],concept=[],a=fgt['a'],a_2=fgt['a_2'],lam=fgt['lam']):
#some time later would add a return to creat more cal
self.name=name
self.code=code
self.area=area
self.industry=industry
self.concept=concept
#self.business=business
ForgeModel.Forge.__init__(self,a,a_2,lam)
Keyword.__init__(self,[code]+[name]+area+industry+concept,self.ini_weight() )
loopobj.__init__(self)
def ini_weight(self):
W=[self.Code_W,self.Name_W]
if self.area:
W.append(self.Area_W)
if isinstance( self.industry,list) and self.industry!=[]:
for i in self.industry:
W.append(self.Ind_W)
if isinstance( self.concept,list) and self.concept!=[]:
for i in self.concept:
W.append(self.Con_W)
return W
def Trum(self,Newsobj):
if Newsobj.latesttime!=self.browsetime:
brt=readsinatime(self.browsetime)
newst=readsinatime(Newsobj.latesttime)
if brt==(0,0,0,0,0,0,0,0,0):
deltat=0
elif newst==(0,0,0,0,0,0,0,0,0):
deltat=0
else:
deltat=newst-brt
self.stimulate_forge_type2(self.distribution,deltat)
self.browsetime=Newsobj.latesttime
self.hunt(Newsobj.latestnew)
else:
pass
class News(loopobj):
name=None
newslength=1
show_c=True
Newsmemory= pandas.DataFrame()
latesttime=''
latestnew=''
def __init__(self,name):
self.name=name
loopobj.__init__(self)
def modify_newspara(self,nl,show_c):
self.newslength=nl
self.show_c=show_c
def Newsget(self):
#PDnews=ts.get_latest_news(top=self.newslength,show_content=self.show_c)
PDnews=ts.get_latest_news(top=self.newslength,show_content=False)
try:
if PDnews.ix[0,'time']!=self.latesttime:
Newsmemory=pandas.concat([self.Newsmemory,PDnews],axis=0) #按行合并
#self.latesttime=PDnews.ix[0,'time']
if self.show_c:
try:
Content=None
C_C=0
while not Content and C_C<5:
Content=ts.latest_content(PDnews.ix[0,'url'])
C_C+=1
except:
print('latest_content api fail to load url:%s'%PDnews.ix[0,'url'])
Content=''
#self.latestnew=PDnews.ix[0,'classify']+PDnews.ix[0,'title']+Content
self.latestnew=PDnews.ix[0,'title']+Content
else:
#self.latestnew=PDnews.ix[0,'classify']+PDnews.ix[0,'title'] #maybe content later
self.latestnew=PDnews.ix[0,'title']
print(PDnews[['classify','title','time']])
if PDnews.ix[0,'time']:
print('old latesttime %s'%self.latesttime)
self.latesttime=PDnews.ix[0,'time']
print('new latesttime %s'%self.latesttime)
except:
print('Get Latest News Error')
class Collector(loopobj):
    """Periodic aggregator: scores every stock, ranks them, writes a diary
    report and narrows the config's code list to the top scorers."""
    name=None                   # collector display name
    dissum=None                 # per-stock weighted distribution sums
    showNum=COLLECTORSHOWNUM    # how many rank groups to report
    def __init__(self,name,diary,configfile):
        self.name=name
        self.diaryfile=diary
        self.configfile=configfile
        loopobj.__init__(self)
    def info_collect(self,stocklist):
        """Compute, for every stock, the weight-scaled sum of its keyword
        hit distribution into self.dissum (same order as stocklist)."""
        self.dissum=[0 for i in range(len(stocklist))]
        #try:
        i=0
        for i, stock in enumerate( stocklist):
            _dissum=0
            for j,ele_dis in enumerate( stock.distribution):
                _dissum+=ele_dis*stock.weight[j]
            self.dissum[i]=_dissum
        #except:
        #    print('Collector Error')
    def info_process(self,stocklist):
        """Rank the scores descending and group equal scores.

        Produces parallel lists:
          orderlist   — score of each reported group,
          counterlist — number of stocks sharing that score,
          _indexlist  — original stocklist indices in each group.

        NOTE(review): relies on external mysort() returning a 1-element
        list wrapping the reordered index list — confirm its contract.
        """
        self._indexlist=[]
        self.counterlist=[]
        self.orderlist=[]
        Ind=0
        OldInd=None
        indexlist=[n for n in range(len(stocklist)) ]
        [indexlist]=mysort(self.dissum,indexlist)
        # mysort appears to sort ascending; reverse both for descending rank.
        self.dissum.reverse()
        indexlist.reverse()
        for i in range(self.showNum):
            temp=self.dissum[Ind]
            self.orderlist.append(temp)
            counter=1
            if Ind+1<len(self.dissum):
                # Count how many following entries tie with the current score.
                for j in range(Ind+1,len(self.dissum)):
                    if self.dissum[j]==temp:
                        counter+=1
                    else:
                        OldInd=Ind
                        Ind=Ind+counter
                        break
            if OldInd!=None :
                self.counterlist.append(counter)
                self._indexlist.append(indexlist[OldInd:OldInd+counter])
    def report(self,stocklist):
        """Format one line per rank group and flush it to the diary file."""
        reportlist=[]
        for i in range( len(self.counterlist) ):
            freq=self.counterlist[i]
            order=self.orderlist[i]
            # Skip degenerate groups: zero score, or every stock tied.
            if order!=0 and freq != len(stocklist):
                SNameList=[]
                for j in range( len( self._indexlist[i] )):
                    ind=self._indexlist[i][j]
                    SNameList.append(stocklist[ind].name)
                restr='Order: %.5f , Freq: %d, Stock: %s'%(order,freq,','.join(SNameList))
                reportlist.append(restr)
            else:
                reportlist.append('Order is Zero or Frequency is the lenth of stocklist')
        self.diaryfile.get_message(reportlist)
        self.diaryfile.update_txtdiary()
    def change_conf(self,stocklist):
        """Persist the codes of all non-degenerate rank groups back into
        the configuration file's keyword code list."""
        SNameList=[]
        for i in range( len(self.counterlist) ):
            if self.orderlist[i]!=0 and self.counterlist[i] != len(stocklist):
                for j in range( len( self._indexlist[i] )):
                    ind=self._indexlist[i][j]
                    SNameList.append(stocklist[ind].code)
        self.configfile.KW_modify(code=SNameList)
        self.configfile.KW_save_config()
    def collector(self,stocklist):
        """One full cycle: collect scores, rank, report, update config."""
        print('##############Inking the diary#############')
        self.info_collect(stocklist)
        self.info_process(stocklist)
        self.report(stocklist)
        self.change_conf(stocklist)
        print('#############Report Finish#############')
def ini_classfication():
    """Download the tushare classification tables once at start-up.

    Returns:
        [Codelist, Area, Concept, Industry] — Codelist is a plain list of
        stock code strings taken from the area table; the other three are
        the raw pandas DataFrames returned by tushare.
    """
    Industry = ts.get_industry_classified()
    Concept = ts.get_concept_classified()
    Area = ts.get_area_classified()
    # The old `.ix` indexer was removed from pandas; take the column directly.
    Codelist = Area['code'].tolist()
    return [Codelist, Area, Concept, Industry]
def stock_classfication(code, Area, Concept, Industry):
    """Look up the name/area/concept/industry tags for one stock code.

    Args:
        code: stock code string, e.g. '000959'.
        Area/Concept/Industry: tushare classification DataFrames with a
            'code' column plus 'name'/'area' resp. 'c_name' columns.

    Returns:
        [name, area, concepts, industry] where name is '未知' when the
        code is not found, area is a 0/1-element list, concepts is a list
        of concept names, and industry is a list of 2-character industry
        tags (a 4-character industry name is split into two tags).
    """
    # (Cleaned: a stray corruption token was embedded in the original body.)
    rows = Area.query("code == '%s'" % code)
    try:
        name = str(rows[['name']].iloc[0, 0])
    except Exception:
        name = '未知'
    try:
        area = [rows[['area']].iloc[0, 0]]
    except Exception:
        area = []

    concept_rows = Concept.query("code == '%s'" % code)[['c_name']]
    try:
        concepts = [concept_rows.iloc[i, 0] for i in range(len(concept_rows))]
    except Exception:
        concepts = []

    try:
        ind = Industry.query("code == '%s'" % code)[['c_name']].iloc[0, 0]
        # Drop the generic '行业' ("industry") suffix before splitting.
        ind = ind.replace('行业', '')
        industry = [ind[0:2], ind[2:4]] if len(ind) == 4 else [ind]
    except Exception:
        industry = []

    return [name, area, concepts, industry]
def prelearn_weight_s(stockobj, STR):
    """Feed a single news string into one stock object's keyword scanner."""
    stockobj.hunt(STR)
def prelearn_weight(stockobjlist, strlist):
    """Replay a batch of news strings through every stock object.

    Each string is fed to every stock in order; a failure on one stock is
    reported and the replay continues with the next one.
    """
    for news in strlist:
        for obj in stockobjlist:
            try:
                prelearn_weight_s(obj, news)
            except:
                print("Stock %s prelearn failed" % obj.name)
#12 min to download 1000 news with content.
def SINA_prelearn(stockobjlist, newslength, with_c=False):
    """Download a batch of recent Sina news and replay it through every
    stock object so keyword distributions start warmed up.

    Args:
        stockobjlist: stock_info objects to train.
        newslength: how many news items to request; shrunk by 100 on each
            failed download attempt.
        with_c: also include article content in the replayed strings.
    """
    download_flag = False
    PDnews = None
    while not download_flag:
        # Fix: the original retried forever once newslength went <= 0
        # (tushare would keep failing and 0-100-200... never succeeds).
        if newslength <= 0:
            print("prelearn aborted: news length dropped to zero.")
            return
        PDnews = ts.get_latest_news(top=newslength, show_content=with_c)
        try:
            len(PDnews)
            download_flag = True
            print("prelearn news download finished.")
        except:
            print("the newslength %d didn't work. We minus it with 100 and try again." % newslength)
            newslength = newslength - 100
    Newsstr = []
    if (with_c):
        for i in range(len(PDnews)):  # len(PDnews.index)
            Newsstr.append(PDnews.ix[i, 'classify'] + PDnews.ix[i, 'title'] + PDnews.ix[i, 'content'])
    else:
        for i in range(len(PDnews)):
            Newsstr.append(PDnews.ix[i, 'classify'] + PDnews.ix[i, 'title'])
    prelearn_weight(stockobjlist, Newsstr)
def test():
    """End-to-end driver: build the diary, fetch classification tables,
    spawn one looping stock_info per known code plus the shared News
    fetcher, prelearn from recent news, then start the Collector loop.

    Runs forever (loopobj threads); intended as the script entry point.
    """
    # a=stock_info(name='a',code='000000',area=['概率'],industry=['方法','还好'],concept=['沪江','了就'])
    # a.distribution=[0,0,0,0,0,0,0]
    # b=stock_info(name='b',code='000000',area=[],industry=['方法','还好'],concept=['沪江','了就'])
    # b.distribution=[0,0,0,0,0,2]
    # c=stock_info(name='c',code='000000',area=['概率'],industry=[],concept=['沪江','了就'])
    # c.distribution=[0,0,0,0,1]
    # d=stock_info(name='d',code='000000',area=['概率'],industry=['方法','还好'],concept=[])
    # d.distribution=[0,0,0,0,0]
    # e=stock_info(name='e',code='000000',area=['概率'],industry=['方法'],concept=['沪江','了就'])
    # e.distribution=[0,0,0,0,0,1]
    # f=stock_info(name='f',code='000000',area=['概率'],industry=['方法'],concept=['沪江','了就'])
    # f.distribution=[0,0,0,0,0,2]
    # Diary directory lives next to this module.
    path='%s%s'%(os.path.dirname(__file__),'/diary/')
    #path='%s%s'%(os.path.dirname(os.path.abspath('__file__')),'/diary/')
    #path='%s%s'%(os.getcwd(),'/diary/')
    diary=diaryfile(rootpath=path,name=DIARYNAME,suffix='txt')
    # testColl=Collector('SINA_COLLECTOR',diary)
    # testColl.collector([a,b,c,d,e,f])
    # diary.get_message('test')
    # diary.update_txtdiary()
    # diary.txtfile.close()
    Conf=Skit.configfile('StockP_config.json')
    # Testobj=stock_info(name='首钢',code='000959',industry=['普钢'])
    # Testobj.setlooppara(5,Testobj.News)
    # Testobj.start()
    #--------------------News test-----------------------------
    # Shared news fetcher: polls every second on its own loop thread.
    Newsobj=News('SINA_FORCAST_NEWS')
    Newsobj.setlooppara(1,Newsobj.Newsget,None)
    Newsobj.start()
    # a=0
    # while not a:
    #     a=bool(input())
    #     if a==1 or a==' ':
    #         Testobj.stoploop()
    #         print('stop loop The world!!!!!')
    #----------------------Sort test-------------------------#
    # Testobj=stock_info(name='首钢',code='000959',industry=['普钢','美少女','名给','哲学'])
    # Testobj.show_distri()
    # Testobj.hunt('普钢里面有美少女不过也有个明给')
    # Testobj.show_distri()
    #----------------------Initial test--------------#
    # [Codelist,Area,Concept,Industry]=ini_classfication()
    # [name,ar,co,ind]=stock_classfication('000959',Area,Concept,Industry)
    # Stockobj=stock_info(name=name,code='000959',area=[ar],industry=[ind],concept=[co])
    # Stockobj.show_distri()
    #---------------------hunt test----------------------#
    Stockobj_chain=[]
    # Retry the classification download until the network cooperates.
    ini_class_flag = True
    while ini_class_flag:
        try:
            [Codelist,Area,Concept,Industry]=ini_classfication()
            ini_class_flag=False
        except:
            print("Fail to download the stock classification data, We try it again...")
            ini_class_flag = True
    testcoun=0
    #print(Codelist)
    # One looping stock_info per resolvable code; unknown names skipped.
    for code in Codelist:
        [name,ar,co,ind]=stock_classfication(code,Area,Concept,Industry)
        Stockobj=stock_info(name=name,code=code,area=ar,industry=ind,concept=co)
        if Stockobj.name!='未知':
            Stockobj.setlooppara(1,Stockobj.Trum,Newsobj)
            Stockobj.start()
#            Stockobj.show_distri()
            Stockobj_chain.append(Stockobj)
    SINA_prelearn(Stockobj_chain,2000,0)
#    except:
#        print('stock initial error')
#    for stock in Stockobj_chain:
    # Collector reports and rewrites the config every 10 minutes.
    Coll=Collector('SINA_COLLECTOR',diary,Conf)
    Coll.setlooppara(60*10,Coll.collector,Stockobj_chain)
    Coll.start()
if __name__ == '__main__': test()<|fim▁end|> | |
<|file_name|>gitutils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from collections import defaultdict
import subprocess
import os
import bisect
import re
from .common import log_fmt
class GitProcess():
    """Small wrapper around subprocess.Popen for running git commands.

    GIT_BIN must be set to the git executable path before use.
    """
    GIT_BIN = None

    def __init__(self, repoDir, args, text=None):
        # On Windows, suppress the console window that would otherwise
        # flash up for every spawned git process.
        startupinfo = None
        if os.name == "nt":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

        self._process = subprocess.Popen(
            [GitProcess.GIT_BIN] + args,
            cwd=repoDir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            startupinfo=startupinfo,
            universal_newlines=text)

    @property
    def process(self):
        # Underlying Popen object.
        return self._process

    @property
    def returncode(self):
        return self._process.returncode

    def communicate(self):
        # Blocks until the process exits; returns (stdout, stderr).
        return self._process.communicate()
class Ref():
    """A parsed git ref (tag, local head or remote branch).

    Instances order by type (TAG < HEAD < REMOTE) so show-ref output can
    be kept sorted per commit.
    """
    INVALID = -1
    TAG = 0
    HEAD = 1
    REMOTE = 2

    def __init__(self, type, name):
        self._type = type
        self._name = name

    def __str__(self):
        string = "type: {0}\n".format(self._type)
        string += "name: {0}".format(self._name)
        return string

    def __lt__(self, other):
        return self._type < other._type

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, type):
        self._type = type

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @classmethod
    def fromRawString(cls, string):
        """Parse one `git show-ref -d` line ("<sha1> refs/...").

        Returns a Ref, or None for remote HEADs and anything that is not
        a head/remote/tag ref. Peeled tag lines ("...^{}") map to the
        plain tag name.
        """
        # 40-char sha1 + space + "refs/" is the minimum useful length.
        if not string or len(string) < 46:
            return None

        # (The sha1 in string[0:40] is not needed here; callers keep it.)
        name = string[41:]
        if not name.startswith("refs/"):
            return None

        name = name[5:]
        _type = Ref.INVALID
        _name = None

        if name.startswith("heads/"):
            _type = Ref.HEAD
            _name = name[6:]
        elif name.startswith("remotes") \
                and not name.endswith("HEAD"):
            _type = Ref.REMOTE
            _name = name
        elif name.startswith("tags/"):
            _type = Ref.TAG
            if name.endswith("^{}"):
                _name = name[5:-3]
            else:
                _name = name[5:]
        else:
            return None

        return cls(_type, _name)
class Git():
    """Static facade over the git command line.

    All methods run git in Git.REPO_DIR unless an explicit directory is
    given, returning raw bytes/str output or None on failure.
    """
    REPO_DIR = os.getcwd()
    REPO_TOP_DIR = os.getcwd()
    REF_MAP = {}
    REV_HEAD = None

    # pseudo sha1: local uncommitted changes (working tree)
    LUC_SHA1 = "0000000000000000000000000000000000000000"
    # pseudo sha1: local changes checked in to the index
    LCC_SHA1 = "0000000000000000000000000000000000000001"

    @staticmethod
    def available():
        """True when a git binary has been configured."""
        return GitProcess.GIT_BIN is not None

    @staticmethod
    def run(args, text=None):
        """Spawn git with @args in the current repo directory."""
        return GitProcess(Git.REPO_DIR, args, text)

    @staticmethod
    def checkOutput(args, text=None):
        """Run git and return its stdout, or None on a non-zero exit."""
        process = Git.run(args, text)
        data = process.communicate()[0]
        if process.returncode != 0:
            return None

        return data

    @staticmethod
    def repoTopLevelDir(directory):
        """get top level repo directory

        if @directory is not a repository, None returned"""
        if not os.path.isdir(directory):
            return None

        args = ["rev-parse", "--show-toplevel"]
        process = GitProcess(directory, args)
        realDir = process.communicate()[0]
        if process.returncode != 0:
            return None

        return realDir.decode("utf-8").replace("\n", "")

    @staticmethod
    def refs():
        """Map sha1 -> sorted list of Ref for every ref in the repo."""
        args = ["show-ref", "-d"]
        data = Git.checkOutput(args)
        if not data:
            return None
        lines = data.decode("utf-8").split('\n')
        refMap = defaultdict(list)

        for line in lines:
            ref = Ref.fromRawString(line)
            if not ref:
                continue

            sha1 = line[0:40]
            # Keep each commit's refs ordered by type (tag/head/remote).
            bisect.insort(refMap[sha1], ref)

        return refMap

    @staticmethod
    def revHead():
        """sha1 of HEAD, or None."""
        args = ["rev-parse", "HEAD"]
        data = Git.checkOutput(args)
        if not data:
            return None

        return data.decode("utf-8").rstrip('\n')

    @staticmethod
    def branches():
        """All local and remote branch lines from `git branch -a`."""
        args = ["branch", "-a"]
        data = Git.checkOutput(args)
        if not data:
            return None

        return data.decode("utf-8").split('\n')

    @staticmethod
    def commitSummary(sha1):
        """Dict with sha1/subject/date/author/email for one commit."""
        fmt = "%h%x01%s%x01%ad%x01%an%x01%ae"
        args = ["show", "-s",
                "--pretty=format:{0}".format(fmt),
                "--date=short", sha1]

        data = Git.checkOutput(args)
        if not data:
            return None

        parts = data.decode("utf-8").split("\x01")

        return {"sha1": parts[0],
                "subject": parts[1],
                "date": parts[2],
                "author": parts[3],
                "email": parts[4]}

    @staticmethod
    def abbrevCommit(sha1):
        """Abbreviated sha1 via git; falls back to the first 7 chars.

        Fix: the two statements building/running the git command had been
        displaced to the end of the file by corruption; reassembled here.
        """
        args = ["show", "-s", "--pretty=format:%h", sha1]
        data = Git.checkOutput(args)
        if not data:
            return sha1[:7]
        return data.rstrip().decode("utf-8")

    @staticmethod
    def commitSubject(sha1):
        """Raw subject line (bytes) of one commit, or None."""
        args = ["show", "-s", "--pretty=format:%s", sha1]
        data = Git.checkOutput(args)

        return data

    @staticmethod
    def commitRawDiff(sha1, filePath=None, gitArgs=None):
        """Unified diff for a commit or for the two local-change pseudo
        sha1s (index via diff-index, working tree via diff-files)."""
        if sha1 == Git.LCC_SHA1:
            args = ["diff-index", "--cached", "HEAD"]
        elif sha1 == Git.LUC_SHA1:
            args = ["diff-files"]
        else:
            args = ["diff-tree", "-r", "--root", sha1]

        args.extend(["-p", "--textconv", "--submodule",
                     "-C", "--no-commit-id", "-U3"])
        if gitArgs:
            args.extend(gitArgs)

        if filePath:
            args.append("--")
            args.append(filePath)

        data = Git.checkOutput(args)
        if not data:
            return None

        return data

    @staticmethod
    def externalDiff(branchDir, commit, path=None, tool=None):
        """Launch `git difftool` for @commit (detached, not awaited)."""
        args = ["difftool", "--no-prompt"]
        if commit.sha1 == Git.LUC_SHA1:
            pass
        elif commit.sha1 == Git.LCC_SHA1:
            args.append("--cached")
        else:
            args.append("{0}^..{0}".format(commit.sha1))

        if tool:
            args.append("--tool={}".format(tool))

        if path:
            args.append("--")
            args.append(path)

        cwd = branchDir if branchDir else Git.REPO_DIR
        # Fire-and-forget: the difftool runs in its own window.
        process = GitProcess(cwd, args)

    @staticmethod
    def conflictFiles():
        """Paths of files currently in merge-conflict state, or None."""
        # Fix: "--no-color" was misspelled "-no-color", which git rejects,
        # so this method always returned None.
        args = ["diff", "--name-only",
                "--diff-filter=U",
                "--no-color"]
        data = Git.checkOutput(args)
        if not data:
            return None
        return data.rstrip(b'\n').decode("utf-8").split('\n')

    @staticmethod
    def gitDir():
        """Path of the .git directory, or None."""
        args = ["rev-parse", "--git-dir"]
        data = Git.checkOutput(args)
        if not data:
            return None

        return data.rstrip(b'\n').decode("utf-8")

    @staticmethod
    def gitPath(name):
        """Join @name onto the .git directory path."""
        dir = Git.gitDir()
        if not dir:
            return None
        if dir[-1] != '/' and dir[-1] != '\\':
            dir += '/'

        return dir + name

    @staticmethod
    def mergeBranchName():
        """return the current merge branch name"""
        # TODO: is there a better way?
        path = Git.gitPath("MERGE_MSG")
        if not os.path.exists(path):
            return None

        name = None
        with open(path, "r") as f:
            line = f.readline()
            m = re.match("Merge.* '(.*)'.*", line)
            if m:
                name = m.group(1)

        # likely a sha1: resolve it to a containing remote branch
        if name and re.match("[a-f0-9]{7,40}", name):
            data = Git.checkOutput(["branch", "--remotes",
                                    "--contains", name])
            if data:
                data = data.rstrip(b'\n')
                if data:
                    # might have more than one branch
                    name = data.decode("utf-8").split('\n')[0].strip()

        return name

    @staticmethod
    def resolveBy(ours, path):
        """Resolve a conflicted @path taking ours/theirs, then stage it."""
        args = ["checkout",
                "--ours" if ours else "--theirs",
                path]
        process = Git.run(args)
        process.communicate()

        if process.returncode != 0:
            return False

        args = ["add", path]
        process = Git.run(args)
        process.communicate()

        return True if process.returncode == 0 else False

    @staticmethod
    def undoMerge(path):
        """undo a merge on the @path"""
        if not path:
            return False

        args = ["checkout", "-m", path]
        process = Git.run(args)
        process.communicate()

        return process.returncode == 0

    @staticmethod
    def hasLocalChanges(branch, cached=False):
        """True when the checked-out @branch has uncommitted changes
        (index changes when @cached, working-tree changes otherwise)."""
        # A remote branch should never have local changes
        if branch.startswith("remotes/"):
            return False

        dir = Git.branchDir(branch)
        # only branch checked out can have local changes
        if not dir:
            return False

        args = ["diff", "--quiet"]
        if cached:
            args.append("--cached")

        process = GitProcess(dir, args)
        process.communicate()

        # `git diff --quiet` exits 1 exactly when there are differences.
        return process.returncode == 1

    @staticmethod
    def branchDir(branch):
        """returned the branch directory if it checked out
        otherwise returned an empty string"""
        if not branch or branch.startswith("remotes/"):
            return ""

        # Use the repo dir directly
        # since we are unable to get two detached branch
        if branch.startswith("(HEAD detached"):
            return Git.REPO_DIR

        args = ["worktree", "list"]
        data = Git.checkOutput(args)
        if not data:
            return ""

        worktree_re = re.compile(
            r"(\S+)\s+[a-f0-9]+\s+(\[(\S+)\]|\(detached HEAD\))$")
        worktrees = data.rstrip(b'\n').decode("utf8").split('\n')
        for wt in worktrees:
            m = worktree_re.fullmatch(wt)
            if not m:
                print("Oops! Wrong format for worktree:", wt)
            elif m.group(3) == branch:
                return m.group(1)

        return ""

    @staticmethod
    def generateDiff(sha1, filePath):
        """Write the raw diff of @sha1 to @filePath; True on success."""
        data = Git.commitRawDiff(sha1)
        if not data:
            return False

        with open(filePath, "wb+") as f:
            f.write(data)
        return True

    @staticmethod
    def generatePatch(sha1, filePath):
        """Write a format-patch of @sha1 to @filePath; True on success."""
        args = ["format-patch", "-1", "--stdout", sha1]
        data = Git.checkOutput(args)
        if not data:
            return False

        with open(filePath, "wb+") as f:
            f.write(data)
        return True

    @staticmethod
    def revertCommit(branch, sha1):
        """`git revert` @sha1 on @branch; returns (returncode, error)."""
        branchDir = Git.branchDir(branch)

        args = ["revert", "--no-edit", sha1]
        process = GitProcess(branchDir, args)
        _, error = process.communicate()
        if process.returncode != 0 and error is not None:
            error = error.decode("utf-8")

        return process.returncode, error

    @staticmethod
    def resetCommitTo(branch, sha1, method):
        """`git reset --<method>` @branch to @sha1; (returncode, error)."""
        branchDir = Git.branchDir(branch)
        args = ["reset", "--" + method, sha1]
        process = GitProcess(branchDir, args)
        _, error = process.communicate()
        if process.returncode != 0 and error is not None:
            error = error.decode("utf-8")

        return process.returncode, error

    @staticmethod
    def repoUrl():
        """origin remote URL, or an empty string."""
        args = ["config", "remote.origin.url"]
        data = Git.checkOutput(args)
        if data:
            return data.rstrip(b'\n').decode("utf-8")
        return ""

    @staticmethod
    def runWithError(args):
        """Run git, returning (returncode, decoded stderr or None)."""
        process = Git.run(args)
        _, error = process.communicate()
        if process.returncode != 0 and error is not None:
            error = error.decode("utf-8")

        return process.returncode, error

    @staticmethod
    def setConfigValue(key, value, isGlobal=True):
        """Set (or unset when @value is falsy) a git config @key."""
        if not key:
            return 0, None

        args = ["config"]
        if isGlobal:
            args.append("--global")
        args.append(key)
        if value:
            args.append(value)
        else:
            args.insert(1, "--unset")

        return Git.runWithError(args)

    @staticmethod
    def removeSection(section, isGlobal=True):
        """Remove a whole git config @section."""
        if not section:
            return 0, None

        args = ["config"]
        if isGlobal:
            args.append("--global")
        args.append("--remove-section")
        args.append(section)

        return Git.runWithError(args)

    @staticmethod
    def setDiffTool(name, cmd, isGlobal=True):
        """Register (or remove when @cmd is empty) a difftool command."""
        if not name:
            return 0, None
        if not cmd:
            Git.removeSection("difftool.%s" % name)
            # treat as OK
            return 0, None

        key = "difftool.%s.cmd" % name
        return Git.setConfigValue(key, cmd, isGlobal)

    @staticmethod
    def setMergeTool(name, cmd, isGlobal=True):
        """Register (or remove) a mergetool command, trusting its exit code."""
        if not name:
            return 0, None

        if not cmd:
            Git.removeSection("mergetool.%s" % name)
            return 0, None

        key = "mergetool.%s.cmd" % name
        ret, error = Git.setConfigValue(key, cmd, isGlobal)
        if ret != 0:
            return ret, error

        key = "mergetool.%s.trustExitCode" % name
        return Git.setConfigValue(key, "true", isGlobal)

    @staticmethod
    def getConfigValue(key, isGlobal=True):
        """Read one git config value as text; empty string when unset."""
        if not key:
            return ""

        args = ["config", "--get", key]
        if isGlobal:
            args.insert(1, "--global")

        data = Git.checkOutput(args, True)
        if data is None:
            return ""

        return data.rstrip("\n")

    @staticmethod
    def diffToolCmd(name, isGlobal=True):
        """Configured command line for difftool @name, or empty string."""
        if not name:
            return ""

        return Git.getConfigValue("difftool.%s.cmd" % name)

    @staticmethod
    def mergeToolCmd(name, isGlobal=True):
        """Configured command line for mergetool @name, or empty string."""
        if not name:
            return ""

        return Git.getConfigValue("mergetool.%s.cmd" % name)
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, Extension
import numpy as np
import platform
version = '0.1.3'

# No native extensions are built; kept for future Cython/C modules.
# (Cleaned: a corruption token was embedded around this assignment.)
ext_modules = []

setup(name='droneapi',
      zip_safe=True,
      version=version,
      description='Python language bindings for the DroneApi',
      long_description='''Python language bindings for the DroneApi (includes the droneapi MAVProxy module)''',
      url='https://github.com/diydrones/droneapi-python',
      author='3D Robotics',
      install_requires=['pymavlink',
                        'MAVProxy >= 1.3.1',
                        'protobuf >= 2.5.0'],
      author_email='[email protected]',
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'Intended Audience :: Science/Research',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python :: 2.7',
                   'Topic :: Scientific/Engineering'
                   ],
      license='apache',
      packages=['droneapi', 'droneapi.module', 'droneapi.lib'],
      # doesn't work: package_data={'droneapi': ['example/*']},
      ext_modules=ext_modules)
<|file_name|>classes_c.js<|end_file_name|><|fim▁begin|>var searchData=<|fim▁hole|>[
['upmixtype',['UpmixType',['../struct_upmix_type.html',1,'']]]
];<|fim▁end|> | |
<|file_name|>is-plain-function.js<|end_file_name|><|fim▁begin|>export default function isPlainFunction(test) {
return typeof test === 'function' && test.PrototypeMixin === undefined;<|fim▁hole|><|fim▁end|> | } |
<|file_name|>test_gstr_3b_report.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import getdate
from erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice
from erpnext.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice
from erpnext.stock.doctype.item.test_item import make_item
import json
class TestGSTR3BReport(unittest.TestCase):
    """End-to-end check of the GSTR 3B Report doctype: builds a GST test
    company with customers/suppliers/invoices, generates the report for
    the current month and asserts key totals in its JSON output."""
    def test_gstr_3b_report(self):
        # GSTR 3B Report stores the period as a month name, not a number.
        month_number_mapping = {
            1: "January",
            2: "February",
            3: "March",
            4: "April",
            5: "May",
            6: "June",
            7: "July",
            8: "August",
            9: "September",
            10: "October",
            11: "November",
            12: "December"
        }

        frappe.set_user("Administrator")

        # Start from a clean slate for the GST test company.
        frappe.db.sql("delete from `tabSales Invoice` where company='_Test Company GST'")
        frappe.db.sql("delete from `tabPurchase Invoice` where company='_Test Company GST'")
        frappe.db.sql("delete from `tabGSTR 3B Report` where company='_Test Company GST'")

        make_company()
        make_item("Milk", properties = {"is_nil_exempt": 1, "standard_rate": 0.000000})
        set_account_heads()
        make_customers()
        make_suppliers()
        make_sales_invoice()
        create_purchase_invoices()

        # Re-save an existing report (recomputes it) or create a new one
        # for the current month.
        if frappe.db.exists("GSTR 3B Report", "GSTR3B-March-2019-_Test Address-Billing"):
            report = frappe.get_doc("GSTR 3B Report", "GSTR3B-March-2019-_Test Address-Billing")
            report.save()
        else:
            report = frappe.get_doc({
                "doctype": "GSTR 3B Report",
                "company": "_Test Company GST",
                "company_address": "_Test Address-Billing",
                "year": getdate().year,
                "month": month_number_mapping.get(getdate().month)
            }).insert()

        # Spot-check the computed JSON: outward IGST, zero-rated IGST,
        # inter-state unregistered supply, nil-exempt value, inward
        # supplies and eligible ITC.
        output = json.loads(report.json_output)

        self.assertEqual(output["sup_details"]["osup_det"]["iamt"], 18),
        self.assertEqual(output["sup_details"]["osup_zero"]["iamt"], 18),
        self.assertEqual(output["inter_sup"]["unreg_details"][0]["iamt"], 18),
        self.assertEqual(output["sup_details"]["osup_nil_exmp"]["txval"], 100),
        self.assertEqual(output["inward_sup"]["isup_details"][0]["inter"], 250)
        self.assertEqual(output["itc_elg"]["itc_avl"][4]["iamt"], 45)
def make_sales_invoice():
    """Create the four sales invoices the report assertions depend on:
    registered (IGST 18%), SEZ (IGST 18%), unregistered (IGST 18%) and a
    nil-exempt Milk invoice with no taxes."""
    si = create_sales_invoice(company="_Test Company GST",
        customer = '_Test GST Customer',
        currency = 'INR',
        warehouse = 'Finished Goods - _GST',
        debit_to = 'Debtors - _GST',
        income_account = 'Sales - _GST',
        expense_account = 'Cost of Goods Sold - _GST',
        cost_center = 'Main - _GST',
        do_not_save=1
        )

    si.append("taxes", {
        "charge_type": "On Net Total",
        "account_head": "IGST - _GST",
        "cost_center": "Main - _GST",
        "description": "IGST @ 18.0",
        "rate": 18
    })

    si.submit()

    si1 = create_sales_invoice(company="_Test Company GST",
        customer = '_Test GST SEZ Customer',
        currency = 'INR',
        warehouse = 'Finished Goods - _GST',
        debit_to = 'Debtors - _GST',
        income_account = 'Sales - _GST',
        expense_account = 'Cost of Goods Sold - _GST',
        cost_center = 'Main - _GST',
        do_not_save=1
        )

    # Reconstructed tax row: the cost_center/description/rate entries had
    # been displaced to the end of the file by corruption.
    si1.append("taxes", {
        "charge_type": "On Net Total",
        "account_head": "IGST - _GST",
        "cost_center": "Main - _GST",
        "description": "IGST @ 18.0",
        "rate": 18
    })

    si1.submit()

    si2 = create_sales_invoice(company="_Test Company GST",
        customer = '_Test Unregistered Customer',
        currency = 'INR',
        warehouse = 'Finished Goods - _GST',
        debit_to = 'Debtors - _GST',
        income_account = 'Sales - _GST',
        expense_account = 'Cost of Goods Sold - _GST',
        cost_center = 'Main - _GST',
        do_not_save=1
        )

    si2.append("taxes", {
        "charge_type": "On Net Total",
        "account_head": "IGST - _GST",
        "cost_center": "Main - _GST",
        "description": "IGST @ 18.0",
        "rate": 18
    })

    si2.submit()

    # Nil-exempt item: no tax rows at all.
    si3 = create_sales_invoice(company="_Test Company GST",
        customer = '_Test GST Customer',
        currency = 'INR',
        item = 'Milk',
        warehouse = 'Finished Goods - _GST',
        debit_to = 'Debtors - _GST',
        income_account = 'Sales - _GST',
        expense_account = 'Cost of Goods Sold - _GST',
        cost_center = 'Main - _GST',
        do_not_save=1
        )

    si3.submit()
def create_purchase_invoices():
    """Create the two purchase invoices used by the report: one taxed at
    IGST 18% with 'All Other ITC' eligibility, one untaxed Milk purchase."""
    pi = make_purchase_invoice(
        company="_Test Company GST",
        supplier = '_Test Registered Supplier',
        currency = 'INR',
        warehouse = 'Finished Goods - _GST',
        cost_center = 'Main - _GST',
        do_not_save=1,
        )

    pi.eligibility_for_itc = "All Other ITC"

    pi.append("taxes", {
        "charge_type": "On Net Total",
        "account_head": "IGST - _GST",
        "cost_center": "Main - _GST",
        "description": "IGST @ 18.0",
        "rate": 18
    })

    pi.submit()

    # Nil-exempt purchase: no tax rows.
    pi1 = make_purchase_invoice(
        company="_Test Company GST",
        supplier = '_Test Registered Supplier',
        currency = 'INR',
        warehouse = 'Finished Goods - _GST',
        cost_center = 'Main - _GST',
        item = "Milk",
        do_not_save=1
        )

    pi1.submit()
def make_suppliers():
    """Create the two GST test suppliers and one billing address each.

    Idempotent, data-driven version: every record is only inserted when
    it does not already exist.
    """
    suppliers = [
        ("_Test Registered Supplier", "Registered Regular"),
        ("_Test Unregistered Supplier", "Unregistered"),
    ]
    for supplier_name, gst_category in suppliers:
        if not frappe.db.exists("Supplier", supplier_name):
            frappe.get_doc({
                "supplier_group": "_Test Supplier Group",
                "supplier_name": supplier_name,
                "gst_category": gst_category,
                "supplier_type": "Individual",
                "doctype": "Supplier",
            }).insert()

    # (exists-check name, address title, optional gstin, linked supplier)
    addresses = [
        ("_Test Supplier GST-1-Billing", "_Test Supplier GST-1",
         "29AACCV0498C1Z9", "_Test Registered Supplier"),
        ("_Test Supplier GST-2-Billing", "_Test Supplier GST-2",
         None, "_Test Unregistered Supplier"),
    ]
    for doc_name, title, gstin, supplier_name in addresses:
        if frappe.db.exists('Address', doc_name):
            continue
        fields = {
            "address_line1": "_Test Address Line 1",
            "address_title": title,
            "address_type": "Billing",
            "city": "_Test City",
            "state": "Test State",
            "country": "India",
            "doctype": "Address",
            "is_primary_address": 1,
            "phone": "+91 0000000000",
            "gst_state": "Karnataka",
        }
        if gstin:
            fields["gstin"] = gstin
        address = frappe.get_doc(fields).insert()
        address.append("links", {
            "link_doctype": "Supplier",
            "link_name": supplier_name
        })
        address.save()
def make_customers():
    """Create the three GST test customers and one billing address each.

    Idempotent, data-driven version: every record is only inserted when
    it does not already exist.
    """
    customers = [
        ("_Test GST Customer", "Registered Regular"),
        ("_Test GST SEZ Customer", "SEZ"),
        ("_Test Unregistered Customer", "Unregistered"),
    ]
    for customer_name, gst_category in customers:
        if not frappe.db.exists("Customer", customer_name):
            frappe.get_doc({
                "customer_group": "_Test Customer Group",
                "customer_name": customer_name,
                "gst_category": gst_category,
                "customer_type": "Individual",
                "doctype": "Customer",
                "territory": "_Test Territory"
            }).insert()

    # (exists-check name, address title, per-address GST fields, customer)
    addresses = [
        ("_Test GST-1-Billing", "_Test GST-1",
         {"gstin": "29AZWPS7135H1ZG", "gst_state": "Karnataka",
          "gst_state_number": "29"},
         "_Test GST Customer"),
        ("_Test GST-2-Billing", "_Test GST-2",
         {"gst_state": "Haryana"},
         "_Test Unregistered Customer"),
        ("_Test GST-3-Billing", "_Test GST-3",
         {"gst_state": "Gujarat"},
         "_Test GST SEZ Customer"),
    ]
    for doc_name, title, gst_fields, customer_name in addresses:
        if frappe.db.exists('Address', doc_name):
            continue
        fields = {
            "address_line1": "_Test Address Line 1",
            "address_title": title,
            "address_type": "Billing",
            "city": "_Test City",
            "state": "Test State",
            "country": "India",
            "doctype": "Address",
            "is_primary_address": 1,
            "phone": "+91 0000000000",
        }
        fields.update(gst_fields)
        address = frappe.get_doc(fields).insert()
        address.append("links", {
            "link_doctype": "Customer",
            "link_name": customer_name
        })
        address.save()
def make_company():
    """Create '_Test Company GST' (India/INR) and its Maharashtra billing
    address; returns early when the company already exists."""
    if frappe.db.exists("Company", "_Test Company GST"):
        return
    company = frappe.new_doc("Company")
    company.company_name = "_Test Company GST"
    company.abbr = "_GST"
    company.default_currency = "INR"
    company.country = "India"
    company.insert()

    # The report is addressed from this billing address (state 27).
    if not frappe.db.exists('Address', '_Test Address-Billing'):
        address = frappe.get_doc({
            "address_line1": "_Test Address Line 1",
            "address_title": "_Test Address",
            "address_type": "Billing",
            "city": "_Test City",
            "state": "Test State",
            "country": "India",
            "doctype": "Address",
            "is_primary_address": 1,
            "phone": "+91 0000000000",
            "gstin": "27AAECE4835E1ZR",
            "gst_state": "Maharashtra",
            "gst_state_number": "27"
        }).insert()

        address.append("links", {
            "link_doctype": "Company",
            "link_name": "_Test Company GST"
        })

        address.save()
def set_account_heads():
    """Ensure GST Settings maps CGST/SGST/IGST accounts for the GST test
    company, adding the mapping only when it is not configured yet.

    (Cleaned: dict entries displaced here from make_sales_invoice by file
    corruption were removed.)
    """
    gst_settings = frappe.get_doc("GST Settings")

    gst_account = frappe.get_all(
        "GST Account",
        fields=["cgst_account", "sgst_account", "igst_account"],
        filters = {"company": "_Test Company GST"})

    if not gst_account:
        gst_settings.append("gst_accounts", {
            "company": "_Test Company GST",
            "cgst_account": "CGST - _GST",
            "sgst_account": "SGST - _GST",
            "igst_account": "IGST - _GST",
        })

        gst_settings.save()
<|file_name|>test_livemigrationops.py<|end_file_name|><|fim▁begin|># Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_win import exceptions as os_win_exc
from oslo_config import cfg
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.virt.hyperv import test_base
from jacket.compute.virt.hyperv import livemigrationops
CONF = cfg.CONF
class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V LiveMigrationOps class."""
    # (Cleaned: stray corruption tokens embedded around the last test's
    # decorator and final line were removed; logic is unchanged.)

    def setUp(self):
        super(LiveMigrationOpsTestCase, self).setUp()
        self.context = 'fake_context'

        self._livemigrops = livemigrationops.LiveMigrationOps()
        self._livemigrops._livemigrutils = mock.MagicMock()
        self._livemigrops._pathutils = mock.MagicMock()

    @mock.patch('compute.virt.hyperv.vmops.VMOps.copy_vm_console_logs')
    @mock.patch('compute.virt.hyperv.vmops.VMOps.copy_vm_dvd_disks')
    def _test_live_migration(self, mock_get_vm_dvd_paths,
                             mock_copy_logs, side_effect):
        # Shared driver: on HyperVException the recover callback must run;
        # otherwise logs are copied and the post callback fires.
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_post = mock.MagicMock()
        mock_recover = mock.MagicMock()
        fake_dest = mock.sentinel.DESTINATION
        self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [
            side_effect]
        if side_effect is os_win_exc.HyperVException:
            self.assertRaises(os_win_exc.HyperVException,
                              self._livemigrops.live_migration,
                              self.context, mock_instance, fake_dest,
                              mock_post, mock_recover, False, None)
            mock_recover.assert_called_once_with(self.context, mock_instance,
                                                 fake_dest, False)
        else:
            self._livemigrops.live_migration(context=self.context,
                                             instance_ref=mock_instance,
                                             dest=fake_dest,
                                             post_method=mock_post,
                                             recover_method=mock_recover)
            mock_copy_logs.assert_called_once_with(mock_instance.name,
                                                   fake_dest)
            mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm
            mock_live_migr.assert_called_once_with(mock_instance.name,
                                                   fake_dest)
            mock_post.assert_called_once_with(self.context, mock_instance,
                                              fake_dest, False)

    def test_live_migration(self):
        self._test_live_migration(side_effect=None)

    def test_live_migration_exception(self):
        self._test_live_migration(side_effect=os_win_exc.HyperVException)

    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps'
                '.ebs_root_in_block_devices')
    @mock.patch('compute.virt.hyperv.imagecache.ImageCache.get_cached_image')
    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps'
                '.initialize_volumes_connection')
    def test_pre_live_migration(self, mock_initialize_connection,
                                mock_get_cached_image,
                                mock_ebs_root_in_block_devices):
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.image_ref = "fake_image_ref"
        mock_ebs_root_in_block_devices.return_value = None
        CONF.set_override('use_cow_images', True)
        self._livemigrops.pre_live_migration(
            self.context, mock_instance,
            block_device_info=mock.sentinel.BLOCK_INFO,
            network_info=mock.sentinel.NET_INFO)

        check_config = (
            self._livemigrops._livemigrutils.check_live_migration_config)
        check_config.assert_called_once_with()
        mock_ebs_root_in_block_devices.assert_called_once_with(
            mock.sentinel.BLOCK_INFO)
        mock_get_cached_image.assert_called_once_with(self.context,
                                                      mock_instance)
        mock_initialize_connection.assert_called_once_with(
            mock.sentinel.BLOCK_INFO)

    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
    def test_post_live_migration(self, mock_disconnect_volumes):
        self._livemigrops.post_live_migration(
            self.context, mock.sentinel.instance,
            mock.sentinel.block_device_info)
        mock_disconnect_volumes.assert_called_once_with(
            mock.sentinel.block_device_info)
        self._livemigrops._pathutils.get_instance_dir.assert_called_once_with(
            mock.sentinel.instance.name, create_dir=False, remove_dir=True)

    @mock.patch('compute.virt.hyperv.vmops.VMOps.log_vm_serial_output')
    def test_post_live_migration_at_destination(self, mock_log_vm):
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._livemigrops.post_live_migration_at_destination(
            self.context, mock_instance, network_info=mock.sentinel.NET_INFO,
            block_migration=mock.sentinel.BLOCK_INFO)
        mock_log_vm.assert_called_once_with(mock_instance.name,
                                            mock_instance.uuid)
<|file_name|>infohash.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2014-2020 Savoir-faire Linux Inc.
* Author: Sébastien Blin <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use crate::ffi::*;
use std::ffi::CStr;
use std::ffi::CString;
use std::fmt;
pub use crate::ffi::InfoHash;<|fim▁hole|> InfoHash {
d: [0; 20]
}
}
pub fn random() -> InfoHash {
let mut h = InfoHash::new();
unsafe {
dht_infohash_random(&mut h);
}
h
}
pub fn get(data: &str) -> InfoHash {
let mut h = InfoHash::new();
unsafe {
let c_str = CString::new(data).unwrap();
dht_infohash_get(&mut h, c_str.as_ptr() as *mut u8, data.len());
}
h
}
pub fn from_bytes(data: &Vec<u8>) -> InfoHash {
let mut h = InfoHash::new();
unsafe {
dht_infohash_get(&mut h, data.as_ptr() as *mut u8, data.len());
}
h
}
pub fn from_hex(data: &str) -> InfoHash {
let mut h = InfoHash::new();
unsafe {
let c_str = CString::new(data).unwrap();
dht_infohash_from_hex(&mut h, c_str.as_ptr());
}
h
}
pub fn is_zero(&self) -> bool {
unsafe {
dht_infohash_is_zero(self)
}
}
}
impl fmt::Display for InfoHash {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
unsafe {
let self_str = CStr::from_ptr(
dht_infohash_print(self)
).to_str().unwrap_or("");
write!(f, "{}", self_str)
}
}
}<|fim▁end|> |
impl InfoHash {
pub fn new() -> InfoHash { |
<|file_name|>dashboard.ts<|end_file_name|><|fim▁begin|>/**
* Created by Derwish ([email protected]) on 15.09.2017.
* License: http://www.gnu.org/licenses/gpl-3.0.txt
*/
import { Database } from "../database/database";
import { Container } from "../nodes/container";
import { Node } from "../nodes/node";
import { UiNode } from "../nodes/nodes/ui/ui-node";
import { DashboardServerSocket } from "./dashboard-server-socket";
export interface UiPanel {
name: string;
title: string;
icon: string;
// order: number;
subPanels: Array<UiSubpanel>;
}
export interface UiSubpanel {
title: string;
uiElements: Array<UiElement>;
}
export interface UiElement {
title: string;
type: string;
//link to node
cid: number;
id: number;
// state?: any;
}
export class Dashboard {
db: Database;
uiPanels: Array<UiPanel>;
socket: DashboardServerSocket;
constructor(socket: DashboardServerSocket) {
this.socket = socket;
this.uiPanels = [];
}
loadFromDatabase(db: Database) {
this.db = db;
db.getUiPanels((err, docs) => {
if (err) return console.log(err);
this.uiPanels = docs || [];
})
}
// onNodeCreated(node: UiNode) {
// if (!node.isDashboardNode)
// return;
// }
onNodeRemoved(node: UiNode) {
if (!node.isDashboardNode)
return;
this.removeElemetForNode(node);
this.removeEmptyPanels();
this.socket.io.emit("getUiPanel", this.getUiPanel(node.settings["ui-panel"].value));
}
onNodeChangePanelOrTitle(node: UiNode, newPanelName: string, newTitle: string) {
this.removeElemetForNode(node);
//add new element
var uiElemet: UiElement = {
title: newTitle,
type: node.uiElementType,
cid: node.container.id,
id: node.id,
// state: node.properties['state']
}
var newPanel = this.getUiPanel(newPanelName);
if (!newPanel) {
//add to new panel
newPanel = this.addUiPanel(newPanelName);
newPanel.subPanels[0].uiElements.push(uiElemet);
if (this.db)
this.db.addUiPanel(newPanel);
} else {
//update existing panel
newPanel.subPanels[0].uiElements.push(uiElemet);
if (this.db)
this.db.updateUiPanel(newPanel.name, { $set: { subPanels: newPanel.subPanels } })
}
this.removeEmptyPanels();
this.socket.io.emit("getUiPanelsList", this.getUiPanelsList())
this.socket.io.emit("getUiPanel", this.getUiPanel(newPanelName))
}
removeEmptyPanels() {
let changed = false;
for (var p = 0; p < this.uiPanels.length; p++) {
let panel = this.uiPanels[p];
if (panel.subPanels.every(s => s.uiElements.length == 0)) {
//reove panel
changed = true;
this.uiPanels = this.uiPanels.filter(pan => pan.name != panel.name);
if (this.db)
this.db.removeUiPanel(panel.name);
}
}
if (changed)
this.socket.io.emit("getUiPanelsList", this.getUiPanelsList());
}
removeElemetForNode(node: UiNode) {<|fim▁hole|> //remove old element
for (var s = 0; s < oldPanel.subPanels.length; s++) {
var subPanel = oldPanel.subPanels[s];
for (var e = 0; e < subPanel.uiElements.length; e++) {
var element = subPanel.uiElements[e];
if (element.cid == node.container.id && element.id == node.id) {
subPanel.uiElements.splice(e, 1);
if (this.db)
this.db.updateUiPanel(oldPanel.name, { $set: { subPanels: oldPanel.subPanels } })
// return;
}
}
}
}
}
// updateElementsStates() {
// this.uiPanels.forEach(p => {
// p.subPanels.forEach(s => {
// s.uiElements.forEach(e => {
// let container = Container.containers[e.cid];
// if (container) {
// let node = container._nodes[e.id];
// if (node) {
// e.state = node.properties['state'];
// }
// else console.log("Can't update dashboard element state. Node [ " + e.cid + "/" + e.id + "] is not found");
// }
// else console.log("Can't update dashboard element state. Node container [" + e.cid + "] is not found");
// })
// })
// });
// }
// updateElementStateForNode(node: UiNode) {
// let element = this.getUiElementForNode(node);
// if (element)
// element.state = node.properties['state'];
// else console.log("Can't update dashboard element state. Element for node " + node.getReadableId() + " is not found");
// }
getUiElementForNode(node: UiNode): UiElement {
var panel = this.getUiPanel(node.settings["ui-panel"].value);
if (!panel)
return;
for (var s = 0; s < panel.subPanels.length; s++) {
var subPanel = panel.subPanels[s];
for (var e = 0; e < subPanel.uiElements.length; e++) {
var element = subPanel.uiElements[e];
if (element.cid == node.container.id
&& element.id == node.id)
return element;
}
}
};
getUiPanelForNode(node: UiNode): UiPanel {
return this.uiPanels.find(p => p.name === node.settings["ui-panel"].value);
};
getUiPanel(name: string): UiPanel {
return this.uiPanels.find(p => p.name === name);
};
getUiPanels(): Array<UiPanel> {
return this.uiPanels;
};
getUiPanelsList(): Array<string> {
let arr = [];
this.uiPanels.forEach(p => {
arr.push({ name: p.name, title: p.title, icon: p.icon })
});
return arr;
};
addUiPanel(name: string, callback?: (err?: Error, doc?: UiPanel) => void): UiPanel {
var subPanel: UiSubpanel = {
title: "",
uiElements: []
}
var panel: UiPanel = {
name: name,
title: name,
icon: "label_outline",
subPanels: [subPanel]
};
this.uiPanels.push(panel);
return panel;
// return this.db.addUiPanel(panel, (err, doc) => {
// if (!err)
// this.uiPanels.push(panel);
// callback(err, doc);
// });
};
// updateUiPanel(name: string, update: any, callback?: (err?: Error) => void) {
// this.db.updateUiPanel(name, update, (err) => {
// if (!err) {
// callback(err);
// return;
// }
// this.db.getUiPanel(name, (err, doc) => {
// if (err) {
// callback(err);
// return;
// }
// this.uiPanels = this.uiPanels.filter(p => p.name != name);
// this.uiPanels.push(doc);
// })
// });
// };
// removeUiPanel(name: string, callback?: (err?: Error) => void) {
// this.uiPanels = this.uiPanels.filter(p => p.name != name);
// this.db.removeUiPanel(name, callback);
// };
// dropUiPanels(callback?: (err?: Error) => void) {
// this.uiPanels = [];
// this.db.dropUiPanels(callback);
// };
}<|fim▁end|> | var oldPanel = this.getUiPanel(node.settings["ui-panel"].value);
if (oldPanel) { |
<|file_name|>walletunlock.py<|end_file_name|><|fim▁begin|>from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:5332")<|fim▁hole|><|fim▁end|> | pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60) |
<|file_name|>rollback_test.rs<|end_file_name|><|fim▁begin|>extern crate clementine;
use clementine::{Database, Data, Result, Error, ErrorKind, Config};
#[test]
fn test_rollback_update() {
let db = &Database::new(Config::default()).unwrap();
let update_ok_result = db.update(|txn| -> Result<()> {
txn.update("1", Data::Int(1));
Ok(())
});
assert!(update_ok_result.is_ok());
let update_fail_result = db.update(|txn| -> Result<()> {
txn.update("1", Data::Int(2));
Err(Error::new(ErrorKind::DataBaseClosed))
});
assert!(update_fail_result.is_ok());
let read_result = db.read(|txn| -> Result<()> {
assert_eq!(&Data::Int(1), txn.get("1").unwrap());
Ok(())
});
assert!(read_result.is_ok());
}
#[test]
fn test_rollback_remove() {
let db = &Database::new(Config::default()).unwrap();
let update_ok_result = db.update(|txn| -> Result<()> {
txn.update("1", Data::Int(1));
Ok(())
});
assert!(update_ok_result.is_ok());
let read_result = db.read(|txn| -> Result<()> {
assert_eq!(&Data::Int(1), txn.get("1").unwrap());
Ok(())
});
assert!(read_result.is_ok());
let update_fail_result = db.update(|txn| -> Result<()> {
txn.remove("1");
Err(Error::new(ErrorKind::DataBaseClosed))
});
assert!(update_fail_result.is_ok());
let read_rollback_result = db.read(|txn| -> Result<()> {
assert_eq!(&Data::Int(1), txn.get("1").unwrap());
Ok(())
});
assert!(read_rollback_result.is_ok());
}
#[test]
fn test_rollback_remove_all() {<|fim▁hole|> txn.update("2", Data::Int(2));
Ok(())
});
assert!(update_result.is_ok());
let update_fail_result = db.update(|txn| -> Result<()> {
txn.update("1", Data::Int(1));
txn.clear();
Err(Error::new(ErrorKind::DataBaseClosed))
});
assert!(update_fail_result.is_ok());
let read_rollback_result = db.read(|txn| -> Result<()> {
assert_eq!(&Data::Int(1), txn.get("1").unwrap());
assert_eq!(&Data::Int(2), txn.get("2").unwrap());
Ok(())
});
assert!(read_rollback_result.is_ok());
}<|fim▁end|> | let db = &Database::new(Config::default()).unwrap();
let update_result = db.update(|txn| -> Result<()> {
txn.update("1", Data::Int(1)); |
<|file_name|>Canvas.cpp<|end_file_name|><|fim▁begin|>/*=============================================================================
Copyright (c) 2012, Ludo Sapiens Inc. and contributors.
See accompanying file LICENSE.txt for details.
=============================================================================*/
#include <Fusion/Widget/Canvas.h>
#include <Fusion/VM/VMObjectPool.h>
#include <CGMath/CGMath.h>
#include <Base/ADT/StringMap.h>
/*==============================================================================
UNNAME NAMESPACE
==============================================================================*/
UNNAMESPACE_BEGIN
//------------------------------------------------------------------------------
//!
inline void
execute( const VMRef& ref, Widget* widget, const RCP<Widget>& item )
{
if( ref.isValid() )
{
VMState* vm = ref.vm();
VM::push( vm, ref );
VM::pushProxy( vm, widget );
VM::pushProxy( vm, item.ptr() );
VM::ecall( vm, 2, 0 );
}
}
//------------------------------------------------------------------------------
//!
int
getCanvasRectVM( VMState* vm )
{
Canvas* canvas = (Canvas*)VM::thisPtr( vm );
VM::push( vm, canvas->canvasRect() );
return 1;
}
//------------------------------------------------------------------------------
//!
enum {
ATTRIB_GET_CANVAS_RECT,
ATTRIB_OFFSET,
ATTRIB_ONMODIFY
};
StringMap _attributes(
"getCanvasRect", ATTRIB_GET_CANVAS_RECT,
"offset", ATTRIB_OFFSET,
"onModify", ATTRIB_ONMODIFY,
""
);
//------------------------------------------------------------------------------
//!
const char* _canvas_str_ = "canvas";
UNNAMESPACE_END
NAMESPACE_BEGIN
/*==============================================================================
CLASS Canvas
==============================================================================*/
//------------------------------------------------------------------------------
//!
void
Canvas::initialize()
{
VMObjectPool::registerObject( "UI", _canvas_str_, stdCreateVM<Canvas>, stdGetVM<Canvas>, stdSetVM<Canvas> );
}
//------------------------------------------------------------------------------
//!
Canvas::Canvas()
: WidgetContainer(),
_offset(0.0f,0.0f),
_canvasRectCached( false )
{}
//------------------------------------------------------------------------------
//!
Canvas::~Canvas()
{}
//------------------------------------------------------------------------------
//!
void
Canvas::render( const RCP<Gfx::RenderNode>& rn )
{
if( isScissored() )
{
// Render the widget container.
Widget::render( rn );
// Render the contained widgets.
const int* sc = rn->current()->addScissor( (int)_scPos.x, (int)_scPos.y, (int)_scSize.x, (int)_scSize.y );
Container::ConstIterator it = _widgets.begin();
Container::ConstIterator end = _widgets.end();
for( ; it != end; ++it )
{
if( !(*it)->hidden() ) (*it)->render( rn );
}
rn->current()->setScissor( sc[0], sc[1], sc[2], sc[3] );
}
else
{
WidgetContainer::render( rn );
}
}
//------------------------------------------------------------------------------
//!
bool
Canvas::isAttribute
( const char* name ) const
{
if( _attributes[ name ] != StringMap::INVALID ) return true;
return WidgetContainer::isAttribute( name );
}
//------------------------------------------------------------------------------
//!
Vec4f
Canvas::canvasRect() const
{
if( !_canvasRectCached )
{
Container::ConstIterator it = _widgets.begin();
Container::ConstIterator end = _widgets.end();
_canvasRect = Vec4f(
_offset.x,
_offset.y,
_offset.x + actualSize().x,
_offset.y + actualSize().y
);
for( ; it != end; ++it )
{
if( (*it)->hidden() ) continue;
CGM::clampMax( _canvasRect.x, (*it)->absPosition().x );
CGM::clampMax( _canvasRect.y, (*it)->absPosition().y );
CGM::clampMin( _canvasRect.z, (*it)->absPosition().x + (*it)->actualSize().x );
CGM::clampMin( _canvasRect.w, (*it)->absPosition().y + (*it)->actualSize().y );
}
_canvasRectCached = true;
}
return _canvasRect;
}
//------------------------------------------------------------------------------
//!
void
Canvas::offset( const Vec2f& val )
{
if( val != _offset )
{
_offset = val;
modified();
markForUpdate();
}
}
//------------------------------------------------------------------------------
//!
Vec2f
Canvas::performComputeBaseSize()
{
return Vec2f( 0.0f, 0.0f );
}
//------------------------------------------------------------------------------
//!
void
Canvas::performSetGeometry()
{
Container::ConstIterator it = _widgets.begin();
Container::ConstIterator end = _widgets.end();
for( ; it != end; ++it )
{
if( (*it)->hidden() ) continue;
(*it)->geometry(
globalPosition() - _offset,
(*it)->localPosition(),
(*it)->actualBaseSize()
);
}
// Ignores flexibility of child
_canvasRectCached = false;
canvasRect();
modified();
_scPos.x = globalPosition().x + border().x;
_scPos.y = globalPosition().y + border().y;<|fim▁hole|> _scSize.x = actualSize().x - border().x - border().z;
_scSize.y = actualSize().y - border().y - border().w;
}
//------------------------------------------------------------------------------
//!
void
Canvas::performSetPosition()
{
Container::ConstIterator it = _widgets.begin();
Container::ConstIterator end = _widgets.end();
for( ; it != end; ++it )
{
if( !(*it)->hidden() )
{
(*it)->position( globalPosition() - _offset, (*it)->localPosition() );
}
}
_scPos.x = globalPosition().x + border().x;
_scPos.y = globalPosition().y + border().y;
}
//------------------------------------------------------------------------------
//!
bool
Canvas::isScissored() const
{
Vec4f rect = canvasRect();
if( _offset.x > rect.x || _offset.y > rect.y ||
_offset.x + actualSize().x < rect.z ||
_offset.y + actualSize().y < rect.w )
{
return true;
}
return false;
}
//------------------------------------------------------------------------------
//!
void
Canvas::modified()
{
_onModify.exec( this );
execute( _onModifyRef, this, this );
}
//------------------------------------------------------------------------------
//!
const char*
Canvas::meta() const
{
return _canvas_str_;
}
//------------------------------------------------------------------------------
//!
void
Canvas::init( VMState* vm )
{
VM::get( vm, 1, "offset", _offset );
VM::get( vm, 1, "onModify", _onModifyRef );
// Base class init.
WidgetContainer::init( vm );
}
//------------------------------------------------------------------------------
//!
bool
Canvas::performGet( VMState* vm )
{
switch( _attributes[ VM::toCString( vm, 2 ) ] )
{
case ATTRIB_GET_CANVAS_RECT:
VM::push( vm, this, getCanvasRectVM );
return true;
case ATTRIB_OFFSET:
VM::push( vm, _offset );
return true;
case ATTRIB_ONMODIFY:
VM::push( vm, _onModifyRef );
return true;
default: break;
}
return WidgetContainer::performGet( vm );
}
//------------------------------------------------------------------------------
//!
bool
Canvas::performSet( VMState* vm )
{
switch( _attributes[ VM::toCString( vm, 2 ) ] )
{
case ATTRIB_GET_CANVAS_RECT:
return true; // read-only
case ATTRIB_OFFSET:
offset( VM::toVec2f( vm, 3 ) );
return true;
case ATTRIB_ONMODIFY:
VM::toRef( vm, 3, _onModifyRef );
return true;
default: break;
}
return WidgetContainer::performSet( vm );
}
NAMESPACE_END<|fim▁end|> | |
<|file_name|>ORMAP_LayersConfig.py<|end_file_name|><|fim▁begin|># ---------------------------------------------------------------------------
# OrmapLayersConfig.py
# Created by: Shad Campbell
# Date: 3/11/2011
# Updated by:
# Description: This is a configuration file to be customized by each county.
# Do not delete any of the items in this file. If they are not in use then
# specify thier value and/or definition query to "".
# ---------------------------------------------------------------------------
LOTSANNO_LAYER="LotsAnno"
LOTSANNO_QD="\"MapNumber\" = '*MapNumber*'OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
PLATSANNO_LAYER="PlatsAnno"
PLATSANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXCODEANNO_LAYER="TaxCodeAnno"
TAXCODEANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXNUMANNO_LAYER="TaxlotNumberAnno"
TAXNUMANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ACRESANNO_LAYER="TaxlotAcresAnno"
<|fim▁hole|>
ANNO20_LAYER="Anno0020scale"
ANNO20_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO30_LAYER="Anno0030scale"
ANNO30_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO40_LAYER="Anno0040scale"
ANNO40_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO50_LAYER="Anno0050scale"
ANNO50_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO60_LAYER="Anno0060scale"
ANNO60_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO100_LAYER="Anno0100scale"
ANNO100_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO200_LAYER="Anno0200scale"
ANNO200_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO400_LAYER="Anno0400scale"
ANNO400_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO800_LAYER="Anno0800scale"
ANNO800_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO2000_LAYER="Anno2000scale"
ANNO2000_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
CORNER_ABOVE_LAYER="Corner"
CORNER_ABOVE_QD="\"MapNumber\"='*MapNumber*'"
TAXCODELINES_ABOVE_LAYER="TaxCodeLines - Above"
TAXCODELINES_ABOVE_QD=""
TAXLOTLINES_ABOVE_LAYER="TaxlotLines - Above"
TAXLOTLINES_ABOVE_QD="\"LineType\" <> 32"
REFLINES_ABOVE_LAYER="ReferenceLines - Above"
REFLINES_ABOVE_QD="\"MAPNUMBER\" = '*MapNumber*'"
CARTOLINES_ABOVE_LAYER="CartographicLines - Above"
CARTOLINES_ABOVE_QD=""
WATERLINES_ABOVE_LAYER="WaterLines - Above"
WATERLINES_ABOVE_QD=""
WATER_ABOVE_LAYER="Water - Above"
WATER_ABOVE_QD=""
MAPINDEXSEEMAP_LAYER=""
MAPINDEXSEEMAP_QD=""
MAPINDEX_LAYER="SeeMaps"
MAPINDEX_QD="\"IndexMap\" = '*MapNumber*'"
CORNER_BELOW_LAYER="Corner - Below"
CORNER_BELOW_QD=""
TAXCODELINES_BELOW_LAYER="TaxCodeLines - Below"
TAXCODELINES_BELOW_QD=""
TAXLOTLINES_BELOW_LAYER="TaxlotLines - Below"
TAXLOTLINES_BELOW_QD=""
REFLINES_BELOW_LAYER="ReferenceLines - Below"
REFLINES_BELOW_QD=""
CARTOLINES_BELOW_LAYER="CartographicLines - Below"
CARTOLINES_BELOW_QD=""
WATERLINES_BELOW_LAYER="WaterLines - Below"
WATERLINES_BELOW_QD=""
WATER_BELOW_LAYER="Water - Below"
WATER_BELOW_QD=""
PAGELAYOUT_TABLE="giscarto.CREATOR_ASR.PAGELAYOUTELEMENTS"
CANCELLEDNUMBERS_TABLE="giscarto.CREATOR_ASR.CANCELLEDNUMBERS"
CUSTOMDEFINITIONQUERIES_TABLE="CustomDefinitionQueries"
EXTRA1_LAYER="Arrow0010scale"
EXTRA1_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA2_LAYER="Arrow0020scale"
EXTRA2_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA3_LAYER="Arrow0030scale"
EXTRA3_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA4_LAYER="Arrow0040scale"
EXTRA4_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA5_LAYER="Arrow0050scale"
EXTRA5_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA6_LAYER="Arrow0100scale"
EXTRA6_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA7_LAYER="Arrow0200scale"
EXTRA7_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA8_LAYER="Arrow0400scale"
EXTRA8_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA9_LAYER="Arrow2000scale"
EXTRA9_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA10_LAYER="MapSecLines - Below"
EXTRA10_QD="\"MapNumber\"='*MapNumber*'"
EXTRA11_LAYER="Railroad"
EXTRA11_QD="CL <> 'Y'"
EXTRA12_LAYER="MapArea"
EXTRA12_QD="\"MapNumber\"='*MapNumber*'"
EXTRA13_LAYER=""
EXTRA13_QD=""
EXTRA14_LAYER="Taxlots - Above"
EXTRA14_QD="\"MapNumber\"='*MapNumber*'"
EXTRA15_LAYER="Arrow0060scale"
EXTRA15_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA16_LAYER="Landmarks"
EXTRA16_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA17_LAYER=""
EXTRA17_QD=""
EXTRA18_LAYER=""
EXTRA18_QD=""
EXTRA19_LAYER=""
EXTRA19_QD=""
EXTRA20_LAYER=""
EXTRA20_QD=""<|fim▁end|> | ACRESANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO10_LAYER="Anno0010scale"
ANNO10_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
|
<|file_name|>UploaderLauncher.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.util.trace.uploader.launcher;
import com.facebook.buck.core.model.BuildId;
import com.facebook.buck.log.Logger;
import com.facebook.buck.util.env.BuckClasspath;
import com.facebook.buck.util.trace.uploader.types.CompressionType;
import com.google.common.base.Strings;
import java.io.IOException;<|fim▁hole|>import java.net.URI;
import java.nio.file.Path;
/** Utility to upload chrome trace in background. */
public class UploaderLauncher {
private static final Logger LOG = Logger.get(UploaderLauncher.class);
/** Upload chrome trace in background process which runs even after current process dies. */
public static void uploadInBackground(
BuildId buildId,
Path traceFilePath,
String traceFileKind,
URI traceUploadUri,
Path logFile,
CompressionType compressionType) {
LOG.debug("Uploading build trace in the background. Upload will log to %s", logFile);
String buckClasspath = BuckClasspath.getBuckClasspathFromEnvVarOrNull();
if (Strings.isNullOrEmpty(buckClasspath)) {
LOG.error(
BuckClasspath.ENV_VAR_NAME + " env var is not set. Will not upload the trace file.");
return;
}
try {
String[] args = {
"java",
"-cp",
buckClasspath,
"com.facebook.buck.util.trace.uploader.Main",
"--buildId",
buildId.toString(),
"--traceFilePath",
traceFilePath.toString(),
"--traceFileKind",
traceFileKind,
"--baseUrl",
traceUploadUri.toString(),
"--log",
logFile.toString(),
"--compressionType",
compressionType.name(),
};
Runtime.getRuntime().exec(args);
} catch (IOException e) {
LOG.error(e, e.getMessage());
}
}
}<|fim▁end|> | |
<|file_name|>game-id.ts<|end_file_name|><|fim▁begin|>export class GameId {<|fim▁hole|> public game_id: string) {
}
}<|fim▁end|> | constructor( |
<|file_name|>English.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="en" sourcelanguage="it_IT">
<context>
<name>QObject</name>
<message>
<source>La versione del software installato è: </source>
<translation>This version installed is: </translation>
</message>
<message>
<source>USA LA LINEA DI COMANDO: </source>
<translation>USAGE COMMAND LINE : </translation>
</message>
<message>
<source>VISUALIZZA
</source>
<translation>DISPLAYS
</translation>
</message>
<message>
<source>-p or --package </source>
<translation>-p or --package </translation>
</message>
<message>
<source>Selezione del pacchetto da scaricare
</source>
<translation>Download package to select
</translation>
</message>
<message>
<source>-u or --url </source>
<translation>-u or --url </translation>
</message>
<message>
<source>Selezione dell'indirizzo internet:
ESEMPIO: http://
</source>
<translation>Selectiing to url:
EXAMPLE: http://
</translation>
</message>
<message>
<source>VISUALIZZA LA VERSIONE INSTALLATA DEL SOFTWARE:
</source>
<translation>DISPLAYS THE SOFTWARE VERSION:
</translation>
</message>
<message>
<source>-v or --version </source>
<translation>-v or --version </translation>
</message>
<message>
<source>Versione del software
</source>
<translation>Software version
</translation>
</message>
<message>
<source>VISUALIZZA LE INFORMAZIONI DEL PROGRAMMA:
</source>
<translation>DISPLAYS THE SOFTWARE INFORMATION :
</translation>
</message>
<message>
<source>-h or --help </source>
<translation>-h or --help </translation>
</message>
<message>
<source>Informazioni del software.</source>
<translation>software information.</translation>
</message>
<message>
<source>Comando non trovato: </source>
<translation>Command not found: </translation>
</message>
</context>
<context>
<name>update</name>
<message>
<source>Dialog</source>
<translation></translation>
</message>
<message>
<source><html><head/><body><p>Annulla download</p></body></html></source>
<translation><html><head/><body><p>Clear download</p></body></html></translation>
</message>
<message>
<source>Installazione</source>
<translation>Installation</translation>
</message>
<message>
<source><html><head/><body><p>Download aggiornamento</p></body></html></source>
<translation><html><head/><body><p>Download update</p></body></html></translation>
</message>
<message>
<source><html><head/><body><p>Installa aggiornamento</p></body></html></source><|fim▁hole|> <source>Installa aggiornamento</source>
<translation>Install update</translation>
</message>
<message>
<source>Downaload aggiornamento</source>
<translation>Download update</translation>
</message>
<message>
<source>Gestore aggiornamento</source>
<translation>Update management</translation>
</message>
<message>
<source>Scaricamento in corso di: </source>
<translation>Download in progress: </translation>
</message>
<message>
<source>Velocità di scaricamento: </source>
<translation>Fast download: </translation>
</message>
<message>
<source> Dimensione: </source>
<translation> Size: </translation>
</message>
<message>
<source> Tempo stimato: </source>
<translation> Elapsed time: </translation>
</message>
<message>
<source>Download fallito: </source>
<translation>Download failed: </translation>
</message>
<message>
<source>Scaricamento completato</source>
<translation>Download completed</translation>
</message>
<message>
<source>Scaricamento annullato</source>
<translation>Clear download</translation>
</message>
<message>
<source>Errore scaricamento</source>
<translation>Download error</translation>
</message>
<message>
<source>Download fallito </source>
<translation>Download failed</translation>
</message>
</context>
</TS><|fim▁end|> | <translation><html><head/><body><p>Install update</p></body></html></translation>
</message>
<message> |
<|file_name|>rpc_misc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2019-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC misc output."""
import xml.etree.ElementTree as ET
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
)
from test_framework.authproxy import JSONRPCException
class RpcMiscTest(SyscoinTestFramework):
    """Exercise miscellaneous RPCs: CHECK_NONFATAL, getmemoryinfo, logging,
    echoipc and getindexinfo."""

    def set_test_params(self):
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        node = self.nodes[0]

        self.log.info("test CHECK_NONFATAL")
        assert_raises_rpc_error(
            -1,
            'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'',
            lambda: node.echo(arg9='trigger_internal_bug'),
        )

        self.log.info("test getmemoryinfo")
        memory = node.getmemoryinfo()['locked']
        assert_greater_than(memory['used'], 0)
        assert_greater_than(memory['free'], 0)
        assert_greater_than(memory['total'], 0)
        # assert_greater_than_or_equal() for locked in case locking pages failed at some point
        assert_greater_than_or_equal(memory['locked'], 0)
        assert_greater_than(memory['chunks_used'], 0)
        assert_greater_than(memory['chunks_free'], 0)
        assert_equal(memory['used'] + memory['free'], memory['total'])

        self.log.info("test mallocinfo")
        try:
            mallocinfo = node.getmemoryinfo(mode="mallocinfo")
            self.log.info('getmemoryinfo(mode="mallocinfo") call succeeded')
            tree = ET.fromstring(mallocinfo)
            assert_equal(tree.tag, 'malloc')
        except JSONRPCException:
            self.log.info('getmemoryinfo(mode="mallocinfo") not available')
            assert_raises_rpc_error(-8, 'mallocinfo is only available when compiled with glibc 2.10+', node.getmemoryinfo, mode="mallocinfo")
        assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")

        self.log.info("test logging rpc and help")
        # SYSCOIN Test logging RPC returns the expected number of logging categories.
        assert_equal(len(node.logging()), 36)
        # Test toggling a logging category on/off/on with the logging RPC.
        assert_equal(node.logging()['qt'], True)
        node.logging(exclude=['qt'])
        assert_equal(node.logging()['qt'], False)
        node.logging(include=['qt'])
        assert_equal(node.logging()['qt'], True)
        # Test logging RPC returns the logging categories in alphabetical order.
        sorted_logging_categories = sorted(node.logging())
        assert_equal(list(node.logging()), sorted_logging_categories)
        # Test logging help returns the logging categories string in alphabetical order.
        categories = ', '.join(sorted_logging_categories)
        logging_help = self.nodes[0].help('logging')
        assert f"valid logging categories are: {categories}" in logging_help

        self.log.info("test echoipc (testing spawned process in multiprocess build)")
        assert_equal(node.echoipc("hello"), "hello")

        self.log.info("test getindexinfo")
        # Without any indices running the RPC returns an empty object
        assert_equal(node.getindexinfo(), {})

        # Restart the node with indices and wait for them to sync
        self.restart_node(0, ["-txindex", "-blockfilterindex", "-coinstatsindex"])
        self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))

        # Returns a list of all running indices by default
        values = {"synced": True, "best_block_height": 200}
        assert_equal(
            node.getindexinfo(),
            {
                "txindex": values,
                "basic block filter index": values,
                "coinstatsindex": values,
            }
        )

        # Specifying an index by name returns only the status of that index
        for i in {"txindex", "basic block filter index", "coinstatsindex"}:
            assert_equal(node.getindexinfo(i), {i: values})

        # Specifying an unknown index name returns an empty result
        assert_equal(node.getindexinfo("foo"), {})


if __name__ == '__main__':
    RpcMiscTest().main()
<|file_name|>displayquerygrid.js<|end_file_name|><|fim▁begin|>goog.provide('gmf.DisplayquerygridController');
goog.provide('gmf.displayquerygridDirective');
goog.require('gmf');
goog.require('ngeo.CsvDownload');
goog.require('ngeo.GridConfig');
/** @suppress {extraRequire} */
goog.require('ngeo.gridDirective');
goog.require('ngeo.FeatureOverlay');
goog.require('ngeo.FeatureOverlayMgr');
goog.require('ol.Collection');
goog.require('ol.style.Circle');
goog.require('ol.style.Fill');
goog.require('ol.style.Stroke');
goog.require('ol.style.Style');
ngeo.module.value('gmfDisplayquerygridTemplateUrl',
    /**
     * Resolve the template URL of the query grid directive: an explicit
     * `gmf-displayquerygrid-templateurl` attribute wins, otherwise the
     * default template shipped with gmf is used.
     * @param {angular.JQLite} element Element.
     * @param {angular.Attributes} attrs Attributes.
     * @return {string} Template.
     */
    function(element, attrs) {
      var customTemplateUrl = attrs['gmfDisplayquerygridTemplateurl'];
      if (customTemplateUrl !== undefined) {
        return customTemplateUrl;
      }
      return gmf.baseTemplateUrl + '/displayquerygrid.html';
    });
/**
 * Provides a directive to display results of the {@link ngeo.queryResult} in a
 * grid and shows related features on the map using
 * the {@link ngeo.FeatureOverlayMgr}.
 *
 * You can override the default directive's template by setting the
 * value `gmfDisplayquerygridTemplateUrl`.
 *
 * Features displayed on the map use a default style but you can override these
 * styles by passing ol.style.Style objects as attributes of this directive.
 *
 * Example:
 *
 *      <gmf-displayquerygrid
 *        gmf-displayquerygrid-map="ctrl.map"
 *        gmf-displayquerygrid-featuresstyle="ctrl.styleForAllFeatures"
 *        gmf-displayquerygrid-sourceselectedfeaturestyle="ctrl.styleForTheCurrentFeature">
 *      </gmf-displayquerygrid>
 *
 * @htmlAttribute {boolean} gmf-displayquerygrid-active The active state of the component.
 * @htmlAttribute {ol.style.Style} gmf-displayquerygrid-featuresstyle A style
 *     object for all features from the result of the query.
 * @htmlAttribute {ol.style.Style} gmf-displayquerygrid-sourceselectedfeaturestyle
 *     A style object for the currently selected features.
 * @htmlAttribute {ol.Map} gmf-displayquerygrid-map The map.
 * @htmlAttribute {boolean?} gmf-displayquerygrid-removeemptycolumns Optional. Should
 *     empty columns be hidden? Default: `false`.
 * @htmlAttribute {number?} gmf-displayquerygrid-maxresults Optional. Maximum
 *     number of results a query may return.
 * @htmlAttribute {number?} gmf-displayquerygrid-maxrecenterzoom Optional. Maximum
 *     zoom-level to use when zooming to selected features.
 * @htmlAttribute {gmfx.GridMergeTabs?} gmf-displayquerygrid-mergetabs Optional.
 *     Configuration to merge grids with the same attributes into a single grid.
 * @param {string} gmfDisplayquerygridTemplateUrl URL to a template.
 * @return {angular.Directive} Directive Definition Object.
 * @ngInject
 * @ngdoc directive
 * @ngname gmfDisplayquerygrid
 */
gmf.displayquerygridDirective = function(
    gmfDisplayquerygridTemplateUrl) {
  return {
    bindToController: true,
    controller: 'GmfDisplayquerygridController',
    controllerAs: 'ctrl',
    templateUrl: gmfDisplayquerygridTemplateUrl,
    replace: true,
    restrict: 'E',
    scope: {
      'active': '=gmfDisplayquerygridActive',
      'featuresStyleFn': '&gmfDisplayquerygridFeaturesstyle',
      'selectedFeatureStyleFn': '&gmfDisplayquerygridSourceselectedfeaturestyle',
      'getMapFn': '&gmfDisplayquerygridMap',
      'removeEmptyColumnsFn': '&?gmfDisplayquerygridRemoveemptycolumns',
      'maxResultsFn': '&?gmfDisplayquerygridMaxresults',
      'maxRecenterZoomFn': '&?gmfDisplayquerygridMaxrecenterzoom',
      'mergeTabsFn': '&?gmfDisplayquerygridMergetabs'
    }
  };
};

gmf.module.directive('gmfDisplayquerygrid', gmf.displayquerygridDirective);
/**
 * Controller for the query grid.
 *
 * @param {!angular.Scope} $scope Angular scope.
 * @param {ngeox.QueryResult} ngeoQueryResult ngeo query result.
 * @param {ngeo.FeatureOverlayMgr} ngeoFeatureOverlayMgr The ngeo feature
 *     overlay manager service.
 * @param {angular.$timeout} $timeout Angular timeout service.
 * @param {ngeo.CsvDownload} ngeoCsvDownload CSV download service.
 * @param {ngeo.Query} ngeoQuery Query service.
 * @param {angular.JQLite} $element Element.
 * @constructor
 * @export
 * @ngInject
 * @ngdoc Controller
 * @ngname GmfDisplayquerygridController
 */
gmf.DisplayquerygridController = function($scope, ngeoQueryResult,
    ngeoFeatureOverlayMgr, $timeout, ngeoCsvDownload, ngeoQuery, $element) {

  /**
   * @type {!angular.Scope}
   * @private
   */
  this.$scope_ = $scope;

  /**
   * @type {angular.$timeout}
   * @private
   */
  this.$timeout_ = $timeout;

  /**
   * @type {ngeox.QueryResult}
   * @export
   */
  this.ngeoQueryResult = ngeoQueryResult;

  /**
   * @type {ngeo.CsvDownload}
   * @private
   */
  this.ngeoCsvDownload_ = ngeoCsvDownload;

  /**
   * @type {angular.JQLite}
   * @private
   */
  this.$element_ = $element;

  /**
   * @type {number}
   * @export
   */
  this.maxResults = ngeoQuery.getLimit();

  /**
   * @type {boolean}
   * @export
   */
  this.active = false;

  /**
   * @type {boolean}
   * @export
   */
  this.pending = false;

  /**
   * @type {!Object.<string, gmfx.GridSource>}
   * @export
   */
  this.gridSources = {};

  /**
   * IDs of the grid sources in the order they were loaded.
   * @type {Array.<string>}
   * @export
   */
  this.loadedGridSources = [];

  /**
   * The id of the currently shown query source.
   * @type {string|number|null}
   * @export
   */
  this.selectedTab = null;

  /**
   * @type {boolean}
   * @private
   */
  this.removeEmptyColumns_ = this['removeEmptyColumnsFn'] ?
      this['removeEmptyColumnsFn']() === true : false;

  /**
   * @type {number|undefined}
   * @export
   */
  this.maxRecenterZoom = this['maxRecenterZoomFn'] ? this['maxRecenterZoomFn']() : undefined;

  var mergeTabs = this['mergeTabsFn'] ? this['mergeTabsFn']() : {};

  /**
   * @type {!gmfx.GridMergeTabs}
   * @private
   */
  this.mergeTabs_ = mergeTabs ? mergeTabs : {};

  /**
   * A mapping between row uid and the corresponding feature for each
   * source.
   * @type {!Object.<string, Object.<string, ol.Feature>>}
   * @private
   */
  this.featuresForSources_ = {};

  // Styles for displayed features (features) and selected features
  // (highlightFeatures_) (user can set both styles).

  /**
   * @type {ol.Collection}
   * @private
   */
  this.features_ = new ol.Collection();

  var featuresOverlay = ngeoFeatureOverlayMgr.getFeatureOverlay();
  var featuresStyle = this['featuresStyleFn']();
  if (featuresStyle !== undefined) {
    goog.asserts.assertInstanceof(featuresStyle, ol.style.Style);
    featuresOverlay.setStyle(featuresStyle);
  }
  featuresOverlay.setFeatures(this.features_);

  /**
   * @type {ngeo.FeatureOverlay}
   * @private
   */
  this.highlightFeatureOverlay_ = ngeoFeatureOverlayMgr.getFeatureOverlay();

  /**
   * @type {ol.Collection}
   * @private
   */
  this.highlightFeatures_ = new ol.Collection();
  this.highlightFeatureOverlay_.setFeatures(this.highlightFeatures_);

  var highlightFeatureStyle = this['selectedFeatureStyleFn']();
  if (highlightFeatureStyle !== undefined) {
    goog.asserts.assertInstanceof(highlightFeatureStyle, ol.style.Style);
  } else {
    // Default highlight style: semi-transparent red fill with a red outline.
    var fill = new ol.style.Fill({color: [255, 0, 0, 0.6]});
    var stroke = new ol.style.Stroke({color: [255, 0, 0, 1], width: 2});
    highlightFeatureStyle = new ol.style.Style({
      fill: fill,
      image: new ol.style.Circle({fill: fill, radius: 5, stroke: stroke}),
      stroke: stroke,
      zIndex: 10
    });
  }
  this.highlightFeatureOverlay_.setStyle(highlightFeatureStyle);

  var map = null;
  var mapFn = this['getMapFn'];
  if (mapFn) {
    map = mapFn();
    goog.asserts.assertInstanceof(map, ol.Map);
  }

  /**
   * @type {ol.Map}
   * @private
   */
  this.map_ = map;

  // Watch the ngeo query result service.
  this.$scope_.$watchCollection(
      function() {
        return ngeoQueryResult;
      },
      function(newQueryResult, oldQueryResult) {
        if (newQueryResult !== oldQueryResult) {
          this.updateData_();
        }
      }.bind(this));

  /**
   * An unregister function returned from `$scope.$watchCollection` for
   * "on-select" changes (when rows are selected/unselected).
   * @type {?function()}
   * @private
   */
  this.unregisterSelectWatcher_ = null;
};
/**
 * Returns a list of grid sources in the order they were loaded.
 * @export
 * @return {Array.<gmfx.GridSource>} Grid sources.
 */
gmf.DisplayquerygridController.prototype.getGridSources = function() {
  var orderedSources = [];
  for (var i = 0; i < this.loadedGridSources.length; i++) {
    orderedSources.push(this.gridSources[this.loadedGridSources[i]]);
  }
  return orderedSources;
};
/**
 * Rebuild the grids from the current content of `ngeoQueryResult`. Called by
 * the `$watchCollection` registered in the constructor.
 * @private
 */
gmf.DisplayquerygridController.prototype.updateData_ = function() {
  // close if there are no results
  if (this.ngeoQueryResult.total === 0 && !this.hasOneWithTooManyResults_()) {
    var oldActive = this.active;
    this.clear();
    if (oldActive) {
      // don't close if there are pending queries
      this.active = this.ngeoQueryResult.pending;
      this.pending = this.ngeoQueryResult.pending;
    }
    return;
  }

  this.active = true;
  this.pending = false;
  var sources = this.ngeoQueryResult.sources;
  // merge sources if requested
  if (Object.keys(this.mergeTabs_).length > 0) {
    sources = this.getMergedSources_(sources);
  }

  // create grids (only for source with features or with too many results)
  sources.forEach(function(source) {
    if (source.tooManyResults) {
      // "too many results" sources get a tab but no grid configuration.
      this.makeGrid_(null, source);
    } else {
      var features = source.features;
      if (features.length > 0) {
        this.collectData_(source);
      }
    }
  }.bind(this));

  if (this.loadedGridSources.length == 0) {
    // if no grids were created, do not show
    this.active = false;
    return;
  }

  // keep the first existing navigation tab open
  if (this.selectedTab === null || !(('' + this.selectedTab) in this.gridSources)) {
    // selecting the tab is done in a timeout, because otherwise in rare cases
    // `ng-class` might set the `active` class on multiple tabs.
    this.$timeout_(function() {
      var firstSourceId = this.loadedGridSources[0];
      this.selectTab(this.gridSources[firstSourceId]);
      this.reflowGrid_(firstSourceId);
    }.bind(this), 0);
  }
};
/**
 * @private
 * @return {boolean} If one of the source has too many results.
 */
gmf.DisplayquerygridController.prototype.hasOneWithTooManyResults_ = function() {
  var sources = this.ngeoQueryResult.sources;
  for (var i = 0; i < sources.length; i++) {
    if (sources[i].tooManyResults) {
      return true;
    }
  }
  return false;
};
/**
 * Returns if the given grid source is selected?
 * @export
 * @param {gmfx.GridSource} gridSource Grid source.
 * @return {boolean} Is selected?
 */
gmf.DisplayquerygridController.prototype.isSelected = function(gridSource) {
  var sourceId = gridSource.source.id;
  return sourceId === this.selectedTab;
};
/**
 * Try to merge the mergable sources.
 * @param {Array.<ngeox.QueryResultSource>} sources Sources.
 * @return {Array.<ngeox.QueryResultSource>} The merged sources.
 * @private
 */
gmf.DisplayquerygridController.prototype.getMergedSources_ = function(sources) {
  var allSources = [];
  /** @type {Object.<string, ngeox.QueryResultSource>} */
  var mergedSources = {};

  for (var i = 0; i < sources.length; i++) {
    var source = sources[i];
    // `getMergedSource_` registers mergeable sources in `mergedSources` and
    // returns `null` for sources that must be kept as they are.
    if (this.getMergedSource_(source, mergedSources) === null) {
      allSources.push(source);
    }
  }

  for (var mergedSourceId in mergedSources) {
    allSources.push(mergedSources[mergedSourceId]);
  }

  return allSources;
};
/**
 * Check if the given source should be merged. If so, an artificial source
 * that will contain the features of all mergable sources is returned. If not,
 * `null` is returned.
 * @param {ngeox.QueryResultSource} source Source.
 * @param {Object.<string, ngeox.QueryResultSource>} mergedSources Merged sources,
 *     keyed by merge-source id; updated in place.
 * @return {?ngeox.QueryResultSource} A merged source or null if the source should
 *     not be merged.
 * @private
 */
gmf.DisplayquerygridController.prototype.getMergedSource_ = function(source, mergedSources) {
  var mergeSourceId = null;

  // Find the merge-tab (if any) whose configured source ids contain this source.
  for (var currentMergeSourceId in this.mergeTabs_) {
    var sourceIds = this.mergeTabs_[currentMergeSourceId];
    var containsSource = sourceIds.some(function(sourceId) {
      return sourceId == source.id;
    });
    if (containsSource) {
      mergeSourceId = currentMergeSourceId;
      break;
    }
  }

  if (mergeSourceId === null) {
    // this source should not be merged
    return null;
  }

  /** @type {ngeox.QueryResultSource} */
  var mergeSource;
  if (mergeSourceId in mergedSources) {
    mergeSource = mergedSources[mergeSourceId];
  } else {
    // First source for this merge tab: create the artificial merged source.
    mergeSource = {
      features: [],
      id: mergeSourceId,
      label: mergeSourceId,
      pending: false,
      queried: true,
      tooManyResults: false,
      totalFeatureCount: undefined
    };
    mergedSources[mergeSourceId] = mergeSource;
  }

  // add features of source to merge source
  source.features.forEach(function(feature) {
    mergeSource.features.push(feature);
  });

  // if one of the source has too many results, the resulting merged source will
  // also be marked with `tooManyResults` and will not contain any features.
  mergeSource.tooManyResults = mergeSource.tooManyResults || source.tooManyResults;
  if (mergeSource.tooManyResults) {
    mergeSource.totalFeatureCount = (mergeSource.totalFeatureCount !== undefined) ?
        mergeSource.totalFeatureCount + mergeSource.features.length : mergeSource.features.length;
    mergeSource.features = [];
  }
  if (source.totalFeatureCount !== undefined) {
    mergeSource.totalFeatureCount = (mergeSource.totalFeatureCount !== undefined) ?
        mergeSource.totalFeatureCount + source.totalFeatureCount : source.totalFeatureCount;
  }

  return mergeSource;
};
/**
 * Collect all features in the queryResult object, build the grid rows for the
 * given source and remember the feature behind each row (for highlighting).
 * @param {ngeox.QueryResultSource} source Result source.
 * @private
 */
gmf.DisplayquerygridController.prototype.collectData_ = function(source) {
  var features = source.features;
  var allProperties = [];
  var featureGeometriesNames = [];
  var featuresForSource = {};
  var properties, featureGeometryName;
  features.forEach(function(feature) {
    properties = feature.getProperties();
    if (properties !== undefined) {
      // Keeps distinct geometry names to remove them later.
      featureGeometryName = feature.getGeometryName();
      if (featureGeometriesNames.indexOf(featureGeometryName) === -1) {
        featureGeometriesNames.push(featureGeometryName);
      }

      allProperties.push(properties);
      // Row uid -> feature, used by `updateFeatures_` to map selected rows
      // back to map features.
      featuresForSource[ngeo.GridConfig.getRowUid(properties)] = feature;
    }
  }.bind(this));

  this.cleanProperties_(allProperties, featureGeometriesNames);
  if (allProperties.length > 0) {
    var gridCreated = this.makeGrid_(allProperties, source);
    if (gridCreated) {
      this.featuresForSources_['' + source.id] = featuresForSource;
    }
  }
};
/**
 * Remove all unwanted columns (geometry properties and `boundedBy`), and
 * optionally all columns that are empty in every row.
 * @param {Array.<Object>} allProperties A row.
 * @param {Array.<string>} featureGeometriesNames Geometry names.
 * @private
 */
gmf.DisplayquerygridController.prototype.cleanProperties_ = function(
    allProperties, featureGeometriesNames) {
  for (var i = 0; i < allProperties.length; i++) {
    var properties = allProperties[i];
    for (var j = 0; j < featureGeometriesNames.length; j++) {
      delete properties[featureGeometriesNames[j]];
    }
    delete properties['boundedBy'];
  }

  if (this.removeEmptyColumns_ === true) {
    this.removeEmptyColumnsFn_(allProperties);
  }
};
/**
 * Remove columns that will be completely empty between each properties.
 *
 * NOTE(review): only the keys of the *first* row are candidates for keeping;
 * a key that appears only in later rows is always removed. This is consistent
 * with `getGridConfiguration_`, which builds its columns from `data[0]` —
 * confirm this is intended for merged sources with heterogeneous attributes.
 * @param {Array.<Object>} allProperties A row.
 * @private
 */
gmf.DisplayquerygridController.prototype.removeEmptyColumnsFn_ = function(
    allProperties) {
  // Keep all keys that correspond to at least one value in a properties object.
  var keysToKeep = [];
  var i, key;
  for (key in allProperties[0]) {
    for (i = 0; i < allProperties.length; i++) {
      if (allProperties[i][key] !== undefined) {
        keysToKeep.push(key);
        break;
      }
    }
  }
  // Get all keys that previously always refers always to an empty value.
  var keyToRemove;
  allProperties.forEach(function(properties) {
    keyToRemove = [];
    for (key in properties) {
      if (keysToKeep.indexOf(key) === -1) {
        keyToRemove.push(key);
      }
    }
    // Remove these keys.
    keyToRemove.forEach(function(key) {
      delete properties[key];
    });
  });
};
/**
 * Register a grid source entry for the given query source.
 * @param {?Array.<Object>} data Grid rows, or `null` for a source that only
 *     reports "too many results" (no grid configuration is built then).
 * @param {ngeox.QueryResultSource} source Query source.
 * @return {boolean} Returns true if a grid was created.
 * @private
 */
gmf.DisplayquerygridController.prototype.makeGrid_ = function(data, source) {
  var sourceId = '' + source.id;
  var gridConfig = null;
  if (data !== null) {
    gridConfig = this.getGridConfiguration_(data);
    if (gridConfig === null) {
      // Every column was filtered out; nothing to display.
      return false;
    }
  }
  if (this.loadedGridSources.indexOf(sourceId) == -1) {
    this.loadedGridSources.push(sourceId);
  }
  this.gridSources[sourceId] = {
    configuration: gridConfig,
    source: source
  };
  return true;
};
/**
 * Build the grid configuration for the given rows. The columns are taken
 * from the keys of the first row; the internal `ol_uid` key is skipped.
 * @param {Array.<!Object>} data Grid rows.
 * @return {?ngeo.GridConfig} Grid config, or `null` if there is no column.
 * @private
 */
gmf.DisplayquerygridController.prototype.getGridConfiguration_ = function(
    data) {
  goog.asserts.assert(data.length > 0);
  /** @type {Array.<ngeox.GridColumnDef>} */
  var columnDefs = [];
  Object.keys(data[0]).forEach(function(columnName) {
    if (columnName !== 'ol_uid') {
      columnDefs.push(/** @type {ngeox.GridColumnDef} */ ({
        name: columnName
      }));
    }
  });

  if (columnDefs.length === 0) {
    // no columns, do not show grid
    return null;
  }
  return new ngeo.GridConfig(data, columnDefs);
};
/**
 * Remove the current selected feature and source and remove all features
 * from the map.
 * @export
 */
gmf.DisplayquerygridController.prototype.clear = function() {
  this.active = false;
  this.pending = false;
  this.gridSources = {};
  this.loadedGridSources = [];
  this.selectedTab = null;
  // NOTE(review): `tooManyResults` is not declared in the constructor —
  // confirm whether this assignment is a leftover or intentionally creates
  // the property here.
  this.tooManyResults = false;
  this.features_.clear();
  this.highlightFeatures_.clear();
  this.featuresForSources_ = {};
  if (this.unregisterSelectWatcher_) {
    this.unregisterSelectWatcher_();
  }
};
/**
 * Select the tab for the given grid source, re-register the row-selection
 * watcher on its grid configuration and refresh the feature overlays.
 * @param {gmfx.GridSource} gridSource Grid source.
 * @export
 */
gmf.DisplayquerygridController.prototype.selectTab = function(gridSource) {
  var source = gridSource.source;
  this.selectedTab = source.id;

  if (this.unregisterSelectWatcher_) {
    this.unregisterSelectWatcher_();
    this.unregisterSelectWatcher_ = null;
  }

  if (gridSource.configuration !== null) {
    this.unregisterSelectWatcher_ = this.$scope_.$watchCollection(
        function() {
          return gridSource.configuration.selectedRows;
        },
        function(newSelected, oldSelectedRows) {
          // Bug fix: the previous comparison `Object.keys(a) !== Object.keys(b)`
          // was always true (two distinct arrays are never strictly equal), so
          // the overlays were refreshed on every digest of the watcher. Compare
          // the actual key sets instead.
          var newKeys = Object.keys(newSelected);
          var oldKeys = Object.keys(oldSelectedRows);
          var selectionChanged = newKeys.length !== oldKeys.length ||
              newKeys.some(function(key) {
                return !(key in oldSelectedRows);
              });
          if (selectionChanged) {
            this.onSelectionChanged_();
          }
        }.bind(this));
  }
  // Always refresh once on tab change, independently of the watcher.
  this.updateFeatures_(gridSource);
};
/**
 * Force the table of the given pane to recompute its layout.
 * @private
 * @param {string|number} sourceId Id of the source that should be refreshed.
 */
gmf.DisplayquerygridController.prototype.reflowGrid_ = function(sourceId) {
  // this is a "work-around" to make sure that the grid is rendered correctly.
  // when a pane is activated by setting `this.selectedTab`, the class `active`
  // is not yet set on the pane. that's why the class is set manually, and
  // after the pane is shown (in the next digest loop), the grid table can
  // be refreshed.
  var activePane = this.$element_.find('div.tab-pane#' + sourceId);
  activePane.removeClass('active').addClass('active');
  this.$timeout_(function() {
    // jQuery plugin event used by the floatThead/grid table to re-layout.
    activePane.find('div.ngeo-grid-table-container table')['trigger']('reflow');
  });
};
/**
 * Called when the row selection has changed.
 * @private
 */
gmf.DisplayquerygridController.prototype.onSelectionChanged_ = function() {
  if (this.selectedTab === null) {
    return;
  }
  this.updateFeatures_(this.gridSources['' + this.selectedTab]);
};
/**
 * Refresh the two feature overlays: selected rows go to the highlight
 * collection, all other rows of the source to the plain feature collection.
 * @param {gmfx.GridSource} gridSource Grid source
 * @private
 */
gmf.DisplayquerygridController.prototype.updateFeatures_ = function(gridSource) {
  this.features_.clear();
  this.highlightFeatures_.clear();

  if (gridSource.configuration === null) {
    // "too many results" sources have no rows, hence nothing to display.
    return;
  }

  var sourceId = '' + gridSource.source.id;
  var featuresForSource = this.featuresForSources_[sourceId];
  var selectedRows = gridSource.configuration.selectedRows;

  for (var rowId in featuresForSource) {
    var feature = featuresForSource[rowId];
    if (rowId in selectedRows) {
      this.highlightFeatures_.push(feature);
    } else {
      this.features_.push(feature);
    }
  }
};
/**
 * Get the currently shown grid source.
 * @export
 * @return {gmfx.GridSource|null} Grid source.
 */
gmf.DisplayquerygridController.prototype.getActiveGridSource = function() {
  return (this.selectedTab === null) ?
      null : this.gridSources['' + this.selectedTab];
};
/**
 * Returns if a row of the currently active grid is selected?
 * @export
 * @return {boolean} Is one selected?
 */
gmf.DisplayquerygridController.prototype.isOneSelected = function() {
  // `getSelectedRowCount` already returns 0 when there is no active source
  // or no grid configuration.
  return this.getSelectedRowCount() > 0;
};
/**
 * Returns the number of selected rows of the currently active grid.
 * @export
 * @return {number} The number of selected rows.
 */
gmf.DisplayquerygridController.prototype.getSelectedRowCount = function() {
  var source = this.getActiveGridSource();
  if (source !== null && source.configuration !== null) {
    return source.configuration.getSelectedCount();
  }
  return 0;
};
/**
 * Select all rows of the currently active grid.
 * @export
 */
gmf.DisplayquerygridController.prototype.selectAll = function() {
  var source = this.getActiveGridSource();
  // `configuration` is `null` for "too many results" sources (see
  // `makeGrid_`), so guard against it like `isOneSelected` does.
  if (source !== null && source.configuration !== null) {
    source.configuration.selectAll();
  }
};
/**
 * Unselect all rows of the currently active grid.
 * @export
 */
gmf.DisplayquerygridController.prototype.unselectAll = function() {
  var source = this.getActiveGridSource();
  // `configuration` is `null` for "too many results" sources (see
  // `makeGrid_`), so guard against it like `isOneSelected` does.
  if (source !== null && source.configuration !== null) {
    source.configuration.unselectAll();
  }
};
/**
 * Invert the selection of the currently active grid.
 * @export
 */
gmf.DisplayquerygridController.prototype.invertSelection = function() {
  var source = this.getActiveGridSource();
  // `configuration` is `null` for "too many results" sources (see
  // `makeGrid_`), so guard against it like `isOneSelected` does.
  if (source !== null && source.configuration !== null) {
    source.configuration.invertSelection();
  }
};
/**
 * Zoom to the selected features.
 *
 * NOTE(review): this method uses `ol.extent`, which is not listed in the
 * `goog.require` statements at the top of this file — verify it is pulled in
 * by another required module.
 * @export
 */
gmf.DisplayquerygridController.prototype.zoomToSelection = function() {
  var source = this.getActiveGridSource();
  if (source !== null) {
    var extent = ol.extent.createEmpty();
    this.highlightFeatures_.forEach(function(feature) {
      ol.extent.extend(extent, feature.getGeometry().getExtent());
    });
    var mapSize = this.map_.getSize();
    goog.asserts.assert(mapSize !== undefined);
    this.map_.getView().fit(extent, mapSize, {maxZoom: this.maxRecenterZoom});
  }
};
/**
 * Start a CSV download for the selected features of the active grid.
 * @export
 */
gmf.DisplayquerygridController.prototype.downloadCsv = function() {
  var source = this.getActiveGridSource();
  if (source !== null) {
    var columnDefs = source.configuration.columnDefs;
    goog.asserts.assert(columnDefs !== undefined);
    var selectedRows = source.configuration.getSelectedRows();

    // 'query-results' is the base name of the downloaded file.
    this.ngeoCsvDownload_.startDownload(
        selectedRows, columnDefs, 'query-results');
  }
};


gmf.module.controller('GmfDisplayquerygridController',
    gmf.DisplayquerygridController);
/**
* @type {ngeox.QueryResult} |
<|file_name|>node_addressing.go<|end_file_name|><|fim▁begin|>// Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datapath
import (
"net"
"github.com/cilium/cilium/pkg/cidr"
)
// NodeAddressingFamily is the node addressing information for a particular
// address family (IPv4 or IPv6).
type NodeAddressingFamily interface {
	// Router is the address that will act as the router on each node where
	// an agent is running on. Endpoints have a default route that points
	// to this address.
	Router() net.IP

	// PrimaryExternal is the primary external address of the node. Nodes
	// must be able to reach each other via this address.
	PrimaryExternal() net.IP

	// AllocationCIDR is the CIDR used for IP allocation of all endpoints
	// on the node
	AllocationCIDR() *cidr.CIDR

	// LocalAddresses lists all local addresses of this address family; an
	// error is returned if the addresses cannot be enumerated.
	LocalAddresses() ([]net.IP, error)

	// LoadBalancerNodeAddresses lists all addresses on which HostPort and
	// NodePort services should be responded to
	LoadBalancerNodeAddresses() []net.IP
}
// NodeAddressing implements addressing of a node
type NodeAddressing interface {<|fim▁hole|> IPv6() NodeAddressingFamily
IPv4() NodeAddressingFamily
}<|fim▁end|> | |
<|file_name|>tags.go<|end_file_name|><|fim▁begin|>package ext
import opentracing "github.com/opentracing/opentracing-go"
// These constants define common tag names recommended for better portability across
// tracing systems and languages/platforms.
//
// The tag names are defined as typed strings, so that in addition to the usual use
//
//     span.setTag(TagName, value)
//
// they also support value type validation via this additional syntax:
//
//     TagName.Set(span, value)
//
// (The typed Set helpers are defined further down in this file.)
var (
	//////////////////////////////////////////////////////////////////////
	// SpanKind (client/server or producer/consumer)
	//////////////////////////////////////////////////////////////////////

	// SpanKind hints at relationship between spans, e.g. client/server
	SpanKind = spanKindTagName("span.kind")

	// SpanKindRPCClient marks a span representing the client-side of an RPC
	// or other remote call
	SpanKindRPCClientEnum = SpanKindEnum("client")
	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}

	// SpanKindRPCServer marks a span representing the server-side of an RPC
	// or other remote call
	SpanKindRPCServerEnum = SpanKindEnum("server")
	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}

	// SpanKindProducer marks a span representing the producer-side of a
	// message bus
	SpanKindProducerEnum = SpanKindEnum("producer")
	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}

	// SpanKindConsumer marks a span representing the consumer-side of a
	// message bus
	SpanKindConsumerEnum = SpanKindEnum("consumer")
	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}

	//////////////////////////////////////////////////////////////////////
	// Component name
	//////////////////////////////////////////////////////////////////////

	// Component is a low-cardinality identifier of the module, library,
	// or package that is generating a span.
	Component = stringTagName("component")

	//////////////////////////////////////////////////////////////////////
	// Sampling hint
	//////////////////////////////////////////////////////////////////////

	// SamplingPriority determines the priority of sampling this Span.
	SamplingPriority = uint16TagName("sampling.priority")

	//////////////////////////////////////////////////////////////////////
	// Peer tags. These tags can be emitted by either client-side of
	// server-side to describe the other side/service in a peer-to-peer
	// communications, like an RPC call.
	//////////////////////////////////////////////////////////////////////

	// PeerService records the service name of the peer.
	PeerService = stringTagName("peer.service")

	// PeerAddress records the address name of the peer. This may be a "ip:port",
	// a bare "hostname", a FQDN or even a database DSN substring
	// like "mysql://[email protected]:3306/dbname"
	PeerAddress = stringTagName("peer.address")

	// PeerHostname records the host name of the peer
	PeerHostname = stringTagName("peer.hostname")

	// PeerHostIPv4 records IP v4 host address of the peer
	// (see also ipv4Tag.SetString below for the dotted-quad string form)
	PeerHostIPv4 = ipv4Tag("peer.ipv4")

	// PeerHostIPv6 records IP v6 host address of the peer
	PeerHostIPv6 = stringTagName("peer.ipv6")

	// PeerPort records port number of the peer
	PeerPort = uint16TagName("peer.port")

	//////////////////////////////////////////////////////////////////////
	// HTTP Tags
	//////////////////////////////////////////////////////////////////////

	// HTTPUrl should be the URL of the request being handled in this segment
	// of the trace, in standard URI format. The protocol is optional.
	HTTPUrl = stringTagName("http.url")

	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
	HTTPMethod = stringTagName("http.method")

	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
	// HTTP response.
	HTTPStatusCode = uint16TagName("http.status_code")

	//////////////////////////////////////////////////////////////////////
	// DB Tags
	//////////////////////////////////////////////////////////////////////

	// DBInstance is database instance name.
	DBInstance = stringTagName("db.instance")

	// DBStatement is a database statement for the given database type.
	// It can be a query or a prepared statement (i.e., before substitution).
	DBStatement = stringTagName("db.statement")

	// DBType is a database type. For any SQL database, "sql".
	// For others, the lower-case database category, e.g. "redis"
	DBType = stringTagName("db.type")

	// DBUser is a username for accessing database.
	DBUser = stringTagName("db.user")

	//////////////////////////////////////////////////////////////////////
	// Message Bus Tag
	//////////////////////////////////////////////////////////////////////

	// MessageBusDestination is an address at which messages can be exchanged
	MessageBusDestination = stringTagName("message_bus.destination")

	//////////////////////////////////////////////////////////////////////
	// Error Tag
	//////////////////////////////////////////////////////////////////////

	// Error indicates that operation represented by the span resulted in an error.
	Error = boolTagName("error")
)
// ---

// SpanKindEnum represents common span types
type SpanKindEnum string

// spanKindTagName is the typed name of the "span.kind" tag.
type spanKindTagName string

// Set sets the span.kind tag on the `span` to the given kind value
func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
	span.SetTag(string(tag), value)
}
type rpcServerOption struct {
clientContext opentracing.SpanContext
}
func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
if r.clientContext != nil {
opentracing.ChildOf(r.clientContext).Apply(o)
}
SpanKindRPCServer.Apply(o)
}
// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
// with `client` representing the metadata for the remote peer Span if available.
// In case client == nil, due to the client not being instrumented, this RPC
// server span will be a root span.
func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
return rpcServerOption{client}<|fim▁hole|>
type stringTagName string
// Set adds a string tag to the `span`
func (tag stringTagName) Set(span opentracing.Span, value string) {
span.SetTag(string(tag), value)
}
// ---
type uint32TagName string
// Set adds a uint32 tag to the `span`
func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
span.SetTag(string(tag), value)
}
// ---
type uint16TagName string
// Set adds a uint16 tag to the `span`
func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
span.SetTag(string(tag), value)
}
// ---
type boolTagName string
// Add adds a bool tag to the `span`
func (tag boolTagName) Set(span opentracing.Span, value bool) {
span.SetTag(string(tag), value)
}
type ipv4Tag string
// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility
func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
span.SetTag(string(tag), value)
}
// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1"
func (tag ipv4Tag) SetString(span opentracing.Span, value string) {
span.SetTag(string(tag), value)
}<|fim▁end|> | }
// --- |
<|file_name|>timestamp.rs<|end_file_name|><|fim▁begin|>use std::sync::atomic::{AtomicUsize, Ordering};
/// A monotonically-increasing value used to compare when objects entered
/// their current zone.
pub type Timestamp = usize;
lazy_static! {
static ref LAST_TIMESTAMP: AtomicUsize = AtomicUsize::new(0);
}
/// Generate a new timestamp, which should always be larger than the last one.
pub fn get_timestamp() -> Timestamp {
LAST_TIMESTAMP.fetch_add(1, Ordering::SeqCst)
}
#[test]
fn it_gives_increasing_numbers() {
let a = get_timestamp();
let b = get_timestamp();
assert!(b > a);<|fim▁hole|><|fim▁end|> | } |
<|file_name|>worker.rs<|end_file_name|><|fim▁begin|>use std::borrow::Cow;
use std::cmp::{max, min};
use std::io::{Error, ErrorKind};
use std::sync::Arc;
use petgraph::graph::NodeIndex;
use petgraph::{EdgeDirection, Graph};
use crate::compiler::{CommandInfo, CompilationTask, Compiler, OutputInfo, SharedState, Toolchain};
pub type BuildGraph = Graph<Arc<BuildTask>, ()>;
pub struct BuildTask {
pub title: String,
pub action: BuildAction,
}
pub enum BuildAction {
Empty,
Exec(CommandInfo, Vec<String>),
Compilation(Arc<dyn Toolchain>, CompilationTask),
}
pub struct BuildResult<'a> {
// Completed task
pub task: &'a BuildTask,
// Worker number
pub worker: usize,
// Build result
pub result: &'a Result<OutputInfo, Error>,
// Completed task count
pub completed: usize,
// Total task count
pub total: usize,
}
struct ResultMessage {
index: NodeIndex,
task: Arc<BuildTask>,
worker: usize,
result: Result<OutputInfo, Error>,
}
struct TaskMessage {
index: NodeIndex,
task: Arc<BuildTask>,
}
impl<'a> BuildResult<'a> {
fn new(message: &'a ResultMessage, completed: &mut usize, total: usize) -> Self {
*completed += 1;
BuildResult {
worker: message.worker,
task: &message.task,
result: &message.result,
completed: *completed,
total,
}
}
}
impl BuildAction {
pub fn create_tasks<C: Compiler>(
compiler: &C,
command: CommandInfo,
args: &[String],
title: &str,
) -> Vec<BuildAction> {
let actions: Vec<BuildAction> = compiler
.create_tasks(command.clone(), args)
.map(|tasks| {
tasks
.into_iter()
.map(|(toolchain, task)| BuildAction::Compilation(toolchain, task))
.collect()
})
.unwrap_or_else(|e| {
println!("Can't use octobuild for task {}: {}", title, e);
Vec::new()
});
if actions.is_empty() {
return vec![BuildAction::Exec(command, args.to_vec())];
}
actions
}
pub fn title(&self) -> Cow<str> {
match self {
BuildAction::Empty => Cow::Borrowed(""),
BuildAction::Exec(_, ref args) => Cow::Owned(format!("{:?}", args)),
BuildAction::Compilation(_, ref task) => {
Cow::Borrowed(task.input_source.to_str().unwrap_or(""))
}
}
}
}
pub fn validate_graph<N, E>(graph: Graph<N, E>) -> Result<Graph<N, E>, Error> {
let mut completed: Vec<bool> = Vec::with_capacity(graph.node_count());
let mut queue: Vec<NodeIndex> = Vec::with_capacity(graph.node_count());
if graph.node_count() == 0 {
return Ok(graph);
}
for index in 0..graph.node_count() {
completed.push(false);
queue.push(NodeIndex::new(index));
}
let mut count: usize = 0;
let mut i: usize = 0;
while i < queue.len() {
let index = queue[i];
if (!completed[index.index()]) && (is_ready(&graph, &completed, index)) {
completed[index.index()] = true;
for neighbor in graph.neighbors_directed(index, EdgeDirection::Incoming) {
queue.push(neighbor);
}
count += 1;
if count == completed.len() {
return Ok(graph);
}
}
i += 1;
}
Err(Error::new(
ErrorKind::InvalidInput,
"Found cycles in build dependencies",
))
}
fn execute_until_failed<F>(
graph: &BuildGraph,
tx_task: crossbeam::channel::Sender<TaskMessage>,
rx_result: &crossbeam::channel::Receiver<ResultMessage>,
count: &mut usize,
update_progress: F,
) -> Result<Option<i32>, Error>
where
F: Fn(BuildResult) -> Result<(), Error>,
{
let mut completed: Vec<bool> = vec![false; graph.node_count()];
for index in graph.externals(EdgeDirection::Outgoing) {
tx_task
.send(TaskMessage {
index,
task: graph.node_weight(index).unwrap().clone(),
})
.map_err(|e| Error::new(ErrorKind::Other, e))?;
}
for message in rx_result.iter() {
assert!(!completed[message.index.index()]);
update_progress(BuildResult::new(&message, count, graph.node_count()))?;
let result = message.result?;
if !result.success() {
let status = result.status;
return Ok(status);
}
completed[message.index.index()] = true;
for source in graph.neighbors_directed(message.index, EdgeDirection::Incoming) {
if is_ready(graph, &completed, source) {
tx_task
.send(TaskMessage {
index: source,
task: graph.node_weight(source).unwrap().clone(),
})
.map_err(|e| Error::new(ErrorKind::Other, e))?;
}
}
if *count == completed.len() {
return Ok(Some(0));
}
}
panic!("Unexpected end of result pipe");
}
fn is_ready<N, E>(graph: &Graph<N, E>, completed: &[bool], source: NodeIndex) -> bool {
for neighbor in graph.neighbors_directed(source, EdgeDirection::Outgoing) {
if !completed[neighbor.index()] {
return false;
}
}
true
}
pub fn execute_graph<F>(
state: &SharedState,
build_graph: BuildGraph,
process_limit: usize,
update_progress: F,
) -> Result<Option<i32>, Error>
where
F: Fn(BuildResult) -> Result<(), Error>,
{
let graph = validate_graph(build_graph)?;
if graph.node_count() == 0 {
return Ok(Some(0));
}
if graph.node_count() == 1 {
let task = &graph.raw_nodes()[0].weight;
let result = execute_compiler(state, task);
update_progress(BuildResult {
worker: 0,
completed: 1,
total: 1,
result: &result,
task,
})?;
return result.map(|output| output.status);
}
let (tx_result, rx_result) = crossbeam::channel::unbounded::<ResultMessage>();
let (tx_task, rx_task) = crossbeam::channel::unbounded::<TaskMessage>();
let num_cpus = max(1, min(process_limit, graph.node_count()));
crossbeam::scope(|scope| {
for worker_id in 0..num_cpus {
let local_rx_task = rx_task.clone();
let local_tx_result = tx_result.clone();
scope.spawn(move |_| {
while let Ok(message) = local_rx_task.recv() {
match local_tx_result.send(ResultMessage {
index: message.index,
worker: worker_id,
result: execute_compiler(state, &message.task),
task: message.task,
}) {
Ok(_) => {}
Err(_) => {
break;
}
}
}
});
}
drop(tx_result);
// Run all tasks.
let mut count: usize = 0;
let result =
execute_until_failed(&graph, tx_task, &rx_result, &mut count, &update_progress);
// Cleanup task queue.
for _ in rx_task.try_iter() {}
// Wait for in progress task completion.<|fim▁hole|> for message in rx_result.iter() {
update_progress(BuildResult::new(&message, &mut count, graph.node_count()))?;
}
result
})
.unwrap()
}
fn execute_compiler(state: &SharedState, task: &BuildTask) -> Result<OutputInfo, Error> {
match &task.action {
BuildAction::Empty => Ok(OutputInfo {
status: Some(0),
stderr: Vec::new(),
stdout: Vec::new(),
}),
BuildAction::Exec(ref command, ref args) => state.wrap_slow(|| {
command
.to_command()
.args(args)
.output()
.map(OutputInfo::new)
}),
BuildAction::Compilation(ref toolchain, ref task) => {
toolchain.compile_task(state, task.clone())
}
}
}
#[cfg(test)]
mod test {
use std::sync::{Arc, Mutex};
use crate::compiler::SharedState;
use crate::config::Config;
use super::*;
#[test]
fn test_execute_graph_empty() {
let state = SharedState::new(&Config::defaults().unwrap()).unwrap();
let graph = BuildGraph::new();
execute_graph(&state, graph, 2, |_| {
unreachable!();
})
.unwrap();
}
#[test]
fn test_execute_graph_single() {
let state = SharedState::new(&Config::defaults().unwrap()).unwrap();
// Simple two tasks graph
let mut graph = BuildGraph::new();
graph.add_node(Arc::new(BuildTask {
title: "task 1".to_string(),
action: BuildAction::Empty,
}));
let result = Mutex::new(Vec::new());
execute_graph(&state, graph, 4, |r| {
result.lock().unwrap().push(r.task.title.clone());
Ok(())
})
.unwrap();
let actual: Vec<String> = result.lock().unwrap().clone();
assert_eq!(actual, vec!["task 1".to_string()]);
}
// Test for #19 issue (https://github.com/bozaro/octobuild/issues/19)
#[test]
fn test_execute_graph_no_hang() {
let state = SharedState::new(&Config::defaults().unwrap()).unwrap();
// Simple two tasks graph
let mut graph = BuildGraph::new();
let t1 = graph.add_node(Arc::new(BuildTask {
title: "task 1".to_string(),
action: BuildAction::Empty,
}));
let t2 = graph.add_node(Arc::new(BuildTask {
title: "task 2".to_string(),
action: BuildAction::Empty,
}));
graph.add_edge(t2, t1, ());
let result = Mutex::new(Vec::new());
execute_graph(&state, graph, 4, |r| {
result.lock().unwrap().push(r.task.title.clone());
Ok(())
})
.unwrap();
let actual: Vec<String> = result.lock().unwrap().clone();
assert_eq!(actual, vec!["task 1".to_string(), "task 2".to_string()]);
}
}<|fim▁end|> | |
<|file_name|>scm.py<|end_file_name|><|fim▁begin|>import os
import re
import subprocess
from six.moves.urllib.parse import urlparse, quote_plus
from subprocess import CalledProcessError, PIPE, STDOUT
from conans.client.tools.env import no_op, environment_append
from conans.client.tools.files import chdir
from conans.errors import ConanException
from conans.util.files import decode_text, to_file_bytes
class Git(object):
def __init__(self, folder=None, verify_ssl=True, username=None, password=None,
force_english=True, runner=None):
self.folder = folder or os.getcwd()
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self._verify_ssl = verify_ssl
self._force_eng = force_english
self._username = username
self._password = password
self._runner = runner
def run(self, command):
command = "git %s" % command
with chdir(self.folder) if self.folder else no_op():
with environment_append({"LC_ALL": "en_US.UTF-8"}) if self._force_eng else no_op():
if not self._runner:
return subprocess.check_output(command, shell=True).decode().strip()
else:
return self._runner(command)
def get_repo_root(self):
return self.run("rev-parse --show-toplevel")
def get_url_with_credentials(self, url):
if not self._username or not self._password:
return url
if urlparse(url).password:
return url
user_enc = quote_plus(self._username)
pwd_enc = quote_plus(self._password)
url = url.replace("://", "://" + user_enc + ":" + pwd_enc + "@", 1)
return url
def _configure_ssl_verify(self):
return self.run("config http.sslVerify %s" % ("true" if self._verify_ssl else "false"))
def clone(self, url, branch=None):
url = self.get_url_with_credentials(url)
if os.path.exists(url):
url = url.replace("\\", "/") # Windows local directory
if os.path.exists(self.folder) and os.listdir(self.folder):
if not branch:
raise ConanException("The destination folder '%s' is not empty, "
"specify a branch to checkout (not a tag or commit) "
"or specify a 'subfolder' "
"attribute in the 'scm'" % self.folder)
output = self.run("init")
output += self._configure_ssl_verify()
output += self.run('remote add origin "%s"' % url)
output += self.run("fetch ")
output += self.run("checkout -t origin/%s" % branch)
else:
branch_cmd = "--branch %s" % branch if branch else ""
output = self.run('clone "%s" . %s' % (url, branch_cmd))
output += self._configure_ssl_verify()
return output
def checkout(self, element, submodule=None):
self._check_git_repo()
output = self.run('checkout "%s"' % element)
if submodule:
if submodule == "shallow":
output += self.run("submodule sync")
output += self.run("submodule update --init")
elif submodule == "recursive":
output += self.run("submodule sync --recursive")
output += self.run("submodule update --init --recursive")
else:
raise ConanException("Invalid 'submodule' attribute value in the 'scm'. "
"Unknown value '%s'. Allowed values: ['shallow', 'recursive']" % submodule)
# Element can be a tag, branch or commit
return output
def excluded_files(self):
try:
file_paths = [os.path.normpath(os.path.join(os.path.relpath(folder, self.folder), el)).replace("\\", "/")
for folder, dirpaths, fs in os.walk(self.folder)
for el in fs + dirpaths]
p = subprocess.Popen(['git', 'check-ignore', '--stdin'],
stdout=PIPE, stdin=PIPE, stderr=STDOUT, cwd=self.folder)
paths = to_file_bytes("\n".join(file_paths))
grep_stdout = decode_text(p.communicate(input=paths)[0])
tmp = grep_stdout.splitlines()
except CalledProcessError:
tmp = []
return tmp
def get_remote_url(self, remote_name=None):
self._check_git_repo()
remote_name = remote_name or "origin"
try:
remotes = self.run("remote -v")
for remote in remotes.splitlines():
try:
name, url = remote.split(None, 1)
url, _ = url.rsplit(None, 1)
if name == remote_name:
return url
except Exception:
pass
except subprocess.CalledProcessError:
pass
return None
def get_commit(self):
self._check_git_repo()
try:
commit = self.run("rev-parse HEAD")
commit = commit.strip()
return commit
except Exception as e:
raise ConanException("Unable to get git commit from %s\n%s" % (self.folder, str(e)))
get_revision = get_commit
def _check_git_repo(self):
try:
self.run("status")
except Exception:
raise ConanException("Not a valid git repository")
<|fim▁hole|> status = self.run("status -bs --porcelain")
# ## feature/scm_branch...myorigin/feature/scm_branch
branch = status.splitlines()[0].split("...")[0].strip("#").strip()
return branch
except Exception as e:
raise ConanException("Unable to get git branch from %s\n%s" % (self.folder, str(e)))<|fim▁end|> | def get_branch(self):
self._check_git_repo()
try: |
<|file_name|>color.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2015-2019 William (B.J.) Snow Orvis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Color-related constants and functions.
/// Colors that work with `graphics` functions, which want color as vectors of f32.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ColorF32(pub [f32; 4]);
/// Colors that work with `image` functions, which want color as vectors of u8.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ColorU8(pub [u8; 4]);
/// Black for use with `graphics`' functions
pub const BLACK_F32: ColorF32 = ColorF32([0.0, 0.0, 0.0, 1.0]);
/// Grey for use with `graphics`' functions
pub const GREY_F32: ColorF32 = ColorF32([0.5, 0.5, 0.5, 1.0]);
/// White for use with `graphics`' functions
pub const WHITE_F32: ColorF32 = ColorF32([1.0, 1.0, 1.0, 1.0]);
/// Dark blue for use with `image`' functions
pub const AEBLUE_U8: ColorU8 = ColorU8([0, 0, 48, 255]);
/// Black for use with `image`' functions
pub const BLACK_U8: ColorU8 = ColorU8([0, 0, 0, 255]);
/// White for use with `image`' functions
pub const WHITE_U8: ColorU8 = ColorU8([255, 255, 255, 255]);
/// Generates a linear range of RGBA colors from a start color to a final color.
///
///
/// Eg, to create a spectrum from white to black:
///
/// ```
/// use fractal_lib::color::{ColorU8, color_range_linear};
///
/// let black = ColorU8([0,0,0,255]);
/// let white = ColorU8([255,255,255,255]);<|fim▁hole|>///
/// assert_eq!(range[0], black);
/// assert_eq!(range[255], white);
/// assert_eq!(range[10], ColorU8([10,10,10,255]));
/// ```
///
/// If you want to simulate a cutoff/saturation point where the gradients reach the peak color
/// before some maximium index value, then you can use `std::cmp::min` to prevent an out of bounds
/// error:
///
/// ```
/// use fractal_lib::color::{ColorU8, color_range_linear};
/// use std::cmp::min;
///
/// let black = ColorU8([0,0,0,255]);
/// let white = ColorU8([255,255,255,255]);
/// let gradient_count = 128;
/// let range = color_range_linear(black, white, gradient_count);
///
/// assert_eq!(range[min(gradient_count-1, 0)], black);
/// assert_eq!(range[min(gradient_count-1, gradient_count-1)], white);
/// assert_eq!(range[min(gradient_count-1, 255)], white);
/// assert_eq!(range[min(gradient_count-1, 127)], white);
/// assert_eq!(range[min(gradient_count-1, 10)], ColorU8([20,20,20,255]));
/// ```
pub fn color_range_linear(first: ColorU8, last: ColorU8, count: usize) -> Vec<ColorU8> {
if count < 2 {
panic!("Count must be 2 or more: {}", count);
}
let deltas = [
(f32::from(last.0[0]) - f32::from(first.0[0])) / f32::from((count as u16) - 1),
(f32::from(last.0[1]) - f32::from(first.0[1])) / f32::from((count as u16) - 1),
(f32::from(last.0[2]) - f32::from(first.0[2])) / f32::from((count as u16) - 1),
(f32::from(last.0[3]) - f32::from(first.0[3])) / f32::from((count as u16) - 1),
];
(0..count)
.map(|i| {
ColorU8([
(f32::from(first.0[0]) + f32::from(i as u16) * deltas[0]) as u8,
(f32::from(first.0[1]) + f32::from(i as u16) * deltas[1]) as u8,
(f32::from(first.0[2]) + f32::from(i as u16) * deltas[2]) as u8,
(f32::from(first.0[3]) + f32::from(i as u16) * deltas[3]) as u8,
])
})
.collect()
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[should_panic(expected = "Count must be 2 or more")]
fn test_linear_zero() {
let black = ColorU8([0, 0, 0, 255]);
let white = ColorU8([255, 255, 255, 255]);
let range = color_range_linear(black, white, 0);
assert!(range.len() == 0);
}
#[test]
#[should_panic(expected = "Count must be 2 or more")]
fn test_linear_one() {
let black = ColorU8([0, 0, 0, 255]);
let white = ColorU8([255, 255, 255, 255]);
let range = color_range_linear(black, white, 1);
assert!(range.len() == 1);
}
#[test]
fn test_linear_two() {
let black = ColorU8([0, 0, 0, 255]);
let white = ColorU8([255, 255, 255, 255]);
let range = color_range_linear(black, white, 2);
assert_eq!(black, range[0]);
assert_eq!(white, range[1]);
}
}<|fim▁end|> | ///
/// let range = color_range_linear(black, white, 256); |
<|file_name|>tail.rs<|end_file_name|><|fim▁begin|>#![crate_name = "tail"]
#![feature(collections, core, old_io, old_path, rustc_private, std_misc)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Morten Olsen Lysgaard <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
*/
extern crate getopts;<|fim▁hole|>use std::old_io::{BufferedReader, BytesReader};
use std::old_io::fs::File;
use std::old_path::Path;
use std::str::from_utf8;
use getopts::{optopt, optflag, getopts, usage};
use std::collections::VecDeque;
use std::old_io::timer::sleep;
use std::time::duration::Duration;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "tail";
static VERSION: &'static str = "0.0.1";
pub fn uumain(args: Vec<String>) -> i32 {
let mut beginning = false;
let mut lines = true;
let mut byte_count = 0usize;
let mut line_count = 10usize;
let mut sleep_msec = 1000u64;
// handle obsolete -number syntax
let options = match obsolete(args.tail()) {
(args, Some(n)) => { line_count = n; args },
(args, None) => args
};
let args = options;
let possible_options = [
optopt("c", "bytes", "Number of bytes to print", "k"),
optopt("n", "lines", "Number of lines to print", "k"),
optflag("f", "follow", "Print the file as it grows"),
optopt("s", "sleep-interval", "Number or seconds to sleep between polling the file when running with -f", "n"),
optflag("h", "help", "help"),
optflag("V", "version", "version"),
];
let given_options = match getopts(args.as_slice(), &possible_options) {
Ok (m) => { m }
Err(_) => {
println!("{}", usage(NAME, &possible_options));
return 1;
}
};
if given_options.opt_present("h") {
println!("{}", usage(NAME, &possible_options));
return 0;
}
if given_options.opt_present("V") { version(); return 0 }
let follow = given_options.opt_present("f");
if follow {
match given_options.opt_str("s") {
Some(n) => {
let parsed: Option<u64> = n.parse().ok();
match parsed {
Some(m) => { sleep_msec = m * 1000 }
None => {}
}
}
None => {}
};
}
match given_options.opt_str("n") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
line_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of lines ({})", slice);
return 1;
}
};
}
None => match given_options.opt_str("c") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
byte_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of bytes ({})", slice);
return 1;
}
};
lines = false;
}
None => { }
}
};
let files = given_options.free;
if files.is_empty() {
let mut buffer = BufferedReader::new(stdin());
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
} else {
let mut multiple = false;
let mut firstime = true;
if files.len() > 1 {
multiple = true;
}
for file in files.iter() {
if multiple {
if !firstime { println!(""); }
println!("==> {} <==", file.as_slice());
}
firstime = false;
let path = Path::new(file.as_slice());
let reader = File::open(&path).unwrap();
let mut buffer = BufferedReader::new(reader);
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
}
}
0
}
fn parse_size(mut size_slice: &str) -> Option<usize> {
let mut base =
if size_slice.len() > 0 && size_slice.char_at(size_slice.len() - 1) == 'B' {
size_slice = &size_slice[..size_slice.len() - 1];
1000usize
} else {
1024usize
};
let exponent =
if size_slice.len() > 0 {
let mut has_suffix = true;
let exp = match size_slice.char_at(size_slice.len() - 1) {
'K' => 1usize,
'M' => 2usize,
'G' => 3usize,
'T' => 4usize,
'P' => 5usize,
'E' => 6usize,
'Z' => 7usize,
'Y' => 8usize,
'b' => {
base = 512usize;
1usize
}
_ => {
has_suffix = false;
0usize
}
};
if has_suffix {
size_slice = &size_slice[..size_slice.len() - 1];
}
exp
} else {
0usize
};
let mut multiplier = 1usize;
for _ in range(0usize, exponent) {
multiplier *= base;
}
if base == 1000usize && exponent == 0usize {
// sole B is not a valid suffix
None
} else {
let value = size_slice.parse();
match value {
Ok(v) => Some(multiplier * v),
_ => None
}
}
}
// It searches for an option in the form of -123123
//
// In case is found, the options vector will get rid of that object so that
// getopts works correctly.
fn obsolete(options: &[String]) -> (Vec<String>, Option<usize>) {
let mut options: Vec<String> = options.to_vec();
let mut a = 0;
let b = options.len();
while a < b {
let current = options[a].clone();
let current = current.as_bytes();
if current.len() > 1 && current[0] == '-' as u8 {
let len = current.len();
for pos in range(1, len) {
// Ensure that the argument is only made out of digits
if !(current[pos] as char).is_numeric() { break; }
// If this is the last number
if pos == len - 1 {
options.remove(a);
let number: Option<usize> = from_utf8(¤t[1..len]).unwrap().parse().ok();
return (options, Some(number.unwrap()));
}
}
}
a += 1;
};
(options, None)
}
macro_rules! tail_impl (
($kind:ty, $kindfn:ident, $kindprint:ident, $reader:ident, $count:ident, $beginning:ident) => ({
// read through each line and store them in a ringbuffer that always contains
// count lines/chars. When reaching the end of file, output the data in the
// ringbuf.
let mut ringbuf: VecDeque<$kind> = VecDeque::new();
let data = $reader.$kindfn().skip(
if $beginning {
let temp = $count;
$count = ::std::usize::MAX;
temp - 1
} else {
0
}
);
for io_datum in data {
match io_datum {
Ok(datum) => {
if $count <= ringbuf.len() {
ringbuf.pop_front();
}
ringbuf.push_back(datum);
}
Err(err) => panic!(err)
}
}
let mut stdout = stdout();
for datum in ringbuf.iter() {
$kindprint(&mut stdout, datum);
}
})
);
fn tail<T: Reader>(reader: &mut BufferedReader<T>, mut line_count: usize, mut byte_count: usize, beginning: bool, lines: bool, follow: bool, sleep_msec: u64) {
if lines {
tail_impl!(String, lines, print_string, reader, line_count, beginning);
} else {
tail_impl!(u8, bytes, print_byte, reader, byte_count, beginning);
}
// if we follow the file, sleep a bit and print the rest if the file has grown.
while follow {
sleep(Duration::milliseconds(sleep_msec as i64));
for io_line in reader.lines() {
match io_line {
Ok(line) => print!("{}", line),
Err(err) => panic!(err)
}
}
}
}
#[inline]
fn print_byte<T: Writer>(stdout: &mut T, ch: &u8) {
if let Err(err) = stdout.write_u8(*ch) {
crash!(1, "{}", err);
}
}
#[inline]
fn print_string<T: Writer>(_: &mut T, s: &String) {
print!("{}", s);
}
fn version () {
println!("{} v{}", NAME, VERSION);
}<|fim▁end|> |
use std::char::CharExt;
use std::old_io::{stdin, stdout}; |
<|file_name|>queryer.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# --*-- coding:utf-8 --*--
import os
import sys
sys.path.append(os.path.split(os.path.split(os.path.abspath(sys.path[0]))[0])[0])
from CountMan.monitor.util import *
from CountMan.monitor.setting import *
class Queryer(object):
def __init__(self):
self.dao = DatabaseInterface()
self.dataSet = dict()
self.logger = getLogger('root')
def getData(self):
for queryKey in QUERYPARAM:
self.dataSet[queryKey] = getResponse(QUERYPARAM.get(queryKey))
<|fim▁hole|> self.dao.insertCollection(self.dataSet)
if __name__ == '__main__':
q = Queryer()
if ISDEBUG:
import cProfile
cProfile.run("q.set2db")
else:
q.set2db<|fim▁end|> | @property
def set2db(self):
self.getData()
self.logger.info('get query data: {0} success'.format(self.dataSet)) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page, get_plugin_media_path
from os.path import basename
class Picture(CMSPlugin):
"""
A Picture with or without a link
"""
CENTER = "center"
LEFT = "left"
RIGHT = "right"
FLOAT_CHOICES = ((CENTER, _("center")),
(LEFT, _("left")),
(RIGHT, _("right")),
)
image = models.ImageField(_("image"), upload_to=get_plugin_media_path)
url = models.CharField(_("link"), max_length=255, blank=True, null=True, help_text=_("if present image will be clickable"))
page_link = models.ForeignKey(Page, verbose_name=_("page"), null=True, blank=True, help_text=_("if present image will be clickable"))
alt = models.CharField(_("alternate text"), max_length=255, blank=True, null=True, help_text=_("textual description of the image"))
longdesc = models.CharField(_("long description"), max_length=255, blank=True, null=True, help_text=_("additional description of the image"))
float = models.CharField(_("side"), max_length=10, blank=True, null=True, choices=FLOAT_CHOICES)
class Meta:
db_table = 'cmsplugin_picture'<|fim▁hole|> return self.alt[:40]
elif self.image:
# added if, because it raised attribute error when file wasn't defined
try:
return u"%s" % basename(self.image.path)
except:
pass
return "<empty>"<|fim▁end|> |
def __unicode__(self):
if self.alt: |
<|file_name|>ResourceConfig.java<|end_file_name|><|fim▁begin|>/**
* @@@ START COPYRIGHT @@@
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.<|fim▁hole|>**/
package org.trafodion.wms.rest;
import com.sun.jersey.api.core.PackagesResourceConfig;
public class ResourceConfig extends PackagesResourceConfig {
public ResourceConfig() {
super("org.trafodion.wms.rest");
}
}<|fim▁end|> | *
* @@@ END COPYRIGHT @@@ |
<|file_name|>2102000.js<|end_file_name|><|fim▁begin|>var status = 0;
var minlvl = 100;
var maxlvl = 255;
var minplayers = 1;
var maxplayers = 6;
var time = 15;
var open = true;
function start() {
status = -1; // and when they click lets fight make it turn to a really cool ifght song :D LOL ok like the Zakum battle song? kk and btw uhm can you add a message like after they click OK to say "Matt: Meet me near the top of the map." ? o-o in other words, a
action(1, 0, 0);
}
function action(mode, type, selection) {
if (mode == -1) {
cm.dispose();
} else if (mode == 0) {
cm.sendOk("I spy a chicken :O"); // lLOL
cm.dispose();
} else {
if (mode == 1)
status++;
else
status--;
if (status == 0) {
cm.sendYesNo("Hello #b #h ##k! Would you like to fight #rSuper Horntail?#k He is waiting :)");
} else if (status == 1) {
if (cm.getPlayer().warning[1] == false && cm.isLeader()) {
cm.getPlayer().warning[1] = true;
cm.mapMessage("On behalf of MapleZtory, please defeat Big Puff Daddy! rawrawrawr");
var mf = cm.getPlayer().getMap().getMapFactory();
<|fim▁hole|> bossmap.removePortals();
var mob = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810018);
var mob1 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810002);
var mob2 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810003);
var mob3 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810004);
var mob4 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810005);
var mob5 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810006);
var mob6 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810007);
var mob7 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810008);
var mob8 = net.sf.odinms.server.life.MapleLifeFactory.getMonster(8810009);
var overrideStats = new net.sf.odinms.server.life.MapleMonsterStats();
overrideStats.setHp(2147000000);
overrideStats.setExp(2147000000);
overrideStats.setMp(mob.getMaxMp());
// mob.setOverrideStats(overrideStats);
mob1.setOverrideStats(overrideStats);
mob2.setOverrideStats(overrideStats);
mob3.setOverrideStats(overrideStats);
mob4.setOverrideStats(overrideStats);
mob5.setOverrideStats(overrideStats);
mob6.setOverrideStats(overrideStats);
mob7.setOverrideStats(overrideStats);
mob8.setOverrideStats(overrideStats);
mob.setHp(overrideStats.getHp());
//eim.registerMonster(mob);
bossmap.spawnMonsterOnGroudBelow(mob, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob1, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob2, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob3, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob4, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob5, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob6, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob7, new java.awt.Point(-182, -178));
bossmap.spawnMonsterOnGroudBelow(mob8, new java.awt.Point(-182, -178));
// bossmap.killAllMonsters(false);
// bossmap.killMonster(8810018); // i like that funkshun :( // this one looks pro though :Dlol ur right XD
// spawnMonster(int mobid, int HP, int MP, int level, int EXP, int boss, int undead, int amount, int x, int y);
cm.dispose();
} else {
cm.sendOk("Super Horntail has already been spawned or you are not leader!");
cm.dispose();
}
}
}
}<|fim▁end|> | var bossmap = mf.getMap(240030103);
|
<|file_name|>OpenColorIOTest.py<|end_file_name|><|fim▁begin|>##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferImage
class OpenColorIOTest( unittest.TestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checker.exr" )
def test( self ) :
	"""An OpenColorIO node with no colour spaces set must pass the image
	through unchanged; configuring a real conversion must alter it."""

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( self.fileName )

	ocio = GafferImage.OpenColorIO()
	ocio["in"].setInput( reader["out"] )
	# Default (empty) spaces: pure pass-through.
	self.assertEqual( reader["out"].image(), ocio["out"].image() )

	# linear -> sRGB is a genuine conversion, so the output must differ.
	ocio["inputSpace"].setValue( "linear" )
	ocio["outputSpace"].setValue( "sRGB" )
	self.assertNotEqual( reader["out"].image(), ocio["out"].image() )
def testHashPassThrough( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
o = GafferImage.OpenColorIO()
o["in"].setInput( n["out"] )
self.assertEqual( n["out"].image(), o["out"].image() )<|fim▁hole|> self.assertNotEqual( n["out"].image(), o["out"].image() )
o["enabled"].setValue( False )
self.assertEqual( n["out"].image(), o["out"].image() )
self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
o["enabled"].setValue( True )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "linear" )
self.assertEqual( n["out"].image(), o["out"].image() )
self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
def testImageHashPassThrough( self ) :
	"""The image hash must match the input's until a real colour space
	conversion is configured, after which it must differ."""

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( self.fileName )

	ocio = GafferImage.OpenColorIO()
	ocio["in"].setInput( reader["out"] )
	self.assertEqual( reader["out"].imageHash(), ocio["out"].imageHash() )

	ocio["inputSpace"].setValue( "linear" )
	ocio["outputSpace"].setValue( "sRGB" )
	self.assertNotEqual( reader["out"].imageHash(), ocio["out"].imageHash() )
def testChannelsAreSeparate( self ) :
	"""Each channel must be converted independently: for a non-uniform
	image, R and G should neither hash nor compare equal downstream."""

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/circles.exr" ) )

	ocio = GafferImage.OpenColorIO()
	ocio["in"].setInput( reader["out"] )
	ocio["inputSpace"].setValue( "linear" )
	ocio["outputSpace"].setValue( "sRGB" )

	tileOrigin = IECore.V2i( 0 )
	self.assertNotEqual(
		ocio["out"].channelDataHash( "R", tileOrigin ),
		ocio["out"].channelDataHash( "G", tileOrigin )
	)
	self.assertNotEqual(
		ocio["out"].channelData( "R", tileOrigin ),
		ocio["out"].channelData( "G", tileOrigin )
	)
if __name__ == "__main__":
unittest.main()<|fim▁end|> |
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
|
<|file_name|>search.go<|end_file_name|><|fim▁begin|>package service
import (
"context"
"go-common/library/ecode"
"go-common/library/xstr"
"net/url"
"strconv"
"go-common/app/admin/main/videoup-task/model"
"go-common/library/database/elastic"
"go-common/library/log"
)
const (
_searchBusinessQAVideo = "task_qa"
_searchBusinessQAVideoRandom = "task_qa_random"
_searchIndexQAVideo = "task_qa"
_searchLogURL = "/x/admin/search/log"
)
// searchQAVideo queries the elastic "task_qa" index with the filters in pm
// and returns one page of QA video records.
//
// When pm.Limit > 0 and pm.Seed is non-empty the query is issued against
// the "task_qa_random" business instead, which samples rows from the seed;
// in that mode the reported total is capped at pm.Limit and surplus rows of
// the final page are trimmed.
func (s *Service) searchQAVideo(c context.Context, pm *model.ListParams) (list *model.QAVideoList, err error) {
	needRandom := pm.Limit > 0 && pm.Seed != ""
	business := _searchBusinessQAVideo
	if needRandom {
		business = _searchBusinessQAVideoRandom
	}
	req := s.es.NewRequest(business).Index(_searchIndexQAVideo).Ps(pm.Ps).Pn(pm.Pn)
	// Range filters are added only when at least one bound was supplied.
	// RangeScopeLcRc denotes a left-closed/right-closed interval.
	if pm.CTimeFrom != "" || pm.CTimeTo != "" {
		req.WhereRange("ctime", pm.CTimeFrom, pm.CTimeTo, elastic.RangeScopeLcRc)
	}
	if pm.FTimeFrom != "" || pm.FTimeTo != "" {
		req.WhereRange("ftime", pm.FTimeFrom, pm.FTimeTo, elastic.RangeScopeLcRc)
	}
	if pm.FansFrom > 0 || pm.FansTo > 0 {
		req.WhereRange("fans", pm.FansFrom, pm.FansTo, elastic.RangeScopeLcRc)
	}
	// Terms filters: each slice narrows the search when non-empty.
	if len(pm.UID) > 0 {
		req.WhereIn("uid", pm.UID)
	}
	if len(pm.TaskID) > 0 {
		req.WhereIn("task_id", pm.TaskID)
	}
	if len(pm.TagID) > 0 {
		req.WhereIn("audit_tagid", pm.TagID)
	}
	if len(pm.UPGroup) > 0 {
		req.WhereIn("up_groups", pm.UPGroup)
	}
	if len(pm.ArcTypeID) > 0 {
		req.WhereIn("arc_typeid", pm.ArcTypeID)
	}
	if len(pm.AuditStatus) > 0 {
		req.WhereIn("audit_status", pm.AuditStatus)
	}
	if len(pm.Keyword) > 0 {
		req.WhereLike([]string{"arc_title"}, pm.Keyword, true, elastic.LikeLevelLow)
	}
	if needRandom {
		// Random sampling keys off the seed; explicit ordering would defeat it.
		req.WhereEq("seed", pm.Seed)
	} else {
		req.Order(pm.Order, pm.Sort)
	}
	if pm.State == model.QAStateWait || pm.State == model.QAStateFinish {
		req.WhereEq("state", pm.State)
	}
	if err = req.Scan(c, &list); err != nil {
		log.Error("searchQAVideo elastic scan error(%v) params(%+v)", err, pm)
		return
	}
	if needRandom && list != nil && list.Page.Total > pm.Limit {
		list.Page.Total = pm.Limit
		// Trim the surplus rows off the last page.
		// NOTE(review): assumes Page.Num*Page.Size counts rows delivered up to
		// and including this page — confirm against the elastic client's
		// paging semantics.
		addition := list.Page.Num*list.Page.Size - pm.Limit
		if addition > 0 {
			list.Result = list.Result[:(list.Page.Size - addition)]
		}
	}
	return
}
// lastInTime returns, for each uid in ids, the ctime of that reviewer's most
// recent "hands up" action, as recorded by the audit log search service.
func (s *Service) lastInTime(c context.Context, ids []int64) (mcases map[int64][]interface{}, err error) {
	mcases, err = s.lastTime(c, model.ActionHandsUP, ids)
	return
}
<|fim▁hole|>// lastInOutTime
func (s *Service) lastTime(c context.Context, action int8, ids []int64) (mcases map[int64][]interface{}, err error) {
mcases = make(map[int64][]interface{})
params := url.Values{}
uri := s.c.Host.Search + _searchLogURL
params.Set("appid", "log_audit_group")
params.Set("group", "uid")
params.Set("uid", xstr.JoinInts(ids))
params.Set("business", strconv.Itoa(model.LogClientConsumer))
params.Set("action", strconv.Itoa(int(action)))
params.Set("ps", strconv.Itoa(len(ids)))
res := &model.SearchLogResult{}
if err = s.httpClient.Get(c, uri, "", params, &res); err != nil {
log.Error("log_audit_group d.httpClient.Get error(%v)", err)
return
}
if res.Code != ecode.OK.Code() {
log.Error("log_audit_group ecode:%v", res.Code)
return
}
for _, item := range res.Data.Result {
mcases[item.UID] = []interface{}{item.Ctime}
}
log.Info("log_audit_group get: %s params:%s ret:%v", uri, params.Encode(), res)
return
}<|fim▁end|> | func (s *Service) lastOutTime(c context.Context, ids []int64) (mcases map[int64][]interface{}, err error) {
return s.lastTime(c, model.ActionHandsOFF, ids)
}
|
<|file_name|>vhosts.go<|end_file_name|><|fim▁begin|>package vhosts
import (
"net/http"
"strings"
"sync"
)
// VirtualHosts multiplexes HTTP requests to handlers keyed by the request's
// Host header. The zero value is ready to use; it is safe for concurrent use
// by multiple goroutines.
type VirtualHosts struct {
	vhosts map[string]http.Handler // Host header -> handler; lazily allocated by HandleHost
	mu     sync.RWMutex            // guards vhosts
}
// NewVirtualHosts builds a VirtualHosts router from a map whose keys are
// space-separated host names and whose values are the handlers that serve
// them. Empty host names are skipped.
func NewVirtualHosts(vhosts map[string]http.Handler) *VirtualHosts {
	v := new(VirtualHosts)
	for hostList, handler := range vhosts {
		for _, name := range strings.Split(hostList, " ") {
			if name == "" {
				continue
			}
			v.HandleHost(handler, name)
		}
	}
	return v
}
// ServeHTTP dispatches the request to the handler registered for the
// request's Host header, replying 404 when no handler matches.
//
// Fix: the handler is looked up under the read lock but invoked only after
// the lock is released. The previous version held mu.RLock for the entire
// downstream handler call, which blocked HandleHost writers for the duration
// of every request and would deadlock outright if a handler ever called
// HandleHost itself.
func (v *VirtualHosts) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	v.mu.RLock()
	// Indexing a nil map is safe in Go and yields the zero value (nil),
	// so no explicit v.vhosts != nil guard is needed.
	handler := v.vhosts[r.Host]
	v.mu.RUnlock()

	if handler != nil {
		handler.ServeHTTP(w, r)
		return
	}
	http.NotFound(w, r)
}
<|fim▁hole|> v.mu.Lock()
defer v.mu.Unlock()
if v.vhosts == nil {
v.vhosts = make(map[string]http.Handler)
}
for _, host := range hosts {
for _, h := range strings.Split(host, " ") {
if h = strings.TrimSpace(h); h != "" {
v.vhosts[h] = handler
}
}
}
}<|fim▁end|> | func (v *VirtualHosts) HandleHost(handler http.Handler, hosts ...string) { |
<|file_name|>io.rs<|end_file_name|><|fim▁begin|>use buf::{Buf, MutBuf};
use error::MioResult;
use self::NonBlock::{Ready, WouldBlock};
use error::MioErrorKind as mek;
use os;
pub use os::IoDesc;
/// The result of a non-blocking operation.
#[derive(Debug)]
pub enum NonBlock<T> {
Ready(T),
WouldBlock
}
impl<T> NonBlock<T> {
    /// Returns `true` when the underlying call reported it would block.
    pub fn would_block(&self) -> bool {
        match *self {
            WouldBlock => true,
            Ready(..) => false,
        }
    }

    /// Extracts the ready value.
    ///
    /// # Panics
    ///
    /// Panics if the operation would have blocked.
    pub fn unwrap(self) -> T {
        match self {
            WouldBlock => panic!("would have blocked, no result to take"),
            Ready(v) => v,
        }
    }
}
pub trait IoHandle {
fn desc(&self) -> &IoDesc;
}
pub trait FromIoDesc {
fn from_desc(desc: IoDesc) -> Self;
}
pub trait IoReader {
fn read<B: MutBuf>(&self, buf: &mut B) -> MioResult<NonBlock<usize>>;
fn read_slice(&self, buf: &mut [u8]) -> MioResult<NonBlock<usize>>;
}
pub trait IoWriter {
fn write<B: Buf>(&self, buf: &mut B) -> MioResult<NonBlock<usize>>;
fn write_slice(&self, buf: &[u8]) -> MioResult<NonBlock<usize>>;
}
pub trait IoAcceptor {
type Output;
fn accept(&mut self) -> MioResult<NonBlock<Self::Output>>;
}
/// Creates a unidirectional OS pipe, returning its read and write halves.
///
/// Propagates any error from the underlying `os::pipe` call.
pub fn pipe() -> MioResult<(PipeReader, PipeWriter)> {
    let (rd, wr) = try!(os::pipe());
    Ok((PipeReader { desc: rd }, PipeWriter { desc: wr }))
}
pub struct PipeReader {
desc: os::IoDesc
}
impl IoHandle for PipeReader {
fn desc(&self) -> &os::IoDesc {
&self.desc
}
}
impl FromIoDesc for PipeReader {
fn from_desc(desc: IoDesc) -> Self {
PipeReader { desc: desc }
}
}
pub struct PipeWriter {
desc: os::IoDesc
}
impl IoHandle for PipeWriter {
fn desc(&self) -> &os::IoDesc {
&self.desc
}
}
impl FromIoDesc for PipeWriter {
fn from_desc(desc: IoDesc) -> Self {
PipeWriter { desc: desc }
}
}
impl IoReader for PipeReader {
fn read<B: MutBuf>(&self, buf: &mut B) -> MioResult<NonBlock<usize>> {
read(self, buf)
}
fn read_slice(&self, buf: &mut [u8]) -> MioResult<NonBlock<usize>> {
read_slice(self, buf)
}
}
impl IoWriter for PipeWriter {<|fim▁hole|> }
fn write_slice(&self, buf: &[u8]) -> MioResult<NonBlock<usize>> {
write_slice(self, buf)
}
}
/// Reads from `io` into the writable region reported by `buf.mut_bytes()`,
/// advancing the buffer's cursor by the number of bytes actually read.
///
/// This is not guaranteed to consume an entire datagram or segment. For
/// message-oriented (rather than stream) protocols, make sure the buffer is
/// large enough to hold a whole segment (1532 bytes if not using jumbo
/// frames).
#[inline]
pub fn read<I: IoHandle, B: MutBuf>(io: &I, buf: &mut B) -> MioResult<NonBlock<usize>> {
    let res = read_slice(io, buf.mut_bytes());
    // Only a successful, non-blocking read moves the cursor forward.
    if let Ok(Ready(cnt)) = res {
        buf.advance(cnt);
    }
    res
}
/// Writes the readable region reported by `buf.bytes()` to `io`, then
/// advances the buffer past the bytes that were actually written.
#[inline]
pub fn write<O: IoHandle, B: Buf>(io: &O, buf: &mut B) -> MioResult<NonBlock<usize>> {
    let res = write_slice(io, buf.bytes());
    // Partial writes are normal: advance only by what the OS accepted.
    if let Ok(Ready(cnt)) = res {
        buf.advance(cnt);
    }
    res
}
/// Fills `buf` from the descriptor, translating a would-block error into
/// `Ok(WouldBlock)` and passing every other error through unchanged.
#[inline]
pub fn read_slice<I: IoHandle>(io: &I, buf: &mut [u8]) -> MioResult<NonBlock<usize>> {
    match os::read(io.desc(), buf) {
        Ok(cnt) => Ok(Ready(cnt)),
        Err(e) => match e.kind {
            mek::WouldBlock => Ok(WouldBlock),
            _ => Err(e),
        },
    }
}
///writes the length of the supplied slice into the socket from the slice
#[inline]
pub fn write_slice<I: IoHandle>(io: & I, buf: & [u8]) -> MioResult<NonBlock<usize>> {
match os::write(io.desc(), buf) {
Ok(cnt) => { Ok(Ready(cnt)) }
Err(e) => {
match e.kind {
mek::WouldBlock => Ok(WouldBlock),
_ => Err(e)
}
}
}
}<|fim▁end|> | fn write<B: Buf>(&self, buf: &mut B) -> MioResult<NonBlock<usize>> {
write(self, buf) |
<|file_name|>dead.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This implements the dead-code warning pass. It follows middle::reachable
// closely. The idea is that all reachable symbols are live, codes called
// from live codes are live, and everything else is dead.
use middle::def;
use middle::pat_util;
use middle::privacy;
use middle::ty;
use middle::typeck;
use lint;
use util::nodemap::NodeSet;
use std::collections::HashSet;
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::{local_def, is_local, PostExpansionMethod};
use syntax::attr::{mod, AttrMetaMethods};
use syntax::codemap;
use syntax::visit::{mod, Visitor};
// Any local node that may call something in its body block should be
// explored. For example, if it's a live NodeItem that is a
// function, then we should explore its block to check for codes that
// may need to be marked as live.
fn should_explore(tcx: &ty::ctxt, def_id: ast::DefId) -> bool {
    // Only local definitions have a body we can walk; cross-crate items are
    // handled by their own crate's dead-code pass.
    is_local(def_id) && match tcx.map.find(def_id.node) {
        Some(ast_map::NodeItem(..)) |
        Some(ast_map::NodeImplItem(..)) |
        Some(ast_map::NodeForeignItem(..)) |
        Some(ast_map::NodeTraitItem(..)) => true,
        _ => false,
    }
}
// Walks everything reachable from the seeded worklist, accumulating the set
// of live NodeIds in `live_symbols`.
struct MarkSymbolVisitor<'a, 'tcx: 'a> {
    worklist: Vec<ast::NodeId>,              // nodes still to be explored
    tcx: &'a ty::ctxt<'tcx>,
    live_symbols: Box<HashSet<ast::NodeId>>, // everything proven live so far
    struct_has_extern_repr: bool,            // inside an extern-repr struct: all fields count as live
    ignore_non_const_paths: bool,            // set while walking patterns (see visit_pat)
    inherited_pub_visibility: bool,          // inside a pub enum: members inherit pub
}
impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
fn new(tcx: &'a ty::ctxt<'tcx>,
worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a, 'tcx> {
MarkSymbolVisitor {
worklist: worklist,
tcx: tcx,
live_symbols: box HashSet::new(),
struct_has_extern_repr: false,
ignore_non_const_paths: false,
inherited_pub_visibility: false,
}
}
fn check_def_id(&mut self, def_id: ast::DefId) {
if should_explore(self.tcx, def_id) {
self.worklist.push(def_id.node);
}
self.live_symbols.insert(def_id.node);
}
fn lookup_and_handle_definition(&mut self, id: &ast::NodeId) {
self.tcx.def_map.borrow().get(id).map(|def| {
match def {
&def::DefConst(_) => {
self.check_def_id(def.def_id())
}
_ if self.ignore_non_const_paths => (),
&def::DefPrimTy(_) => (),
&def::DefVariant(enum_id, variant_id, _) => {
self.check_def_id(enum_id);
self.check_def_id(variant_id);
}
_ => {
self.check_def_id(def.def_id());
}
}
});
}
fn lookup_and_handle_method(&mut self, id: ast::NodeId,
span: codemap::Span) {
let method_call = typeck::MethodCall::expr(id);
match self.tcx.method_map.borrow().get(&method_call) {
Some(method) => {
match method.origin {
typeck::MethodStatic(def_id) => {
match ty::provided_source(self.tcx, def_id) {
Some(p_did) => self.check_def_id(p_did),
None => self.check_def_id(def_id)
}
}
typeck::MethodStaticUnboxedClosure(_) => {}
typeck::MethodTypeParam(typeck::MethodParam {
ref trait_ref,
method_num: index,
..
}) |
typeck::MethodTraitObject(typeck::MethodObject {
ref trait_ref,
method_num: index,
..
}) => {
let trait_item = ty::trait_item(self.tcx,
trait_ref.def_id,
index);
match trait_item {
ty::MethodTraitItem(method) => {
self.check_def_id(method.def_id);
}
ty::TypeTraitItem(typedef) => {
self.check_def_id(typedef.def_id);
}
}
}
}
}
None => {
self.tcx.sess.span_bug(span,
"method call expression not \
in method map?!")
}
}
}
fn handle_field_access(&mut self, lhs: &ast::Expr, name: &ast::Ident) {
match ty::get(ty::expr_ty_adjusted(self.tcx, lhs)).sty {
ty::ty_struct(id, _) => {
let fields = ty::lookup_struct_fields(self.tcx, id);
let field_id = fields.iter()
.find(|field| field.name == name.name).unwrap().id;
self.live_symbols.insert(field_id.node);
},
_ => ()
}
}
fn handle_tup_field_access(&mut self, lhs: &ast::Expr, idx: uint) {
match ty::get(ty::expr_ty_adjusted(self.tcx, lhs)).sty {
ty::ty_struct(id, _) => {
let fields = ty::lookup_struct_fields(self.tcx, id);
let field_id = fields[idx].id;
self.live_symbols.insert(field_id.node);
},
_ => ()
}
}
fn handle_field_pattern_match(&mut self, lhs: &ast::Pat,
pats: &[codemap::Spanned<ast::FieldPat>]) {
let id = match (*self.tcx.def_map.borrow())[lhs.id] {
def::DefVariant(_, id, _) => id,
_ => {
match ty::ty_to_def_id(ty::node_id_to_type(self.tcx,
lhs.id)) {
None => {
self.tcx.sess.span_bug(lhs.span,
"struct pattern wasn't of a \
type with a def ID?!")
}
Some(def_id) => def_id,
}
}
};
let fields = ty::lookup_struct_fields(self.tcx, id);
for pat in pats.iter() {
let field_id = fields.iter()
.find(|field| field.name == pat.node.ident.name).unwrap().id;
self.live_symbols.insert(field_id.node);
}
}
fn mark_live_symbols(&mut self) {
let mut scanned = HashSet::new();
while self.worklist.len() > 0 {
let id = self.worklist.pop().unwrap();
if scanned.contains(&id) {
continue
}
scanned.insert(id);
match self.tcx.map.find(id) {
Some(ref node) => {
self.live_symbols.insert(id);
self.visit_node(node);
}
None => (),
}
}
}
fn visit_node(&mut self, node: &ast_map::Node) {
let had_extern_repr = self.struct_has_extern_repr;
self.struct_has_extern_repr = false;
let had_inherited_pub_visibility = self.inherited_pub_visibility;
self.inherited_pub_visibility = false;
match *node {
ast_map::NodeItem(item) => {
match item.node {
ast::ItemStruct(..) => {
self.struct_has_extern_repr = item.attrs.iter().any(|attr| {
attr::find_repr_attrs(self.tcx.sess.diagnostic(), attr)
.contains(&attr::ReprExtern)
});
visit::walk_item(self, &*item);
}
ast::ItemEnum(..) => {
self.inherited_pub_visibility = item.vis == ast::Public;
visit::walk_item(self, &*item);
}
ast::ItemFn(..)
| ast::ItemTy(..)
| ast::ItemStatic(..)
| ast::ItemConst(..) => {
visit::walk_item(self, &*item);
}
_ => ()
}
}
ast_map::NodeTraitItem(trait_method) => {
visit::walk_trait_item(self, trait_method);
}
ast_map::NodeImplItem(impl_item) => {
match *impl_item {
ast::MethodImplItem(ref method) => {
visit::walk_block(self, method.pe_body());
}
ast::TypeImplItem(_) => {}
}
}
ast_map::NodeForeignItem(foreign_item) => {
visit::walk_foreign_item(self, &*foreign_item);
}
_ => ()
}
self.struct_has_extern_repr = had_extern_repr;
self.inherited_pub_visibility = had_inherited_pub_visibility;
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> {
fn visit_struct_def(&mut self, def: &ast::StructDef, _: ast::Ident,
_: &ast::Generics, _: ast::NodeId) {
let has_extern_repr = self.struct_has_extern_repr;
let inherited_pub_visibility = self.inherited_pub_visibility;
let live_fields = def.fields.iter().filter(|f| {
has_extern_repr || inherited_pub_visibility || match f.node.kind {
ast::NamedField(_, ast::Public) => true,
_ => false
}
});
self.live_symbols.extend(live_fields.map(|f| f.node.id));
visit::walk_struct_def(self, def);
}
fn visit_expr(&mut self, expr: &ast::Expr) {
match expr.node {
ast::ExprMethodCall(..) => {
self.lookup_and_handle_method(expr.id, expr.span);
}
ast::ExprField(ref lhs, ref ident, _) => {
self.handle_field_access(&**lhs, &ident.node);
}
ast::ExprTupField(ref lhs, idx, _) => {
self.handle_tup_field_access(&**lhs, idx.node);
}
_ => ()
}
visit::walk_expr(self, expr);
}
fn visit_pat(&mut self, pat: &ast::Pat) {
let def_map = &self.tcx.def_map;
match pat.node {
ast::PatStruct(_, ref fields, _) => {
self.handle_field_pattern_match(pat, fields.as_slice());
}
_ if pat_util::pat_is_const(def_map, pat) => {
// it might be the only use of a const
self.lookup_and_handle_definition(&pat.id)
}
_ => ()
}
self.ignore_non_const_paths = true;
visit::walk_pat(self, pat);
self.ignore_non_const_paths = false;
}
fn visit_path(&mut self, path: &ast::Path, id: ast::NodeId) {
self.lookup_and_handle_definition(&id);
visit::walk_path(self, path);
}
fn visit_item(&mut self, _: &ast::Item) {
// Do not recurse into items. These items will be added to the
// worklist and recursed into manually if necessary.
}
}
fn has_allow_dead_code_or_lang_attr(attrs: &[ast::Attribute]) -> bool {
    // Lang items are always callable from elsewhere, so never dead.
    if attr::contains_name(attrs.as_slice(), "lang") {
        return true;
    }
    // Otherwise look for an explicit `#[allow(dead_code)]`.
    let dead_code = lint::builtin::DEAD_CODE.name_lower();
    for attr in lint::gather_attrs(attrs).into_iter() {
        if let Ok((ref name, lint::Allow, _)) = attr {
            if name.get() == dead_code.as_slice() {
                return true;
            }
        }
    }
    false
}
// This visitor seeds items that
// 1) We want to explicitly consider as live:
// * Item annotated with #[allow(dead_code)]
// - This is done so that if we want to suppress warnings for a
// group of dead functions, we only have to annotate the "root".
// For example, if both `f` and `g` are dead and `f` calls `g`,
// then annotating `f` with `#[allow(dead_code)]` will suppress
// warning for both `f` and `g`.
// * Item annotated with #[lang=".."]
// - This is because lang items are always callable from elsewhere.
// or
// 2) We are not sure to be live or not
// * Implementation of a trait method
// Seeds the exploration worklist (see the comment above for the rules on
// which items are seeded).
struct LifeSeeder {
    worklist: Vec<ast::NodeId> // ids considered live a priori or undecided
}
impl<'v> Visitor<'v> for LifeSeeder {
fn visit_item(&mut self, item: &ast::Item) {
let allow_dead_code = has_allow_dead_code_or_lang_attr(item.attrs.as_slice());
if allow_dead_code {
self.worklist.push(item.id);
}
match item.node {
ast::ItemEnum(ref enum_def, _) if allow_dead_code => {
self.worklist.extend(enum_def.variants.iter().map(|variant| variant.node.id));
}
ast::ItemImpl(_, Some(ref _trait_ref), _, ref impl_items) => {
for impl_item in impl_items.iter() {
match *impl_item {
ast::MethodImplItem(ref method) => {
self.worklist.push(method.id);
}
ast::TypeImplItem(_) => {}
}
}
}
_ => ()
}
visit::walk_item(self, item);
}
fn visit_fn(&mut self, fk: visit::FnKind<'v>,
_: &'v ast::FnDecl, block: &'v ast::Block,
_: codemap::Span, id: ast::NodeId) {
// Check for method here because methods are not ast::Item
match fk {
visit::FkMethod(_, _, method) => {
if has_allow_dead_code_or_lang_attr(method.attrs.as_slice()) {
self.worklist.push(id);
}
}
_ => ()
}
visit::walk_block(self, block);
}
}
fn create_and_seed_worklist(tcx: &ty::ctxt,
exported_items: &privacy::ExportedItems,
reachable_symbols: &NodeSet,
krate: &ast::Crate) -> Vec<ast::NodeId> {
let mut worklist = Vec::new();
// Preferably, we would only need to seed the worklist with reachable
// symbols. However, since the set of reachable symbols differs
// depending on whether a crate is built as bin or lib, and we want
// the warning to be consistent, we also seed the worklist with
// exported symbols.
for id in exported_items.iter() {
worklist.push(*id);
}
for id in reachable_symbols.iter() {
worklist.push(*id);
}
// Seed entry point
match *tcx.sess.entry_fn.borrow() {
Some((id, _)) => worklist.push(id),
None => ()
}
// Seed implemented trait methods
let mut life_seeder = LifeSeeder {
worklist: worklist
};
visit::walk_crate(&mut life_seeder, krate);
return life_seeder.worklist;
}
// Computes the full set of live NodeIds: seeds a worklist from exported,
// reachable and specially-annotated items, then transitively marks
// everything they use.
fn find_live(tcx: &ty::ctxt,
             exported_items: &privacy::ExportedItems,
             reachable_symbols: &NodeSet,
             krate: &ast::Crate)
             -> Box<HashSet<ast::NodeId>> {
    let worklist = create_and_seed_worklist(tcx, exported_items,
                                            reachable_symbols, krate);
    let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist);
    symbol_visitor.mark_live_symbols();
    symbol_visitor.live_symbols
}
// Returns the constructor id of a tuple struct, or None for any other item.
fn get_struct_ctor_id(item: &ast::Item) -> Option<ast::NodeId> {
    if let ast::ItemStruct(ref struct_def, _) = item.node {
        struct_def.ctor_id
    } else {
        None
    }
}
// Second pass: walks the crate and reports every definition whose id is not
// in the `live_symbols` set computed by `find_live`.
struct DeadVisitor<'a, 'tcx: 'a> {
    tcx: &'a ty::ctxt<'tcx>,
    live_symbols: Box<HashSet<ast::NodeId>>, // ids proven reachable/live
}
impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
fn should_warn_about_item(&mut self, item: &ast::Item) -> bool {
let should_warn = match item.node {
ast::ItemStatic(..)
| ast::ItemConst(..)
| ast::ItemFn(..)
| ast::ItemEnum(..)
| ast::ItemStruct(..) => true,
_ => false
};
let ctor_id = get_struct_ctor_id(item);
should_warn && !self.symbol_is_live(item.id, ctor_id)
}
fn should_warn_about_field(&mut self, node: &ast::StructField_) -> bool {
let is_named = node.ident().is_some();
let field_type = ty::node_id_to_type(self.tcx, node.id);
let is_marker_field = match ty::ty_to_def_id(field_type) {
Some(def_id) => self.tcx.lang_items.items().any(|(_, item)| *item == Some(def_id)),
_ => false
};
is_named
&& !self.symbol_is_live(node.id, None)
&& !is_marker_field
&& !has_allow_dead_code_or_lang_attr(node.attrs.as_slice())
}
fn should_warn_about_variant(&mut self, variant: &ast::Variant_) -> bool {
!self.symbol_is_live(variant.id, None)
&& !has_allow_dead_code_or_lang_attr(variant.attrs.as_slice())
}
// id := node id of an item's definition.
// ctor_id := `Some` if the item is a struct_ctor (tuple struct),
// `None` otherwise.
// If the item is a struct_ctor, then either its `id` or
// `ctor_id` (unwrapped) is in the live_symbols set. More specifically,
// DefMap maps the ExprPath of a struct_ctor to the node referred by
// `ctor_id`. On the other hand, in a statement like
// `type <ident> <generics> = <ty>;` where <ty> refers to a struct_ctor,
// DefMap maps <ty> to `id` instead.
fn symbol_is_live(&mut self, id: ast::NodeId,
ctor_id: Option<ast::NodeId>) -> bool {
if self.live_symbols.contains(&id)
|| ctor_id.map_or(false,
|ctor| self.live_symbols.contains(&ctor)) {
return true;
}
// If it's a type whose methods are live, then it's live, too.
// This is done to handle the case where, for example, the static
// method of a private type is used, but the type itself is never
// called directly.
let impl_items = self.tcx.impl_items.borrow();
match self.tcx.inherent_impls.borrow().get(&local_def(id)) {
None => (),
Some(impl_list) => {
for impl_did in impl_list.iter() {
for item_did in (*impl_items)[*impl_did].iter() {
if self.live_symbols.contains(&item_did.def_id()
.node) {
return true;
}
}
}
}
}
false
}
fn warn_dead_code(&mut self,
id: ast::NodeId,
span: codemap::Span,
ident: ast::Ident,
node_type: &str) {
let name = ident.as_str();
if !name.starts_with("_") {
self.tcx
.sess
.add_lint(lint::builtin::DEAD_CODE,
id,
span,
format!("{} is never used: `{}`", node_type, name));
}
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item) {
if self.should_warn_about_item(item) {
self.warn_dead_code(item.id, item.span, item.ident, item.node.descriptive_variant());
} else {
match item.node {
ast::ItemEnum(ref enum_def, _) => {
for variant in enum_def.variants.iter() {
if self.should_warn_about_variant(&variant.node) {
self.warn_dead_code(variant.node.id, variant.span,
variant.node.name, "variant");
}
}
},
_ => ()
}
}
visit::walk_item(self, item);
}
fn visit_foreign_item(&mut self, fi: &ast::ForeignItem) {
if !self.symbol_is_live(fi.id, None) {
self.warn_dead_code(fi.id, fi.span, fi.ident, fi.node.descriptive_variant());
}
visit::walk_foreign_item(self, fi);
}
<|fim▁hole|> match fk {
visit::FkMethod(name, _, _) => {
if !self.symbol_is_live(id, None) {
self.warn_dead_code(id, span, name, "method");
}
}
_ => ()
}
visit::walk_block(self, block);
}
fn visit_struct_field(&mut self, field: &ast::StructField) {
if self.should_warn_about_field(&field.node) {
self.warn_dead_code(field.node.id, field.span,
field.node.ident().unwrap(), "struct field");
}
visit::walk_struct_field(self, field);
}
// Overwrite so that we don't warn the trait method itself.
fn visit_trait_item(&mut self, trait_method: &ast::TraitItem) {
match *trait_method {
ast::ProvidedMethod(ref method) => {
visit::walk_block(self, &*method.pe_body())
}
ast::RequiredMethod(_) => {}
ast::TypeTraitItem(_) => {}
}
}
}
// Entry point of the dead-code pass: computes the live set and then walks
// the crate, emitting a lint for every unused item, field, variant or method.
pub fn check_crate(tcx: &ty::ctxt,
                   exported_items: &privacy::ExportedItems,
                   reachable_symbols: &NodeSet) {
    let krate = tcx.map.krate();
    let live_symbols = find_live(tcx, exported_items,
                                 reachable_symbols, krate);
    let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols };
    visit::walk_crate(&mut visitor, krate);
}
_: &'v ast::FnDecl, block: &'v ast::Block,
span: codemap::Span, id: ast::NodeId) {
// Have to warn method here because methods are not ast::Item |
<|file_name|>quickcheck_prim.rs<|end_file_name|><|fim▁begin|>#![feature(plugin, float_extras)]
#![plugin(quickcheck_macros)]
extern crate quickcheck;
extern crate float;
use float::Float;
use quickcheck::TestResult;
use std::fmt::Display;
fn assert_eq<T: Copy + PartialEq + Display + From<Float>>(a: Float, b: T) -> TestResult
where Float: From<T>
{
let f = a.clone().into();
assert!(b == f,
"{} != {}, ({:?} vs. {:?})", f, b, a, Float::from(b));
TestResult::from_bool(true)
}
macro_rules! tests {
($t: ident) => {
mod $t {
use std::$t;
use assert_eq;
use float::Float;
use quickcheck::TestResult;
#[quickcheck]
fn to_from(x: $t) {
let f = Float::from(x);
assert_eq(f, x);
}
#[quickcheck]
fn to_f32_prec(x: $t) {<|fim▁hole|> let g = f.with_precision(24);
assert_eq(g, x as f32);
}
#[quickcheck]
fn to_f64_prec(x: $t) {
let f = Float::from(x);
let g = f.with_precision(53);
assert_eq(g, x as f64);
}
#[quickcheck]
fn add(x: $t, y: $t) {
let (f, g) = (Float::from(x), Float::from(y));
assert_eq(f + g, x + y);
}
#[quickcheck]
fn sub(x: $t, y: $t) {
let (f, g) = (Float::from(x), Float::from(y));
assert_eq(f - g, x - y);
}
#[quickcheck]
fn mul(x: $t, y: $t) {
let (f, g) = (Float::from(x), Float::from(y));
assert_eq(f + g, x + y);
}
#[quickcheck]
fn div(x: $t, y: $t) -> TestResult {
if y == 0.0 { return TestResult::discard() }
let (f, g) = (Float::from(x), Float::from(y));
assert_eq(f / g, x / y)
}
#[quickcheck]
fn sqrt(x: $t) -> TestResult {
if x < 0.0 { return TestResult::discard() }
let f = Float::from(x);
assert_eq(f.sqrt(), x.sqrt())
}
#[quickcheck]
fn next_after(x: $t, target: $t) {
let f = Float::from(x);
let f_target = Float::from(target);
assert_eq(f.clone().next_toward(&f_target),
x.next_after(target));
assert_eq(f.clone().next_toward(&f), x.next_after(x));
}
#[quickcheck]
fn next_above(x: $t) {
let f = Float::from(x);
assert_eq(f.next_above(), x.next_after($t::INFINITY));
}
#[quickcheck]
fn next_below(x: $t) {
let f = Float::from(x);
assert_eq(f.next_below(), x.next_after($t::NEG_INFINITY));
}
#[quickcheck]
fn eq(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 == *b1, p1 == p1);
assert_eq!(*b1 == *b2, p1 == p2);
assert_eq!(*b2 == *b1, p2 == p1);
assert_eq!(*b2 == *b2, p2 == p2);
assert_eq!(*b1 == p1, p1 == p1);
assert_eq!(*b1 == p2, p1 == p2);
assert_eq!(*b2 == p1, p2 == p1);
assert_eq!(*b2 == p2, p2 == p2);
assert_eq!(p1 == *b1, p1 == p1);
assert_eq!(p1 == *b2, p1 == p2);
assert_eq!(p2 == *b1, p2 == p1);
assert_eq!(p2 == *b2, p2 == p2);
}
}
}
#[quickcheck]
fn ne(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 != *b1, p1 != p1);
assert_eq!(*b1 != *b2, p1 != p2);
assert_eq!(*b2 != *b1, p2 != p1);
assert_eq!(*b2 != *b2, p2 != p2);
assert_eq!(*b1 != p1, p1 != p1);
assert_eq!(*b1 != p2, p1 != p2);
assert_eq!(*b2 != p1, p2 != p1);
assert_eq!(*b2 != p2, p2 != p2);
assert_eq!(p1 != *b1, p1 != p1);
assert_eq!(p1 != *b2, p1 != p2);
assert_eq!(p2 != *b1, p2 != p1);
assert_eq!(p2 != *b2, p2 != p2);
}
}
}
#[quickcheck]
fn partial_cmp(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(b1.partial_cmp(&*b1), p1.partial_cmp(&p1));
assert_eq!(b1.partial_cmp(&*b2), p1.partial_cmp(&p2));
assert_eq!(b2.partial_cmp(&*b1), p2.partial_cmp(&p1));
assert_eq!(b2.partial_cmp(&*b2), p2.partial_cmp(&p2));
assert_eq!(b1.partial_cmp(&p1), p1.partial_cmp(&p1));
assert_eq!(b1.partial_cmp(&p2), p1.partial_cmp(&p2));
assert_eq!(b2.partial_cmp(&p1), p2.partial_cmp(&p1));
assert_eq!(b2.partial_cmp(&p2), p2.partial_cmp(&p2));
assert_eq!(p1.partial_cmp(b1), p1.partial_cmp(&p1));
assert_eq!(p1.partial_cmp(b2), p1.partial_cmp(&p2));
assert_eq!(p2.partial_cmp(b1), p2.partial_cmp(&p1));
assert_eq!(p2.partial_cmp(b2), p2.partial_cmp(&p2));
}
}
}
#[quickcheck]
fn lt(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 < *b1, p1 < p1);
assert_eq!(*b1 < *b2, p1 < p2);
assert_eq!(*b2 < *b1, p2 < p1);
assert_eq!(*b2 < *b2, p2 < p2);
assert_eq!(*b1 < p1, p1 < p1);
assert_eq!(*b1 < p2, p1 < p2);
assert_eq!(*b2 < p1, p2 < p1);
assert_eq!(*b2 < p2, p2 < p2);
assert_eq!(p1 < *b1, p1 < p1);
assert_eq!(p1 < *b2, p1 < p2);
assert_eq!(p2 < *b1, p2 < p1);
assert_eq!(p2 < *b2, p2 < p2);
}
}
}
#[quickcheck]
fn le(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 <= *b1, p1 <= p1);
assert_eq!(*b1 <= *b2, p1 <= p2);
assert_eq!(*b2 <= *b1, p2 <= p1);
assert_eq!(*b2 <= *b2, p2 <= p2);
assert_eq!(*b1 <= p1, p1 <= p1);
assert_eq!(*b1 <= p2, p1 <= p2);
assert_eq!(*b2 <= p1, p2 <= p1);
assert_eq!(*b2 <= p2, p2 <= p2);
assert_eq!(p1 <= *b1, p1 <= p1);
assert_eq!(p1 <= *b2, p1 <= p2);
assert_eq!(p2 <= *b1, p2 <= p1);
assert_eq!(p2 <= *b2, p2 <= p2);
}
}
}
#[quickcheck]
fn gt(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 > *b1, p1 > p1);
assert_eq!(*b1 > *b2, p1 > p2);
assert_eq!(*b2 > *b1, p2 > p1);
assert_eq!(*b2 > *b2, p2 > p2);
assert_eq!(*b1 > p1, p1 > p1);
assert_eq!(*b1 > p2, p1 > p2);
assert_eq!(*b2 > p1, p2 > p1);
assert_eq!(*b2 > p2, p2 > p2);
assert_eq!(p1 > *b1, p1 > p1);
assert_eq!(p1 > *b2, p1 > p2);
assert_eq!(p2 > *b1, p2 > p1);
assert_eq!(p2 > *b2, p2 > p2);
}
}
}
#[quickcheck]
fn ge(x: $t, y: $t) {
let f = Float::from(x);
let g = Float::from(y);
let arr = [(&f, x), (&g, y)];
for &(b1, p1) in &arr {
for &(b2, p2) in &arr {
assert_eq!(*b1 >= *b1, p1 >= p1);
assert_eq!(*b1 >= *b2, p1 >= p2);
assert_eq!(*b2 >= *b1, p2 >= p1);
assert_eq!(*b2 >= *b2, p2 >= p2);
assert_eq!(*b1 >= p1, p1 >= p1);
assert_eq!(*b1 >= p2, p1 >= p2);
assert_eq!(*b2 >= p1, p2 >= p1);
assert_eq!(*b2 >= p2, p2 >= p2);
assert_eq!(p1 >= *b1, p1 >= p1);
assert_eq!(p1 >= *b2, p1 >= p2);
assert_eq!(p2 >= *b1, p2 >= p1);
assert_eq!(p2 >= *b2, p2 >= p2);
}
}
}
}
}
}
tests!(f32);
tests!(f64);<|fim▁end|> | let f = Float::from(x); |
<|file_name|>app.py<|end_file_name|><|fim▁begin|>from twisted.application.service import Application
from twisted.application.internet import TimerService, TCPServer
from twisted.web import server
from twisted.python import log
from scrapy.utils.misc import load_object
from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment
from .launcher import Launcher
from .eggstorage import FilesystemEggStorage
from .scheduler import SpiderScheduler<|fim▁hole|>
def application(config):
    """Build the Scrapyd Twisted service tree.

    Registers the pluggable components (poller, egg storage, scheduler,
    environment) on a twisted Application, then attaches the launcher,
    a 5-second poll timer and the HTTP web console as child services.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')

    # Register the pluggable components on the application.
    poller = QueuePoller(config)
    app.setComponent(IPoller, poller)
    app.setComponent(IEggStorage, FilesystemEggStorage(config))
    app.setComponent(ISpiderScheduler, SpiderScheduler(config))
    app.setComponent(IEnvironment, Environment(config))

    # The launcher class is configurable; fall back to the bundled one.
    launcher_cls = load_object(config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)

    poll_timer = TimerService(5, poller.poll)
    web_console = TCPServer(http_port, server.Site(Root(config, app)),
                            interface=bind_address)
    log.msg("Scrapyd web console available at http://%s:%s/" % (bind_address, http_port))

    launcher.setServiceParent(app)
    poll_timer.setServiceParent(app)
    web_console.setServiceParent(app)
    return app
from .environ import Environment
from .website import Root
from .config import Config |
<|file_name|>api-put-object-multipart.go<|end_file_name|><|fim▁begin|>/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
)
// Comprehensive put object operation involving multipart resumable uploads.
//
// Following code handles these types of readers.
//
// - *os.File<|fim▁hole|>// is where each part is re-downloaded, checksummed and verified
// before upload.
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
	// Dispatch to the most capable upload strategy based on the concrete
	// reader type; seekable/random-access readers allow resuming without
	// buffering, everything else falls through to the streaming path.
	if size > 0 && size > minPartSize {
		// Verify if reader is *os.File, then use file system functionalities.
		if isFile(reader) {
			return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
		}
		// Verify if reader is *minio.Object or io.ReaderAt.
		// NOTE: Verification of object is kept for a specific purpose
		// while it is going to be duck typed similar to io.ReaderAt.
		// It is to indicate that *minio.Object implements io.ReaderAt.
		// and such a functionality is used in the subsequent code
		// path.
		if isObject(reader) || isReadAt(reader) {
			return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
		}
	}
	// For any other data size and reader type we do generic multipart
	// approach by staging data in temporary files and uploading them.
	// This also covers unknown sizes (size <= 0) and small payloads.
	return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
}
// putObjectStream uploads files bigger than 5MiB, and also supports
// special case where size is unknown i.e '-1'.
//
// The upload is resumable: previously uploaded parts (looked up via the
// existing upload ID) are skipped when their size/ETag already match.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return 0, err
	}
	if err := isValidObjectName(objectName); err != nil {
		return 0, err
	}
	// Total data read and written to server. should be equal to 'size' at the end of the call.
	var totalUploadedSize int64
	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload
	// A map of all previously uploaded parts.
	var partsInfo = make(map[int]objectPart)
	// getUploadID for an object, initiates a new multipart request
	// if it cannot find any previously partially uploaded object.
	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
	if err != nil {
		return 0, err
	}
	// If This session is a continuation of a previous session fetch all
	// previously uploaded parts info and as a special case only fetch partsInfo
	// for only known upload size.
	if !isNew {
		// Fetch previously uploaded parts and maximum part size.
		partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
		if err != nil {
			return 0, err
		}
	}
	// Calculate the optimal parts info for a given size.
	totalPartsCount, partSize, _, err := optimalPartInfo(size)
	if err != nil {
		return 0, err
	}
	// Part number always starts with '1'.
	partNumber := 1
	// Initialize a temporary buffer.
	tmpBuffer := new(bytes.Buffer)
	for partNumber <= totalPartsCount {
		// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
		// with non-v4 signature request or HTTPS connection
		hashSums := make(map[string][]byte)
		hashAlgos := make(map[string]hash.Hash)
		hashAlgos["md5"] = md5.New()
		if c.signature.isV4() && !c.secure {
			hashAlgos["sha256"] = sha256.New()
		}
		// Calculates hash sums while copying partSize bytes into tmpBuffer.
		// rErr == io.EOF is tolerated here: the final part may be short.
		prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
		if rErr != nil {
			if rErr != io.EOF {
				return 0, rErr
			}
		}
		// NOTE(review): this local deliberately shadows the function's
		// `reader` parameter. The source reader has already been drained
		// into tmpBuffer by hashCopyN above; from here on `reader`
		// re-reads the buffered part (wrapped by newHook, presumably to
		// report progress — confirm against newHook's definition).
		var reader io.Reader
		// Update progress reader appropriately to the latest offset
		// as we read from the source.
		reader = newHook(tmpBuffer, progress)
		part, ok := partsInfo[partNumber]
		// Verify if part should be uploaded.
		if !ok || shouldUploadPart(objectPart{
			ETag:       hex.EncodeToString(hashSums["md5"]),
			PartNumber: partNumber,
			Size:       prtSize,
		}, uploadPartReq{PartNum: partNumber, Part: &part}) {
			// Proceed to upload the part.
			var objPart objectPart
			objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
			if err != nil {
				// Reset the temporary buffer upon any error.
				tmpBuffer.Reset()
				return totalUploadedSize, err
			}
			// Save successfully uploaded part metadata.
			partsInfo[partNumber] = objPart
		} else {
			// Update the progress reader for the skipped part.
			if progress != nil {
				if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
					return totalUploadedSize, err
				}
			}
		}
		// Reset the temporary buffer.
		tmpBuffer.Reset()
		// Save successfully uploaded size.
		totalUploadedSize += prtSize
		// Increment part number.
		partNumber++
		// For unknown size, Read EOF we break away.
		// We do not have to upload till totalPartsCount.
		if size < 0 && rErr == io.EOF {
			break
		}
	}
	// Verify if we uploaded all the data.
	if size > 0 {
		if totalUploadedSize != size {
			return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
		}
	}
	// Loop over total uploaded parts to save them in
	// Parts array before completing the multipart request.
	for i := 1; i < partNumber; i++ {
		part, ok := partsInfo[i]
		if !ok {
			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
		}
		complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
			ETag:       part.ETag,
			PartNumber: part.PartNumber,
		})
	}
	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))
	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
	if err != nil {
		return totalUploadedSize, err
	}
	// Return final size.
	return totalUploadedSize, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
// Sends "POST /{object}?uploads" and decodes the XML response; the content
// type defaults to application/octet-stream when none is supplied.
func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return initiateMultipartUploadResult{}, err
	}
	if err := isValidObjectName(objectName); err != nil {
		return initiateMultipartUploadResult{}, err
	}
	// Initialize url queries.
	urlValues := make(url.Values)
	urlValues.Set("uploads", "")
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	// Set ContentType header.
	customHeader := make(http.Header)
	customHeader.Set("Content-Type", contentType)
	reqMetadata := requestMetadata{
		bucketName:   bucketName,
		objectName:   objectName,
		queryValues:  urlValues,
		customHeader: customHeader,
	}
	// Execute POST on an objectName to initiate multipart upload.
	resp, err := c.executeMethod("POST", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return initiateMultipartUploadResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Decode xml for new multipart upload.
	// NOTE(review): this variable shadows the identically-named result
	// type for the remainder of the function — intentional but easy to
	// misread.
	initiateMultipartUploadResult := initiateMultipartUploadResult{}
	err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
	if err != nil {
		return initiateMultipartUploadResult, err
	}
	return initiateMultipartUploadResult, nil
}
// uploadPart - Uploads a part in a multipart upload.
// Issues "PUT /{object}?partNumber=N&uploadId=ID" with the part body and
// optional MD5/SHA256 checksums, returning the part's metadata (including
// the server-assigned ETag) needed later to complete the upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return objectPart{}, err
	}
	if err := isValidObjectName(objectName); err != nil {
		return objectPart{}, err
	}
	if size > maxPartSize {
		return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
	}
	if size <= -1 {
		return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
	}
	if partNumber <= 0 {
		return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
	}
	if uploadID == "" {
		return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
	}
	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set part number.
	urlValues.Set("partNumber", strconv.Itoa(partNumber))
	// Set upload id.
	urlValues.Set("uploadId", uploadID)
	reqMetadata := requestMetadata{
		bucketName:         bucketName,
		objectName:         objectName,
		queryValues:        urlValues,
		contentBody:        reader,
		contentLength:      size,
		contentMD5Bytes:    md5Sum,
		contentSHA256Bytes: sha256Sum,
	}
	// Execute PUT on each part.
	resp, err := c.executeMethod("PUT", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return objectPart{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Once successfully uploaded, return completed part.
	objPart := objectPart{}
	objPart.Size = size
	objPart.PartNumber = partNumber
	// Trim off the odd double quotes from ETag in the beginning and end.
	objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
	objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
	return objPart, nil
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
// POSTs the part list as XML; note the special handling below for responses
// that return HTTP 200 but carry an error document in the body.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return completeMultipartUploadResult{}, err
	}
	if err := isValidObjectName(objectName); err != nil {
		return completeMultipartUploadResult{}, err
	}
	// Initialize url queries.
	urlValues := make(url.Values)
	urlValues.Set("uploadId", uploadID)
	// Marshal complete multipart body.
	completeMultipartUploadBytes, err := xml.Marshal(complete)
	if err != nil {
		return completeMultipartUploadResult{}, err
	}
	// Instantiate all the complete multipart buffer.
	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
	reqMetadata := requestMetadata{
		bucketName:         bucketName,
		objectName:         objectName,
		queryValues:        urlValues,
		contentBody:        completeMultipartUploadBuffer,
		contentLength:      int64(len(completeMultipartUploadBytes)),
		contentSHA256Bytes: sum256(completeMultipartUploadBytes),
	}
	// Execute POST to complete multipart upload for an objectName.
	resp, err := c.executeMethod("POST", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return completeMultipartUploadResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Read resp.Body into a []bytes to parse for Error response inside the body
	// (the body may need to be decoded twice, so it cannot be streamed).
	var b []byte
	b, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return completeMultipartUploadResult{}, err
	}
	// Decode completed multipart upload response on success.
	completeMultipartUploadResult := completeMultipartUploadResult{}
	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
	if err != nil {
		// xml parsing failure due to presence an ill-formed xml fragment
		return completeMultipartUploadResult, err
	} else if completeMultipartUploadResult.Bucket == "" {
		// xml's Decode method ignores well-formed xml that don't apply to the type of value supplied.
		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
		// of the members.
		// Decode completed multipart upload response on failure
		completeMultipartUploadErr := ErrorResponse{}
		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
		if err != nil {
			// xml parsing failure due to presence an ill-formed xml fragment
			return completeMultipartUploadResult, err
		}
		return completeMultipartUploadResult, completeMultipartUploadErr
	}
	return completeMultipartUploadResult, nil
}
// - Any reader which has a method 'ReadAt()'
//
// If we exhaust all the known types, code proceeds to use stream as |
<|file_name|>printingPens.py<|end_file_name|><|fim▁begin|>from robofab.pens.pointPen import BasePointToSegmentPen
from ufoLib.pointPen import AbstractPointPen
"""
Printing pens print their data. Useful for demos and debugging.
"""
__all__ = ["PrintingPointPen", "PrintingSegmentPen", "SegmentPrintingPointPen"]
class PrintingPointPen(AbstractPointPen):
"""A PointPen that prints every step.
"""
def __init__(self):
self.havePath = False
def beginPath(self):
self.havePath = True
print("pen.beginPath()")
def endPath(self):
self.havePath = False
print("pen.endPath()")
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
assert self.havePath
args = ["(%s, %s)" % (pt[0], pt[1])]
if segmentType is not None:
args.append("segmentType=%r" % segmentType)
if smooth:
args.append("smooth=True")
if name is not None:
args.append("name=%r" % name)
if kwargs:
args.append("**%s" % kwargs)
print("pen.addPoint(%s)" % ", ".join(args))
<|fim▁hole|> print("pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation)))
from fontTools.pens.basePen import AbstractPen
class PrintingSegmentPen(AbstractPen):

    """A SegmentPen that echoes every drawing call to stdout.

    Each method prints a line of the form ``pen.<method>(<args>)``,
    which is useful for demos and for debugging pen pipelines.
    """

    def moveTo(self, pt):
        print("pen.moveTo({0})".format(pt))

    def lineTo(self, pt):
        print("pen.lineTo({0})".format(pt))

    def curveTo(self, *pts):
        print("pen.curveTo{0}".format(pts))

    def qCurveTo(self, *pts):
        print("pen.qCurveTo{0}".format(pts))

    def closePath(self):
        print("pen.closePath()")

    def endPath(self):
        print("pen.endPath()")

    def addComponent(self, baseGlyphName, transformation):
        print("pen.addComponent({0!r}, {1})".format(baseGlyphName, tuple(transformation)))
class SegmentPrintingPointPen(BasePointToSegmentPen):
	"""A PointPen that collects each contour's points into segments and
	pretty-prints the resulting segment list (one list per contour).
	"""
	def _flushContour(self, segments):
		# Local import keeps pprint out of the module namespace; called by
		# BasePointToSegmentPen once per finished contour.
		from pprint import pprint
		pprint(segments)
if __name__ == "__main__":
p = SegmentPrintingPointPen()
from robofab.test.test_pens import TestShapes
TestShapes.onCurveLessQuadShape(p)<|fim▁end|> | def addComponent(self, baseGlyphName, transformation):
assert not self.havePath |
<|file_name|>author-routing.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { AuthorListComponent } from './authorlist.component';
import { AuthorEditComponent } from './authoredit.component';
const routes: Routes = [
{ path: 'authors', component: AuthorListComponent },
{ path: 'authors/:id', component: AuthorEditComponent }
];<|fim▁hole|>})
export class AuthorRoutingModule { }
export const routedComponents = [AuthorListComponent, AuthorEditComponent];<|fim▁end|> |
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule], |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
var app = {
// Application Constructor
initialize: function() {
this.bindEvents();
},
// Bind Event Listeners
//
// Bind any events that are required on startup. Common events are:
// 'load', 'deviceready', 'offline', and 'online'.
bindEvents: function() {
document.addEventListener('deviceready', this.onDeviceReady, false);
},
// deviceready Event Handler
//<|fim▁hole|> onDeviceReady: function() {
app.receivedEvent('deviceready');
},
// Update DOM on a Received Event
receivedEvent: function(id) {
}
};
app.initialize();<|fim▁end|> | // The scope of 'this' is the event. In order to call the 'receivedEvent'
// function, we must explicitly call 'app.receivedEvent(...);' |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![allow(dead_code)]
//! # tray\_rust - A Toy Ray Tracer in Rust
//!
//! tray\_rust is a toy physically based ray tracer built off of the techniques
//! discussed in [Physically Based Rendering](http://pbrt.org/). It began life as a port of
//! [tray](https://github.com/Twinklebear/tray) to [Rust](http://www.rust-lang.org) to check out the language.
//! The renderer is currently capable of path tracing, supports triangle meshes (MTL support coming soon),
//! and various physically based material models (including measured data from the
//! [MERL BRDF Database](http://www.merl.com/brdf/)). tray\_rust also supports rigid body animation along
//! B-spline paths and distributed rendering.
//!
//! [](https://travis-ci.org/Twinklebear/tray_rust)
//!
//! ## Running
//!
//! Running and passing `--help` or `-h` will print out options you can pass to the renderer which are documented in the help.
//! For the more complicated use cases I hope to do some write ups and guides on how to use them (e.g. distributed rendering,
//! animation) but this may take a while. I strongly recommend running the release build as the debug version will be very slow.
//!
//! ## Building Your Own Scenes
//!
//! To position and animate objects, the camera and so on the
//! [Blender plugin](https://github.com/Twinklebear/tray_rust_blender) is the easiest to use. However the plugin
//! is still in development and missing some features like setting materials, changing light properties and such so
//! you'll still currently need to do those by hand in the exported JSON file. For materials take a look at
//! the [materials documentation](http://www.willusher.io/tray_rust/tray_rust/material/index.html) for lights
//! you'll likely just want to change the emission color which is an RGB color plus a strength term.
//!
//! Start at the documentation for the [scene module](http://www.willusher.io/tray_rust/tray_rust/scene/index.html),
//! there are also a few example [scenes](https://github.com/Twinklebear/tray_rust/tree/master/scenes) included but not all
//! the models are provided. From a clean `git clone` you should be able to run
//! [cornell\_box.json](https://github.com/Twinklebear/tray_rust/blob/master/scenes/cornell_box.json) and
//! [smallpt.json](https://github.com/Twinklebear/tray_rust/blob/master/scenes/smallpt.json). I plan to add some
//! more simple scenes that show usage of other features like animation to provide examples. The rigid body animation
//! feature is relatively new though so I haven't had time to document it properly yet.
//!
//! ## TODO
//!
//! - More material models (eg. more microfacet models, rough glass, etc.)
//! - Textures
//! - Support for using an OBJ's associated MTL files
//! - Bump mapping
//! - [Subsurface scattering?](http://en.wikipedia.org/wiki/Subsurface_scattering)
//! - [Vertex Connection and Merging?](http://iliyan.com/publications/VertexMerging)
//!
//! ## Sample Renders
//!
//! In the samples the the Buddha, Dragon, Bunny and Lucy statue are from
//! [The Stanford Scanning Repository](http://graphics.stanford.edu/data/3Dscanrep/).
//! The Rust logo model was made by
//! [Nylithius on BlenderArtists](http://blenderartists.org/forum/showthread.php?362836-Rust-language-3D-logo).
//! The Utah teapot used is from [Morgan McGuire's page](http://graphics.cs.williams.edu/data/meshes.xml) and
//! the monkey head is Blender's Suzanne. I've made minor tweaks to some of the models so for convenience
//! you can find versions that can be easily loaded into the sample scenes [here](https://drive.google.com/folderview?id=0B-l_lLEMo1YeflUzUndCd01hOHhRNUhrQUowM3hVd2pCc3JrSXRiS3FQSzRYLWtGcGM0eGc&usp=sharing), though the
//! cube model for the Cornell box scene is included.
//! The materials on the Rust logo, Buddha, Dragon and Lucy are from the
//! [MERL BRDF Database](http://www.merl.com/brdf/).
//!
//! Render times are formatted as hh:mm:ss and were measured using 144 threads on a machine with four
//! [Xeon E7-8890 v3](http://ark.intel.com/products/84685/Intel-Xeon-Processor-E7-8890-v3-45M-Cache-2_50-GHz)
//! CPUs. The machine is an early/engineering sample from Intel so your results may differ, but massive thanks to
//! Intel for the hardware! Some older renders are shown as well without timing since they were
//! run on a different machine.
//!
//! Some more sample renders can be found [here](http://imgur.com/a/3qNBc).
//!
//! <a href="http://i.imgur.com/X5y8oIq.png">
//! <img src="http://i.imgur.com/X5y8oIq.png" alt="Model gallery"
//! style="display:block; max-width:100%; height:auto">
//! </a>
//!
//! 1920x1080, 4096 samples/pixel. Rendering: 00:43:36.45.
//! <|fim▁hole|>//! <a href="http://i.imgur.com/E1ylrZW.png">
//! <img src="http://i.imgur.com/E1ylrZW.png" alt="Rust Logo with friends, disk"
//! style="display:block; max-width:100%; height:auto">
//! </a>
//!
//! 1920x1080, 4096 samples/pixel. Rendering: 00:49:33.514.
//!
extern crate enum_set as enum_set;
extern crate rand;
extern crate byteorder;
extern crate serde_json;
extern crate bspline;
extern crate docopt;
#[macro_use]
extern crate serde_derive;
extern crate scoped_threadpool;
extern crate image;
extern crate bincode;
extern crate mio;
extern crate la;
extern crate light_arena;
pub mod linalg;
pub mod film;
pub mod geometry;
pub mod sampler;
pub mod integrator;
pub mod scene;
pub mod bxdf;
pub mod material;
pub mod light;
pub mod mc;
pub mod partition;
pub mod exec;
pub mod texture;<|fim▁end|> | |
<|file_name|>fasta.py<|end_file_name|><|fim▁begin|>from Bio import SeqIO
def get_proteins_for_db(fastafn, fastadelim, genefield):
    """Runs through fasta file and returns proteins accession nrs, sequences
    and evidence levels for storage in lookup DB. Duplicate accessions in
    fasta are accepted and removed by keeping only the last one.

    Returns a 6-tuple: lazy generators of (accession,) tuples,
    (accession, sequence), (accession, description), (accession, evidence),
    plus materialized lists of (ensg, accession) and (symbol, accession).
    Note the generators are consumed by the caller, not here.
    """
    # SeqIO.index maps accession -> record; the dict comprehension also
    # caches each record's detected type ('swiss', 'ensembl' or False).
    records = {acc: (rec, get_record_type(rec)) for acc, rec in
               SeqIO.index(fastafn, 'fasta').items()}
    proteins = ((x,) for x in records.keys())
    sequences = ((acc, str(rec.seq)) for acc, (rec, rtype) in records.items())
    # Descriptions only exist for recognized record types (rtype truthy).
    desc = ((acc, get_description(rec, rtype)) for acc, (rec, rtype) in records.items() if rtype)
    evid = ((acc, get_uniprot_evidence_level(rec, rtype)) for acc, (rec, rtype) in
            records.items())
    # ENSG mappings apply to ENSEMBL records only.
    ensgs = [(get_ensg(rec), acc) for acc, (rec, rtype) in records.items()
             if rtype == 'ensembl']
    def sym_out():
        # Symbols from recognized records, plus gene fields parsed out of
        # custom-delimited headers for unrecognized records.
        symbols = ((get_symbol(rec, rtype, fastadelim, genefield), acc) for
                   acc, (rec, rtype) in records.items() if rtype)
        othergene = ((get_other_gene(rec, fastadelim, genefield), acc) for acc, (rec, rtype) in records.items()
                     if not rtype and fastadelim and fastadelim in rec.description)
        yield from symbols
        yield from othergene
    return proteins, sequences, desc, evid, ensgs, [x for x in sym_out()]
def parse_fasta(fn):
    """Yield Biopython SeqRecord objects from the FASTA file at ``fn``."""
    with open(fn) as handle:
        yield from SeqIO.parse(handle, 'fasta')
def get_record_type(record):
    """Classify a fasta record as 'swiss', 'ensembl' or False (unknown).

    Any decoy marker (e.g. '_decoy') is stripped from the record id
    before classification so decoy entries classify like their targets.
    """
    name = record.id
    decoy_mod = get_decoy_mod_string(name)
    if decoy_mod is not None:
        name = name.replace(decoy_mod, '')
    if name.split('|')[0] in ('sp', 'tr'):
        return 'swiss'
    if name.startswith('ENS'):
        return 'ensembl'
    return False
def get_decoy_mod_string(protein):
    """Return the decoy marker embedded in ``protein``, or None.

    Markers are recognized only as a prefix or suffix (optionally joined
    with an underscore); a marker in the middle of the name is ignored.
    The returned string includes the joining underscore when present, so
    stripping it from the name recovers the target accession.
    """
    for mod in ('tryp_reverse', 'reverse', 'decoy', 'random', 'shuffle'):
        if mod not in protein:
            continue
        if protein.endswith('_' + mod):
            return '_' + mod
        if protein.endswith(mod):
            return mod
        if protein.startswith(mod + '_'):
            return mod + '_'
        if protein.startswith(mod):
            return mod
def get_description(record, rectype):
    """Extract a human-readable description from a fasta record header.

    For 'ensembl' records, returns everything after the 'description:'
    field ('NA' when absent). For 'swiss' records, returns the words
    between the accession and the first KEY=value token. Other record
    types return None.
    """
    if rectype == 'ensembl':
        parts = [x.split(':') for x in record.description.split()]
        try:
            start = next(i for i, p in enumerate(parts) if p[0] == 'description')
        except StopIteration:
            return 'NA'
        # Rejoin from the description field onward, then drop the
        # leading 'description:' (12 characters).
        return ' '.join(':'.join(p) for p in parts[start:])[12:]
    elif rectype == 'swiss':
        kept = []
        for word in record.description.split()[1:]:
            if '=' in word:
                break
            kept.append(word)
        return ' '.join(kept)
def get_other_gene(record, fastadelim, genefield):
    """Return the gene field from a custom-delimited fasta header."""
    fields = record.description.split(fastadelim)
    return fields[genefield]
def get_genes_pickfdr(fastafn, outputtype, fastadelim, genefield):
    """Called by protein FDR module for both ENSG and e.g. Uniprot

    Yields gene identifiers from the fasta file: ENSG ids when
    outputtype == 'ensg' (ENSEMBL records only), or gene symbols when
    outputtype == 'genename'. Records matching neither branch are skipped.
    """
    for rec in parse_fasta(fastafn):
        rtype = get_record_type(rec)
        if rtype == 'ensembl' and outputtype == 'ensg':
            yield get_ensg(rec)
        elif outputtype == 'genename':
            yield get_symbol(rec, rtype, fastadelim, genefield)
def get_ensg(record):
    """Return the ENSG gene id from an ENSEMBL fasta header.

    Raises RuntimeError when no ``gene:<id>`` field is present.
    """
    for field in record.description.split():
        parts = field.split(':')
        if len(parts) == 2 and parts[0] == 'gene':
            return parts[1]
    raise RuntimeError('ENSEMBL detected but cannot find gene ENSG in fasta')
def get_symbol(record, rectype, fastadelim, genefield):
if rectype == 'ensembl':
fields = [x.split(':') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'gene_symbol' and len(x) == 2]
elif rectype == 'swiss':
fields = [x.split('=') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'GN' and len(x) == 2]
elif fastadelim and fastadelim in record.description and genefield:
return record.description.split(fastadelim)[genefield]
else:
return 'NA'
try:<|fim▁hole|> return 'NA'
def get_uniprot_evidence_level(record, rtype):
"""Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better."""
if rtype != 'swiss':
return -1
for item in record.description.split():
item = item.split('=')
try:
if item[0] == 'PE' and len(item) == 2:
return 5 - int(item[1])
except IndexError:
continue
return -1<|fim▁end|> | return sym[0]
except IndexError: |
<|file_name|>task.py<|end_file_name|><|fim▁begin|>import argparse
import json
import os
import shutil
from .model import train_and_evaluate
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# File arguments
parser.add_argument(
"--train_file_pattern",
help="GCS location to read training data.",
required=True
)
parser.add_argument(
"--eval_file_pattern",
help="GCS location to read evaluation data.",
required=True
)
parser.add_argument(
"--output_dir",
help="GCS location to write checkpoints and export models.",
required=True
)
parser.add_argument(
"--job-dir",
help="This model ignores this field, but it is required by gcloud.",
default="junk"
)
# Sequence shape hyperparameters
parser.add_argument(
"--seq_len",
help="Number of timesteps to include in each example.",
type=int,
default=30
)<|fim▁hole|> parser.add_argument(
"--train_batch_size",
help="Number of examples in training batch.",
type=int,
default=32
)
parser.add_argument(
"--eval_batch_size",
help="Number of examples in evaluation batch.",
type=int,
default=32
)
parser.add_argument(
"--train_steps",
help="Number of batches to train.",
type=int,
default=1024
)
parser.add_argument(
"--learning_rate",
help="How quickly or slowly we train our model by scaling the gradient.",
type=float,
default=0.1
)
parser.add_argument(
"--start_delay_secs",
help="Number of seconds to wait before first evaluation.",
type=int,
default=60
)
parser.add_argument(
"--throttle_secs",
help="Number of seconds to wait between evaluations.",
type=int,
default=120
)
## LSTM hyperparameters
parser.add_argument(
"--lstm_hidden_units",
help="Hidden layer sizes to use for LSTM.",
type=str,
default="64,32,16"
)
# Parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# Unused args provided by service
arguments.pop("job_dir", None)
arguments.pop("job-dir", None)
# Fix list arguments
arguments["lstm_hidden_units"] = [
int(x) for x in arguments["lstm_hidden_units"].split(",")]
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
arguments["output_dir"] = os.path.join(
arguments["output_dir"],
json.loads(
os.environ.get("TF_CONFIG", "{}")
).get("task", {}).get("trial", "")
)
# Run the model
shutil.rmtree(path=arguments["output_dir"], ignore_errors=True) # start fresh each time
train_and_evaluate(arguments)<|fim▁end|> |
# Training parameters |
<|file_name|>issue-22025.rs<|end_file_name|><|fim▁begin|>// compile-flags: -Cmetadata=aux
pub mod foo {
pub trait Foo {}
pub struct Bar;<|fim▁hole|> impl Foo for Bar {}
}<|fim▁end|> | |
<|file_name|>test.js<|end_file_name|><|fim▁begin|>var sys = require("sys"),
my_http = require("http"),
path = require("path"),
url = require("url"),
filesys = require("fs");
my_http.createServer(function(request,response){
var my_path = url.parse(request.url).pathname;
var full_path = path.join(process.cwd(),my_path);
path.exists(full_path,function(exists){
if(!exists){
response.writeHeader(404, {"Content-Type": "text/plain"});
response.write("404 Not Found\n");
response.end();<|fim▁hole|> if(err) {
response.writeHeader(500, {"Content-Type": "text/plain"});
response.write(err + "\n");
response.end();
}
else{
response.writeHeader(200);
response.write(file, "binary");
response.end();
}
});
}
});
}).listen(8080);
sys.puts("Server Running on 8080");<|fim▁end|> | }
else{
filesys.readFile(full_path, "binary", function(err, file) { |
<|file_name|>tickets.js<|end_file_name|><|fim▁begin|>/**
* Setup (required for Joomla! 3)
*/
if(typeof(akeeba) == 'undefined') {
var akeeba = {};
}
if(typeof(akeeba.jQuery) == 'undefined') {
akeeba.jQuery = jQuery.noConflict();
}
akeeba.jQuery(document).ready(function($){
function atsAssignmentClick()
{
var parent = akeeba.jQuery(this).parent('td');
var id = akeeba.jQuery(this).parents('td').find('input.ticket_id').val();
var hide = ['.loading img', '.loading .icon-warning-sign'];
var show = ['.loading .icon-ok'];
var assign_to = 0;
if(akeeba.jQuery(this).hasClass('assignme'))
{
assign_to = akeeba.jQuery('#user').val();
}
else if(akeeba.jQuery(this).parent('.assignto'))
{
assign_to = akeeba.jQuery(this).parent().find('input.assignto').val();
}
if(this.hasClass('unassign')){
hide.push('.unassign');
show.push('.assignme');
}
else{
hide.push('.assignme');
show.push('.unassign');
}
var structure = {
_rootElement: this,
type: "POST",
dataType: 'json',
url : ATS_ROOT_URL + 'index.php?option=com_ats&view=ticket&format=json&' + akeeba.jQuery('#token').attr('name') + '=1',
data: {
'task' : 'assign',
'id' : id,
'assigned_to' : assign_to
},
beforeSend: function() {
var wait = akeeba.jQuery(this._rootElement).parents('td').find('.loading');
wait.css('display','inline').find('i').css('display', 'none');
wait.find('img').css('display', 'inline-block');
},
success: function(responseJSON)
{
var assigned = akeeba.jQuery(this._rootElement).parents('td').find('.assigned_to');
var unassign = akeeba.jQuery(this._rootElement).hasClass('unassign');
if(responseJSON.result == true){
assigned.html(responseJSON.assigned);
unassign ? assigned.removeClass('badge-info') : assigned.addClass('badge-info');
for (var i = 0; i < hide.length; i++)
{
var elementDefinition = hide[i];
akeeba.jQuery(this._rootElement).parents('td').find(elementDefinition).css('display', 'none');
}
for (var i = 0; i < show.length; i++)
{
var elementDefinition = show[i];
akeeba.jQuery(this._rootElement).parents('td').find(elementDefinition).css('display', 'inline-block');
}
}
else
{<|fim▁hole|> wait.find('.icon-warning-sign').show('fast');
}
}
};
akeeba.jQuery.ajax( structure );
}
akeeba.jQuery('.unassign a').click(atsAssignmentClick);
akeeba.jQuery('.assignme a').click(atsAssignmentClick);
akeeba.jQuery('.assignto li a').click(atsAssignmentClick);
akeeba.jQuery('.select-status li a').click(function(){
var image = akeeba.jQuery(this).parent().find('img');
var self = this;
akeeba.jQuery.ajax(ATS_ROOT_URL + 'index.php?option=com_ats&view=tickets&task=ajax_set_status&format=json&'+jQuery('#token').attr('name')+'=1',{
type : 'POST',
dataType : 'json',
data : {
'id' : akeeba.jQuery(this).parents('tr').find('.ats_ticket_id').val(),
'status' : akeeba.jQuery(this).data('status')
},
beforeSend : function(){
image.show();
},
success : function(responseJSON){
image.hide();
if(responseJSON.err){
alert(responseJSON.err);
}
else{
var label = akeeba.jQuery(self).parents('td').find('span[class*="label-"]');
label.attr('class', 'ats-status label pull-right ' + responseJSON.ats_class).html(responseJSON.msg);
}
}
})
})
});<|fim▁end|> | var wait = akeeba.jQuery(this._rootElement).parents('td').find('.loading');
wait.find('.icon-ok,img').css('display', 'none'); |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
from fccsmap import __version__
test_requirements = []
with open('requirements-test.txt') as f:
test_requirements = [r for r in f.read().splitlines()]
setup(
name='fccsmap',
version=__version__,
author='Joel Dubowy',
license='GPLv3+',
author_email='[email protected]',
packages=find_packages(),
scripts=[
'bin/fccsmap'
],
package_data={
'fccsmap': ['data/*.nc']
},<|fim▁hole|> "Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX",
"Operating System :: MacOS"
],
url='https://github.com/pnwairfire/fccsmap/',
description='supports the look-up of FCCS fuelbed information by lat/lng or vector geo spatial data.',
install_requires=[
"afscripting>=2.0.0",
# Note: numpy and gdal must now be installed manually beforehand
"shapely==1.7.1",
"pyproj==3.0.0.post1",
"rasterstats==0.15.0"
],
dependency_links=[
"https://pypi.airfire.org/simple/afscripting/",
],
tests_require=test_requirements
)<|fim▁end|> | classifiers=[ |
<|file_name|>new-account-form.service.js<|end_file_name|><|fim▁begin|>(function() {
"use strict";
angular.module("blocktrail.setup")
.factory("newAccountFormService", function($log, $http, $q, _, cryptoJS, device, CONFIG, launchService, settingsService, trackingService) {
return new NewAccountFormService($log, $http, $q, _, cryptoJS, device, CONFIG, launchService, settingsService, trackingService);
}
);
function NewAccountFormService($log, $http, $q, _, cryptoJS, device, CONFIG, launchService, settingsService, trackingService) {
var self = this;
self._$log = $log;
self._$http = $http;
self._$q = $q;
self._lodash = _;
self._cryptoJS = cryptoJS;
self._device = device || {};
self._CONFIG = CONFIG;
self._launchService = launchService;
self._settingsService = settingsService;
self._trackingService = trackingService;
}
/**
* Register
* @param data
* @return { promise }
*/
NewAccountFormService.prototype.register = function(data) {
var self = this;
var postData = {
username: null,
email: data.email,
password: self._cryptoJS.SHA512(data.password).toString(),
password_score: data.passwordCheck && data.passwordCheck.score || 0,
platform: ionic.Platform.isIOS() ? "iOS" : "Android",
version: self._CONFIG.VERSION || self._CONFIG.VERSION_REV,
device_uuid: self._device.uuid,
device_name: (self._device.platform || self._device.model) ? ([self._device.platform, self._device.model].clean().join(" / ")) : "Unknown Device",
super_secret: null,
powtcha: null,
browser_fingerprint: null,
skip_two_factor: true, // will make the resulting API key not require 2FA in the future
captcha : window.captchaToken
};
var url = self._CONFIG.API_URL + "/v1/" + data.networkType + "/mywallet/register";
self._$log.debug("M:SETUP:newAccountFormService: register", postData.email, postData.platform, postData.device_name);
return self._$http.post(url, postData)
.then(self._trackEvent.bind(self))
.then(self._setAccountInfo.bind(self))
.catch(self._errorHandler.bind(self));
};
/**
* @param response
* @return response
* @private
*/
NewAccountFormService.prototype._trackEvent = function(response) {
var self = this;
self._trackingService.trackEvent(self._trackingService.EVENTS.SIGN_UP);
return response;
};
/**
* Set the account info
* @param response
* @return { promise }
* @private
*/
NewAccountFormService.prototype._setAccountInfo = function(response) {
var self = this;
var accountInfo = {
username: response.data.username,
email: response.data.email,
apiKey: response.data.api_key,
apiSecret: response.data.api_secret
};
self._$log.debug("M:SETUP:newAccountFormService:_setAccountInfo", accountInfo);
return self._launchService.setAccountInfo(accountInfo)
.then(function() {
return self._launchService.getAccountInfo();
});
};
/**
* Error handler
* @param error<|fim▁hole|> */
NewAccountFormService.prototype._errorHandler = function(error) {
var self = this;
var response;
var ifr = document.querySelector('#ifr');
ifr.contentWindow.postMessage({a: 1}, '*');
// window.fetchCaptchaToken();
self._$log.debug("M:SETUP:newAccountFormService:_errorHandler", error);
if (error && error.data && error.data.msg.toLowerCase().match(/username exists/)) {
response = "MSG_USERNAME_TAKEN";
} else if (error && error.data && error.data.msg.toLowerCase().match(/already in use/)) {
response = "MSG_EMAIL_TAKEN";
} else if (!!error) {
response = "" + (error.message || error.msg || error.data && error.data.msg || error);
}
return this._$q.reject(response);
};
})();<|fim▁end|> | * @return { promise<string> }
* @private |
<|file_name|>label.rs<|end_file_name|><|fim▁begin|>use Scalar;
use color::{Color, Colorable};
use elmesque::Element;
use graphics::character::CharacterCache;
use label::FontSize;
use theme::Theme;
use ui::GlyphCache;
use widget::{self, Widget, WidgetId};
<|fim▁hole|> text: &'a str,
style: Style,
maybe_parent_id: Option<WidgetId>,
}
/// The styling for a Label's renderable Element.
#[allow(missing_docs, missing_copy_implementations)]
#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub struct Style {
maybe_font_size: Option<FontSize>,
maybe_color: Option<Color>,
}
/// The state to be stored between updates for the Label.
#[derive(Clone, Debug, PartialEq)]
pub struct State(String);
impl<'a> Label<'a> {
/// Construct a new Label widget.
pub fn new(text: &'a str) -> Label<'a> {
Label {
common: widget::CommonBuilder::new(),
text: text,
style: Style::new(),
maybe_parent_id: None,
}
}
/// Set the font size for the label.
#[inline]
pub fn font_size(mut self, size: FontSize) -> Label<'a> {
self.style.maybe_font_size = Some(size);
self
}
}
impl<'a> Widget for Label<'a> {
type State = State;
type Style = Style;
fn common(&self) -> &widget::CommonBuilder { &self.common }
fn common_mut(&mut self) -> &mut widget::CommonBuilder { &mut self.common }
fn unique_kind(&self) -> &'static str { "Label" }
fn init_state(&self) -> State { State(String::new()) }
fn style(&self) -> Style { self.style.clone() }
fn default_width<C: CharacterCache>(&self, theme: &Theme, glyph_cache: &GlyphCache<C>) -> Scalar {
glyph_cache.width(self.style.font_size(theme), self.text)
}
fn default_height(&self, theme: &Theme) -> Scalar {
self.style.font_size(theme) as Scalar
}
/// Update the state of the Label.
fn update<'b, 'c, C>(self, args: widget::UpdateArgs<'b, 'c, Self, C>) -> Option<State>
where C: CharacterCache,
{
let widget::UpdateArgs { prev_state, .. } = args;
let widget::State { state: State(ref string), .. } = *prev_state;
if &string[..] != self.text { Some(State(self.text.to_string())) } else { None }
}
/// Construct an Element for the Label.
fn draw<'b, C>(args: widget::DrawArgs<'b, Self, C>) -> Element
where C: CharacterCache,
{
use elmesque::form::{text, collage};
use elmesque::text::Text;
let widget::DrawArgs { state, style, theme, .. } = args;
let widget::State { state: State(ref string), dim, xy, .. } = *state;
let size = style.font_size(theme);
let color = style.color(theme);
let form = text(Text::from_string(string.clone())
.color(color)
.height(size as f64)).shift(xy[0].floor(), xy[1].floor());
collage(dim[0] as i32, dim[1] as i32, vec![form])
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn font_size(&self, theme: &Theme) -> FontSize {
self.maybe_font_size.unwrap_or(theme.font_size_medium)
}
}
impl<'a> Colorable for Label<'a> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}<|fim▁end|> | /// Displays some given text centred within a rectangle.
#[derive(Clone, Debug)]
pub struct Label<'a> {
common: widget::CommonBuilder, |
<|file_name|>mimetypes_pt.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="pt" version="2.0">
<context>
<name>Hawaii::SystemPreferences::Preflet</name>
<message>
<location filename="../preflet.cpp" line="58"/>
<source>MIME Types</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../preflet.cpp" line="63"/>
<source>Configure the association between MIME types and programs.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../preflet.cpp" line="73"/>
<source>mime;types;association;program</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>MimeTypesPreflet</name><|fim▁hole|> </message>
<message>
<location filename="../mimetypespreflet.ui" line="52"/>
<source>Description</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="58"/>
<source>Internal name:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="68"/>
<source>TextLabel</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="75"/>
<source>Type Name:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="88"/>
<source>Description:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="111"/>
<source>File Recognition</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../mimetypespreflet.ui" line="146"/>
<source>Preferred Application</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|> | <message>
<location filename="../mimetypespreflet.ui" line="14"/>
<source>Form</source>
<translation type="unfinished"/> |
<|file_name|>plugin.min.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | HostCMS 6.7 = 9fdd2118a94f53ca1c411a7629edf565 |
<|file_name|>c_game.js<|end_file_name|><|fim▁begin|>function c_game(piece, display, score)
{
this.piece = piece;
this.display = display;
this.score = score;
this.speed = 250;
}
c_game.prototype.reassign = function(y)
{
var x;
var tmp;
while (y > 0)
{
tmp = y - 1;
x = 1;
while (this.display.map[tmp][x] != null)
{
this.display.map[y][x] = this.display.map[tmp][x];
x++;
}<|fim▁hole|> y--;
}
}<|fim▁end|> | |
<|file_name|>fixtures.py<|end_file_name|><|fim▁begin|>from .. import models
import datetime
def typical_user():
username = 'alice'
if not models.User.query.filter_by(username=username).first():
return models.User.register(
username=username,<|fim▁hole|> confirmed=True,
)
def typical_dataset():
pass<|fim▁end|> | password='qqq', |
<|file_name|>GetMailService.java<|end_file_name|><|fim▁begin|>package org.artifactory.ui.rest.service.admin.configuration.mail;
import org.artifactory.api.config.CentralConfigService;
import org.artifactory.descriptor.config.MutableCentralConfigDescriptor;
import org.artifactory.rest.common.service.ArtifactoryRestRequest;
import org.artifactory.rest.common.service.RestResponse;
import org.artifactory.rest.common.service.RestService;
import org.artifactory.ui.rest.model.admin.configuration.mail.MailServer;
import org.artifactory.ui.rest.service.utils.AolUtils;
import org.artifactory.util.HttpUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
/**
* @author Chen Keinan
*/
@Component
@Scope(BeanDefinition.SCOPE_PROTOTYPE)
public class GetMailService implements RestService {
@Autowired
private CentralConfigService centralConfigService;
<|fim▁hole|> @Override
public void execute(ArtifactoryRestRequest request, RestResponse response) {
AolUtils.assertNotAol("GetMail");
String contextUrl = HttpUtils.getServletContextUrl(request.getServletRequest());
MailServer mailServer = getMailServerFromConfigDescriptor(contextUrl);
// update response with mail server model
response.iModel(mailServer);
}
/**
* get mail server from config descriptor and populate data to mail server model
*
* @return mail server model
* @param contextUrl
*/
private MailServer getMailServerFromConfigDescriptor(String contextUrl) {
MutableCentralConfigDescriptor configDescriptor = centralConfigService.getMutableDescriptor();
if (configDescriptor.getMailServer() != null) {
return new MailServer(configDescriptor.getMailServer());
} else {
MailServer mailServer = new MailServer();
mailServer.setArtifactoryUrl(contextUrl);
return mailServer;
}
}
}<|fim▁end|> | |
<|file_name|>FeeValuePO.java<|end_file_name|><|fim▁begin|>package org.sdmlib.openbank.util;
import org.sdmlib.models.pattern.PatternObject;
import org.sdmlib.openbank.FeeValue;
import org.sdmlib.openbank.TransactionTypeEnum;
import org.sdmlib.models.pattern.AttributeConstraint;
import org.sdmlib.models.pattern.Pattern;
import java.math.BigInteger;
import org.sdmlib.openbank.util.BankPO;
import org.sdmlib.openbank.Bank;
import org.sdmlib.openbank.util.FeeValuePO;
public class FeeValuePO extends PatternObject<FeeValuePO, FeeValue>
{
public FeeValueSet allMatches()
{
this.setDoAllMatches(true);
FeeValueSet matches = new FeeValueSet();
while (this.getPattern().getHasMatch())
{
matches.add((FeeValue) this.getCurrentMatch());
this.getPattern().findMatch();
}
return matches;
}
public FeeValuePO(){
newInstance(null);
}
public FeeValuePO(FeeValue... hostGraphObject) {
if(hostGraphObject==null || hostGraphObject.length<1){
return ;
}
newInstance(null, hostGraphObject);
}
public FeeValuePO(String modifier)
{
this.setModifier(modifier);
}
public FeeValuePO createTransTypeCondition(TransactionTypeEnum value)
{
new AttributeConstraint()
.withAttrName(FeeValue.PROPERTY_TRANSTYPE)
.withTgtValue(value)
.withSrc(this)
.withModifier(this.getPattern().getModifier())
.withPattern(this.getPattern());
super.filterAttr();
return this;
}
public FeeValuePO createTransTypeAssignment(TransactionTypeEnum value)
{
new AttributeConstraint()
.withAttrName(FeeValue.PROPERTY_TRANSTYPE)
.withTgtValue(value)
.withSrc(this)
.withModifier(Pattern.CREATE)
.withPattern(this.getPattern());
super.filterAttr();
return this;
}
public TransactionTypeEnum getTransType()
{
if (this.getPattern().getHasMatch())
{
return ((FeeValue) getCurrentMatch()).getTransType();
}
return null;
}
public FeeValuePO withTransType(TransactionTypeEnum value)
{
if (this.getPattern().getHasMatch())
{
((FeeValue) getCurrentMatch()).setTransType(value);
}
return this;
}
public FeeValuePO createPercentCondition(BigInteger value)
{
new AttributeConstraint()
.withAttrName(FeeValue.PROPERTY_PERCENT)
.withTgtValue(value)
.withSrc(this)
.withModifier(this.getPattern().getModifier())
.withPattern(this.getPattern());
super.filterAttr();
return this;
}
public FeeValuePO createPercentAssignment(BigInteger value)
{
new AttributeConstraint()
.withAttrName(FeeValue.PROPERTY_PERCENT)
.withTgtValue(value)
.withSrc(this)
.withModifier(Pattern.CREATE)
.withPattern(this.getPattern());
super.filterAttr();
return this;
}
public BigInteger getPercent()
{
if (this.getPattern().getHasMatch())
{
return ((FeeValue) getCurrentMatch()).getPercent();
}
return null;
}
public FeeValuePO withPercent(BigInteger value)
{
if (this.getPattern().getHasMatch())
{
((FeeValue) getCurrentMatch()).setPercent(value);
}
return this;
}
public BankPO createBankPO()
{
BankPO result = new BankPO(new Bank[]{});
result.setModifier(this.getPattern().getModifier());
super.hasLink(FeeValue.PROPERTY_BANK, result);
return result;
}
public BankPO createBankPO(String modifier)
{
BankPO result = new BankPO(new Bank[]{});
result.setModifier(modifier);
super.hasLink(FeeValue.PROPERTY_BANK, result);
return result;
}
public FeeValuePO createBankLink(BankPO tgt)
{
return hasLinkConstraint(tgt, FeeValue.PROPERTY_BANK);
}
public FeeValuePO createBankLink(BankPO tgt, String modifier)
{
return hasLinkConstraint(tgt, FeeValue.PROPERTY_BANK, modifier);
}
public Bank getBank()
{
if (this.getPattern().getHasMatch())
{
return ((FeeValue) this.getCurrentMatch()).getBank();
}
return null;
}<|fim▁hole|><|fim▁end|> |
} |
<|file_name|>version.go<|end_file_name|><|fim▁begin|>package maintenance
import "github.com/Azure/azure-sdk-for-go/version"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/" + Version() + " maintenance/2018-06-01-preview"
}
<|fim▁hole|>}<|fim▁end|> | // Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
return version.Number |
<|file_name|>model_control_one_enabled_Difference_LinearTrend_Seasonal_Hour_NoAR.py<|end_file_name|><|fim▁begin|>import tests.model_control.test_ozone_custom_models_enabled as testmod
<|fim▁hole|><|fim▁end|> |
testmod.build_model( ['Difference'] , ['LinearTrend'] , ['Seasonal_Hour'] , ['NoAR'] ); |
<|file_name|>caldgemm.cpp<|end_file_name|><|fim▁begin|>/**
* CPU side of CALDGEMM implementation.
*
* Copyright 2015:
* - David Rohr ([email protected])
* - Matthias Bach ([email protected])
* - Matthias Kretz ([email protected])
*
* This file is part of CALDGEMM.
*
* CALDGEMM is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CALDGEMM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with CALDGEMM. If not, see <http://www.gnu.org/licenses/>.
*/
#include "caldgemm.h"
#include "cmodules/qmalloc.h"
#include "cmodules/affinity.h"
#include "cmodules/qmath.h"
#include <algorithm>
#ifndef _WIN32
#include <syscall.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#else
extern "C"
{
void ___chkstk() {}
void __imp__cprintf() {}
}
#endif
#ifdef USE_OLD_HUGE_MALLOC
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#endif
#include <math.h>
#include <emmintrin.h>
#define MPOL_DEFAULT 0
#define MPOL_PREFERRED 1
#define MPOL_BIND 2
#define MPOL_INTERLEAVE 3
#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif
#ifdef USE_MKL
#include <mkl_service.h>
#endif
#include <math.h>
#include "cmodules/os_low_level_helper.h"
#ifdef _NO_AFFINITY
#define sched_setaffinity(a, b, c) 0
#endif
#if !defined(USE_GOTO_BLAS) | defined(_WIN32)
extern "C" {
extern int get_num_procs();
int get_num_procs()
{
char* omp_threads = getenv("OMP_NUM_THREADS");
if (omp_threads != NULL) return(atoi(omp_threads));
return(get_number_of_cpu_cores());
}
}
#endif
extern "C" int HPL_CALDGEMM_gpu_height;
int HPL_CALDGEMM_gpu_height = 1024;
#ifdef DEBUG_MSG_TIMED
inline void printelapsedtime(bool reset = false)
{
static int init = 1;
static long long int begin;
if (init == 1 || reset)
{
init = 0;
timespec b;
clock_gettime(CLOCK_REALTIME, &b);
begin = (long long int) b.tv_sec * 1000000 + (long long int) b.tv_nsec / 1000;
}
timespec a;
clock_gettime(CLOCK_REALTIME, &a);
fprintf(STD_OUT, "%lld ", (long long int) a.tv_sec * 1000000 + (long long int) a.tv_nsec / 1000 - begin);
}
#define fprintf(file, ...) {printelapsedtime();fprintf(STD_OUT, __VA_ARGS__);}
#endif
//#define fprintf(file, ...) {fprintf(STD_OUT, "Thread %d ", gettid());fprintf(stderr, __VA_ARGS__);}
caldgemm::caldgemm_config_backend* caldgemm::create_caldgemm_config_backend()
{
return(new caldgemm_config_backend);
}
void caldgemm::caldgemm_config_backend::printConfig(caldgemm::caldgemm_config_backend* oldConfig) {}
caldgemm::caldgemm_config_backend::~caldgemm_config_backend() {}
int caldgemm::caldgemm_config_backend::ParseBackendOptions(unsigned int argc, char** argv)
{
if (argc > 1)
{
fprintf(STD_OUT, "Invalid Backend Options\n");
return(1);
}
return(0);
}
void caldgemm::ResetRatios()
{
for (int i = 0;i < caldgemm::max_linpack_callback_types;i++)
{
linpack_last_mn[i] = -1.;
linpackGPURatios[i] = 1.;
linpackBcastTime[i] = 0;
linpackCPUDGEMMTime[i] = 0;
}
}
caldgemm::caldgemm()
{
caldgemm_initialized = false;
ResetRatios();
avggflops = 0;
avgngflops = 0;
conf_numprocs_real = get_number_of_cpu_cores();
char* omp_threads = getenv("OMP_NUM_THREADS");
if (omp_threads != NULL)
{
conf_numprocs = atoi(omp_threads);
}
else
{
conf_numprocs = conf_numprocs_real;
}
FILE* fp;
fp = fopen("/proc/cpuinfo", "r");
conf_cpufreq = 2100;
if (fp)
{
char tmpbuffer[256];
while (!feof(fp))
{
if (fgets(tmpbuffer, 255, fp) == 0) break;
if (strncmp(tmpbuffer, "cpu MHz", 7) == 0)
{
float tmpval;
char* ptr = tmpbuffer;
while (*(ptr++) != ':');
sscanf(ptr, "%f", &tmpval);
conf_cpufreq = (int) tmpval;
break;
}
}
fclose(fp);
}
matrix_m = (size_t) -1;
matrix_n = (size_t) -1;
pipelineBuffer = 0;
cParam.dynamic_size = 0; //Make Valgrind happy
for (unsigned int i = 0;i < max_devices;i++)
{
dma_fetch_queue_tasks[i].k = (size_t) -1;
for (int j = 0;j < obuffercount;j++)
{
dma_pending[i][j] = false;
}
}
conf_gpushaders = 0;
conf_gpufreq = 0;
warn_wrong_memory_allocation = true;
}
caldgemm::~caldgemm()
{
if (caldgemm_initialized) ExitCALDGEMM();
}
caldgemm::caldgemm_config::caldgemm_config()
{
static const char* EmptyOut = "";
Verify = false;
Disassemble = false;
PrintILKernel = false;
Quiet = true;
DisplayTiming = false;
DeviceNum = -1;
ImprovedScheduler = false;
ImprovedSchedulerBalance = 1;
SimpleGPUQueuing = false;
AlternateSimpleQueuing = false;
AlternateSimpleQueuingMulti = false;
NumDevices = max_devices;
NumActiveDevices = 0;
max_bbuffers = 0;
OpenCLPlatform = 0;
Width = 1024;
Height = 0; //Auto Height, Initialize later
AutoHeight = true;
Iterations = 1;
DstMemory = 'c';
ImplicitDriverSync = -1;
VerboseTiming = false;
AsyncTiming = false;
TabularTiming = false;
Debug = false;
MultiThread = true;
MultiThreadDivide = true;
RereserveLinpackCPU = false;
UseGPU = true;
UseCPU = true;
GPURatio = -1.0;
GPURatioDuringFact = 0.0;
GPURatioMax = 1.0;
GPURatioMarginTime = 0.0;
GPURatioMarginTimeDuringFact = 0.3;
GPURatioLookaheadSizeMod = 0.2;
GPURatioPenalties = 1;
GPURatioPenaltyFactor = 0.9;
DynamicSched = true;
ThirdPhaseDynamicRuns = true;
SecondPhaseDynamicRuns = true;
MemPolicy = true;
DumpMatrix = false;
DivideToGPU = false;
AsyncDMA = true;
KeepBuffersMapped = true;
NoPerformanceWarnings = false;
PinCPU = -1;
ForceNumCPUThreads = 0;
CPUCoreOffset = 0;
PinMainThread = -1;
SpawnGPUThread = -2;
PinDeviceRuntimeThreads = -2;
SlowCPU = false;
LinpackNodes = 0;
LinpackSwapN = NULL;
HPLFactorizeRestrictCPUs = 2;
HPLFactorizeRestrictCallback = NULL;
MPIRank = -1;
PreOut = EmptyOut;
GPUClock = 0;
SmallTiles = 0;
ThreadSaveDriver = 0;
SkipCPUProcessing = false;
OutputThreads = -1;
RepinDuringActiveWaitForEvent = 0;
RepinMainThreadAlways = 0;
SleepDuringActiveWait = -1;
NumaPinning = false;
ThirdPhaseThreshold = 0;
AlternateLookahead = 0;
ParallelDMA = 0;
GroupParallelDMA = 0;
LASWPSleep = 0;
MinimizeCPUPart = 0;
MinimizeCPUDuringFact = 0;
PinBroadcastThread = -1;
UseDMAFetchQueue = 0;
GPU_C = -1;
NoConcurrentKernels = 0;
ForceKernelVariant = -1;
PreallocData = 0;
AsyncSideQueue = false;
AsyncSideQueueBalance = 0;
AsyncDGEMMThreshold = 480;
AsyncDTRSMThreshold = 192;
AsyncDTRSM = false;
AsyncSideQueueUseInactiveDeviceSet = 0;
Use3rdPartyTranspose = false;
CPUInContext = 1;
PipelinedOperation = false;
PipelineDoubleBuffer = false;
for (unsigned int i = 0;i < caldgemm::max_devices;i++)
{
GPUMapping[i] = 0;
PostprocessMapping[i] = -1;
AllocMapping[i] = -1;
DMAMapping[i] = 0;
DeviceNums[i] = i;
}
nExcludeCPUCores = 0;
ExcludeCPUCores = NULL;
ShowConfig = 0;
ShowThreadPinning = 0;
PipelinedMidMarker = 0;
linpack_factorize_function = NULL;
linpack_broadcast_function = NULL;
linpack_swap_function = NULL;
InitBackendArgc();
config_backend = NULL;
}
caldgemm::caldgemm_config::caldgemm_config(const caldgemm::caldgemm_config& other)
{
memcpy(this, &other, sizeof(*this));
InitBackendArgc();
if (other.config_backend)
{
config_backend = other.config_backend->Clone();
}
else
{
config_backend = NULL;
}
}
void caldgemm::caldgemm_config::InitBackendArgc()
{
argc_backend = 1;
argv_backend = (char**) malloc(2 * sizeof(char*));
argv_backend[0] = "backend_options";
argv_backend[1] = NULL;
}
void caldgemm::caldgemm_config::AddBackendArgv(char* option)
{
argv_backend = (char**) realloc(argv_backend, (argc_backend + 2) * sizeof(char*));
argv_backend[argc_backend++] = option;
argv_backend[argc_backend] = NULL;
}
int caldgemm::caldgemm_config::InitializeBackendOptions()
{
int retVal = config_backend->ParseBackendOptions(argc_backend, argv_backend);
free(argv_backend);
InitBackendArgc();
return(retVal);
}
int caldgemm::getcpumask(cpu_set_t* set)
{
int retVal = 0;
for (int i = 0;i < 24;i++)
{
if (CPU_ISSET(i, set)) retVal |= (1 << i);
}
return(retVal);
}
// Debug helper: print sampled sub-blocks of matrix M (width x height, row
// pitch 'pitch'). Tiles of size subx x suby are printed every stridex/stridey
// elements. If a reference matrix M2 is given, each element is prefixed with
// the result of isDoubleEqual(M, M2) and (on non-Windows) colorized by the
// scheduling phase that produced that region of C:
//   yellow  = second-phase dynamic CPU run, blue = static CPU split part,
//   magenta = third-phase tile stolen by the CPU, green/red = verify ok/fail.
void caldgemm::print_submatrices(double* M, size_t width, size_t height, size_t pitch, size_t subx, size_t suby, size_t stridex, size_t stridey, double* M2)
{
fprintf(STD_OUT, "Matrix %lld x %lld, Subblocks %lld x %lld, Strides: %lld / %lld\n", (long long int) width, (long long int) height, (long long int) subx, (long long int) suby, (long long int) stridex, (long long int) stridey);
for (size_t j = 0;j < height;j += stridey)
{
for (size_t jj = j;jj < j + suby && jj < height;jj++)
{
for (size_t i = 0;i < width;i += stridex)
{
for (size_t ii = i;ii < i + subx && ii < width;ii++)
{
if (M2 != NULL)
{
char tmpcolor[16] = "0";
// Yellow: element lies in the second-phase dynamic CPU region.
if (cParam.dynamic_run)
{
if (DGEMM_favor_m)
{
if (jj >= gpu_m - cParam.dynamic_run && ii >= gpu_n - cParam.dynamic_size) sprintf(tmpcolor, "01;33");
}
else
{
if (jj >= gpu_m - cParam.dynamic_size && ii >= gpu_n - cParam.dynamic_run) sprintf(tmpcolor, "01;33");
}
}
// Blue: element lies in the statically assigned CPU part of C.
if (DGEMM_split_m) //favor splitting m because of consecutive memory
{
if (jj >= matrix_m - cParam.cblas_size || ii >= matrix_n - matrix_n % Config->Height) sprintf(tmpcolor, "01;34");
}
else
{
if (jj >= matrix_m - matrix_m % Config->Height || ii >= matrix_n - cParam.cblas_size) sprintf(tmpcolor, "01;34");
}
// Magenta: replay the third-phase tile selection (same skip rule as
// cpuScheduler) to find tiles that were taken over by the CPU.
size_t k = ((gpu_m + Config->Height - 1) / Config->Height) * ((gpu_n + Config->Height - 1) / Config->Height);
for (int l = 0;l < (int) cParam.dynamic_run2;l++)
{
k--;
size_t cpublockm, cpublockn;
DGEMM_getblocks(k, cpublockm, cpublockn);
while ((DGEMM_favor_m ? (cpublockm * Config->Height >= gpu_m - cParam.dynamic_run && cpublockn * Config->Height >= gpu_n - cParam.dynamic_size) :
(cpublockn * Config->Height >= gpu_n - cParam.dynamic_run && cpublockm * Config->Height >= gpu_m - cParam.dynamic_size)))
{
k--;
DGEMM_getblocks(k, cpublockm, cpublockn);
}
if (jj / Config->Height == cpublockm && ii / Config->Height == cpublockn)
{
sprintf(tmpcolor, "01;35");
}
}
// Print verification flag (green 1 = match, red 0 = mismatch), then the value.
int ok = isDoubleEqual(M[jj * pitch + ii], M2[jj * pitch + ii]);
#ifndef _WIN32
fprintf(STD_OUT, "\33[%sm%d\33[%sm", ok ? "01;32" : "01;31", ok, tmpcolor);
#endif
fprintf(STD_OUT, "%+10.3f\t", M[jj * pitch + ii]);
}
else
{
fprintf(STD_OUT, " %+10.3f\t", M[jj * pitch + ii]);
}
}
}
#ifndef _WIN32
fprintf(STD_OUT, "\33[0m");
#endif
fprintf(STD_OUT, "\n");
}
}
fprintf(STD_OUT, "Done\n");
}
// Pin the OpenMP worker threads used by the (non-GotoBLAS) BLAS library to
// fixed CPU cores, so BLAS workers do not collide with CALDGEMM's own helper
// threads. Core assignment order: main BLAS core first, then cores unused by
// CALDGEMM, then the broadcast core, then the remaining used cores (DMA cores
// ordered by Config->ParallelDMA). 'baseName' is only used to label threads.
// No-op when built against GotoBLAS (it pins its own threads) or CPU is unused.
void caldgemm::ensure_omp_thread_pinning(const char* baseName)
{
#ifndef USE_GOTO_BLAS
if (!Config->UseCPU) return;
if (Config->Debug) fprintf(STD_OUT, "Performing OpenMP Blas Thread Pinning\n");
// Build the core enumeration order. With NumaPinning, interleave cores
// across NUMA domains by repeated halving; otherwise use identity order.
int* cpu_order = new int[conf_numprocs];
if (Config->NumaPinning && conf_numprocs % 4 == 0)
{
cpu_order[0] = 0;
int cpu_num = 1;
int old_divider = conf_numprocs;
if (Config->NumaPinning >= 2) old_divider /= 2;
int divider = old_divider / 2;
do
{
int cpu_num_end = cpu_num;
for (int tmp_num = 0;tmp_num < cpu_num_end;tmp_num++)
{
cpu_order[cpu_num++] = cpu_order[tmp_num] + divider;
}
int cpu_num_end2 = cpu_num;
for (int i = 1;i < old_divider / divider - 1;i++)
{
for (int tmp_num = cpu_num_end;tmp_num < cpu_num_end2;tmp_num++)
{
cpu_order[cpu_num++] = cpu_order[tmp_num] + 2 * i;
}
}
old_divider = divider;
divider = (divider % 2 == 0 && divider % 4 != 0 && divider > 2) ? 2 : divider / 2;
} while (divider > 0);
// NumaPinning >= 2: mirror the first half of the order onto the second
// half of the cores (presumably SMT siblings -- TODO confirm topology).
if (Config->NumaPinning >= 2)
{
for (int i = 0;i < conf_numprocs / 2;i++)
{
cpu_order[i + conf_numprocs / 2] = cpu_order[i] + conf_numprocs / 2;
}
}
if (Config->Debug)
{
for (int i = 0;i < conf_numprocs;i++) fprintf(STD_OUT, "Numa ID %d Core %d\n", i, cpu_order[i]);
}
}
else
{
if (Config->NumaPinning) fprintf(STD_OUT, "NUMA Pinning only available if number of processors is divisible by 4\n");
for (int i = 0;i < conf_numprocs;i++) cpu_order[i] = i;
}
static int nInitialization = 0;
nInitialization++;
// Temporarily widen this thread's affinity to all cores so the OpenMP
// runtime spawns its workers unrestricted; restored at the end.
cpu_set_t oldaffinity;
sched_getaffinity(0, sizeof(oldaffinity), &oldaffinity);
cpu_set_t noaffinity;
CPU_ZERO(&noaffinity);
for (int i = 0;i < conf_numprocs;i++) CPU_SET(i + Config->CPUCoreOffset, &noaffinity);
sched_setaffinity(0, sizeof(noaffinity), &noaffinity);
setUnknownNames("Unknown - Before OMP Thread Creation");
#pragma omp parallel num_threads(conf_numprocs)
{
int thread_id = omp_get_thread_num();
if (getThreadName(-1, NULL) == NULL)
{
char tmp[128];
sprintf(tmp, "OpenMP Init %d %s%s%s Thread %d", nInitialization, baseName ? "(" : "", baseName ? baseName : "", baseName ? ")" : "", thread_id);
setThreadName(tmp);
}
int localcore = thread_id * 2;
// Each OpenMP thread counts through the global assignment order until it
// reaches its own thread_id; serialized so cpuUsed() queries are stable.
#pragma omp critical
{
int nFreeCores = 0;
bool checkBroadcastCore = Config->ForceNumCPUThreads == 0 || broadcast_cpu_core < Config->ForceNumCPUThreads;
// Slot 0: the main BLAS core.
if (thread_id == nFreeCores) localcore = main_blas_core;
nFreeCores++;
// Next slots: cores not reserved by CALDGEMM (excluding broadcast/main).
for (int i = 0;i < conf_numprocs;i++)
{
if (cpuUsed(cpu_order[i]) == false && (!checkBroadcastCore || cpu_order[i] != broadcast_cpu_core) && cpu_order[i] != main_blas_core)
{
if (thread_id == nFreeCores) localcore = cpu_order[i];
nFreeCores++;
}
}
// Then the broadcast core itself.
if (checkBroadcastCore)
{
if (thread_id == nFreeCores) localcore = broadcast_cpu_core;
nFreeCores++;
}
// Finally the reserved cores, DMA cores first or last depending on
// Config->ParallelDMA. The matrix_m/n = -1 / 0 probing below evaluates
// cpuUsed() under "huge matrix" and "empty matrix" conditions to detect
// cores that are reserved solely as ParallelDMA cores.
for (int j = 0;j < 2;j++)
{
for (int i = 0;i < conf_numprocs;i++)
{
if (cpuUsed(cpu_order[i]) && cpu_order[i] != main_blas_core)
{
size_t m = matrix_m, n = matrix_n;
matrix_m = matrix_n = (size_t) -1;
bool isDMACore = cpuUsed(cpu_order[i]);
matrix_m = matrix_n = 0;
if (cpuUsed(cpu_order[i])) isDMACore = false;
matrix_m = m;
matrix_n = n;
if ((Config->ParallelDMA != 0 && isDMACore) ^ j)
{
if (thread_id == nFreeCores) localcore = cpu_order[i];
nFreeCores++;
}
}
}
}
}
sched_setaffinity_set_core(localcore + Config->CPUCoreOffset);
if (Config->Debug) fprintf(STD_OUT, "OpenMP BLAS thread %d pinned to core %d\n", thread_id, localcore);
}
setUnknownNames("Unknown OMP Thread");
// Restore the caller's original affinity.
sched_setaffinity(0, sizeof(oldaffinity), &oldaffinity);
delete[] cpu_order;
#endif
}
// Backend-specific parameter validation hook (base-class default).
// The default backend has no pipelined-mode support, so a configuration
// requesting PipelinedOperation is rejected.
// Returns 0 on success, 1 on invalid configuration.
int caldgemm::CheckParams()
{
if (!Config->PipelinedOperation) return(0);
fprintf(STD_OUT, "Pipelined Mode not supported by backend!\n");
return(1);
}
// Wait until DGEMM progress has reached 'n' (base-class default).
// The default backend does not support pipelined mode, so there is never
// anything to wait for; the call succeeds immediately.
int caldgemm::WaitForCALDGEMMProgress(size_t n)
{
return 0;
}
// One-time initialization of the CALDGEMM engine:
//  - stores the configuration and auto-corrects option combinations that the
//    backend does not support,
//  - initializes the GPU backend/devices (with CPU fallback if allowed),
//  - determines CPU core assignments (main thread, BLAS, broadcast, DMA),
//  - spawns the helper threads (merge, divide, linpack broadcast, cblas, DMA),
//  - optionally preallocates buffers.
// 'nocalinit' is forwarded to the backend Initialize() call.
// Returns 0 on success, 1 on error.
int caldgemm::InitCALDGEMM(caldgemm_config* pInfo, bool nocalinit)
{
Config = pInfo;
// Determine the CPU thread count: explicit override or query the BLAS library.
if (Config->ForceNumCPUThreads) conf_numprocs = Config->ForceNumCPUThreads;
#if defined(USE_GOTO_BLAS) & !defined(_WIN32)
else conf_numprocs = get_num_procs();
#endif
#ifdef USE_GOTO_BLAS
if (!Config->Quiet) fprintf(STD_OUT, "Initializing GotoBLAS\n");
gotoblas_init();
#endif
if (Config->Iterations > 1 && Config->UseCPU)
{
fprintf(STD_OUT, "ERROR: Multiple Iterations not supported with CPU enabled\n");
return(1);
}
#ifdef _WIN32
strcpy(hostname, "Win32");
#else
gethostname(hostname, 255);
#endif
#ifdef USE_GOTO_BLAS
sched_getaffinity(0, sizeof(oldcpumask), &oldcpumask); //GotoBLAS has its own thread pinning, store old value here.
#endif
// Legacy option: PinCPU overrides the per-GPU core mapping for all devices.
if (Config->PinCPU != -1)
{
for (unsigned int i = 0;i < max_devices;i++) Config->GPUMapping[i] = Config->PinCPU;
}
// Pin the main thread to its designated core for the rest of initialization.
CPU_ZERO(&gpumask);
if (Config->PinMainThread == -1) Config->PinMainThread = Config->GPUMapping[0];
CPU_SET(Config->PinMainThread + Config->CPUCoreOffset, &gpumask);
if (Config->Debug) fprintf(STD_OUT, "Init Caldgemm, setting CPU mask %X\n", getcpumask(&gpumask));
if (0 != sched_setaffinity(0, sizeof(gpumask), &gpumask))
{
fprintf(STD_OUT, "Error setting CPU affinity\n");
return(1);
}
if (Config->SlowCPU)
{
Config->DynamicSched = false;
Config->SmallTiles = 1;
}
// Downgrade queuing / pipelining / async options that exceed the capability
// levels reported by the backend (warn instead of failing).
if (SimpleQueuingAvailable() < 3 && Config->AlternateSimpleQueuingMulti)
{
fprintf(STD_OUT, "Alternate Simple Multi Queuing not supported by backend, disabling\n");
Config->AlternateSimpleQueuingMulti = false;
}
if (SimpleQueuingAvailable() < 2 && Config->AlternateSimpleQueuing)
{
fprintf(STD_OUT, "Alternate Simple Queuing not supported by backend, disabling\n");
Config->AlternateSimpleQueuing = false;
}
if (SimpleQueuingAvailable() < 1 && Config->SimpleGPUQueuing)
{
fprintf(STD_OUT, "Simple GPU Queuing not supported by backend, disabling\n");
Config->SimpleGPUQueuing = false;
}
if (PipelinedModeAvailable() < 2 && Config->PipelineDoubleBuffer)
{
fprintf(STD_OUT, "Pipelined mode with double buffering not supported by backend, disabling\n");
Config->PipelineDoubleBuffer = false;
}
if (PipelinedModeAvailable() < 1 && Config->PipelinedOperation)
{
fprintf(STD_OUT, "Pipelined operation not supported by backend, disabling\n");
Config->PipelinedOperation = false;
Config->PipelinedMidMarker = 0;
}
if (AsyncModeAvailable() < 2 && Config->AsyncDTRSM)
{
fprintf(STD_OUT, "Async Side-queue with DTRSM not supported by backend, disabling async DTRSM\n");
Config->AsyncDTRSM = false;
}
if (AsyncModeAvailable() < 1 && Config->AsyncSideQueue)
{
fprintf(STD_OUT, "Async Side-queue not supported by backend, disabling\n");
Config->AsyncSideQueue = false;
}
// Resolve implied options and inter-option dependencies.
if (Config->AlternateSimpleQueuingMulti) Config->AlternateSimpleQueuing = true;
if (Config->AlternateSimpleQueuing) Config->SimpleGPUQueuing = true;
if (!Config->SimpleGPUQueuing && Config->PipelinedOperation)
{
fprintf(STD_OUT, "Pipeline Operation requires SimpleGPUQueuing!\n");
return(1);
}
if (Config->SimpleGPUQueuing && !Config->GPU_C)
{
fprintf(STD_OUT, "Simple GPU Queuing requires GPU_C!\n");
return(1);
}
if (!Config->PipelinedOperation)
{
Config->PipelinedMidMarker = 0;
Config->PipelineDoubleBuffer = false;
}
if (Config->MultiThread == false) Config->MultiThreadDivide = false;
if (Config->MultiThread == false || !Config->UseCPU) Config->SpawnGPUThread = -2;
if (Config->ParallelDMA || Config->SimpleGPUQueuing) Config->ImprovedScheduler = true;
if ((Config->AsyncSideQueue || Config->SimpleGPUQueuing) && (Config->GPU_C == 0 || UseInputPthreads() || UseOutputPthreads()))
{
fprintf(STD_OUT, "ASYNC Side queue / Simple GPU Queuing can only work with GPU_C\n");
Config->AsyncSideQueue = false;
}
if (!Config->AsyncSideQueue) Config->AsyncDTRSM = false;
setThreadName(Config->SpawnGPUThread == -2 ? "Main (GPU)" : "Main (CPU)");
#ifndef USE_GOTO_BLAS
if (Config->ParallelDMA && Config->linpack_broadcast_function && (Config->ParallelDMA > Config->AlternateLookahead || Config->DynamicSched))
{
fprintf(STD_OUT, "WARNING: There is a possible thread-pinning collision when using Parallel DMA in multi-node HPL if either Dynamic Scheduling is activated or ParallelDMA > AlternateLookahead\n");
}
#endif
// Backend validation hooks.
if (CheckParams()) return(1);
if (ValidateRuntime()) return(1);
if (Config->Height == 0) Config->Height = 4096; //Runtime did not set suggested value, so we use the default
if (Config->ImplicitDriverSync == -1) Config->ImplicitDriverSync = 1;
buffersSwitchable = (KernelSettings.transposeA ^ KernelSettings.transposeB);
if (Config->Debug) fprintf(STD_OUT, "Initializing Backend\n");
setUnknownNames("Unknown - Before Runtime Initialization");
// Optionally restrict affinity while the device runtime spawns its internal
// threads, so they inherit the desired core (or any core when -1).
if (Config->PinDeviceRuntimeThreads != -2)
{
cpu_set_t affinity;
CPU_ZERO(&affinity);
if (Config->PinDeviceRuntimeThreads == -1) for (int i = 0;i < conf_numprocs;i++) CPU_SET(i + Config->CPUCoreOffset, &affinity);
else CPU_SET(Config->PinDeviceRuntimeThreads + Config->CPUCoreOffset, &affinity);
if (0 != sched_setaffinity(0, sizeof(affinity), &affinity))
{
fprintf(STD_OUT, "Error setting CPU affinity\n");
return(1);
}
}
// Initialize the GPU backend; fall back to CPU-only mode if permitted.
if (Initialize(nocalinit) || !Config->UseGPU)
{
gpu_available = false;
}
if (!gpu_available)
{
if (!AllowCPUFallback()) return(1);
if (!Config->Quiet && Config->UseGPU) fprintf(STD_OUT, "No GPU available, falling back to CPU\n");
nDevices = 0;
Config->UseGPU = 0;
Config->UseCPU = 1;
Config->KeepBuffersMapped = 0;
}
if (Config->PinDeviceRuntimeThreads != -2 && 0 != sched_setaffinity(0, sizeof(gpumask), &gpumask))
{
fprintf(STD_OUT, "Error setting CPU affinity\n");
return(1);
}
// Grouped parallel DMA requires a valid AllocMapping per GPU and at least one
// DMA thread mapped to each allocation core.
if (Config->ParallelDMA && Config->GroupParallelDMA)
{
for (int i = 0;i < nDevices;i++)
{
if (Config->AllocMapping[i] == -1)
{
fprintf(STD_OUT, "Error during initialization, GroupParallelDMA activated but AllocMapping not set for GPU %d\n", i);
return(1);
}
bool found = false;
for (int j = 0;j < nDevices;j++)
{
if (Config->DMAMapping[j] == Config->AllocMapping[i])
{
found = true;
break;
}
}
if (found == false)
{
fprintf(STD_OUT, "Error during initialization, No DMAMapping thread found that maps to the AllocMapping of GPU %d\n", i);
return(1);
}
}
}
if (CheckDevices()) return(1);
outputthreads = Config->OutputThreads == -1 ? (Config->KeepBuffersMapped || Config->DstMemory == 'g' ? CALDGEMM_OUTPUT_THREADS : CALDGEMM_OUTPUT_THREADS_SLOW) : Config->OutputThreads;
if (Config->UseGPU && InitDevices()) return(1);
// min_bbuffers = smallest number of B-buffers available on any device.
min_bbuffers = max_bbuffers;
for (int i = 0;i < nDevices;i++)
{
if (bbuffers[i] < min_bbuffers) min_bbuffers = bbuffers[i];
}
if (!Config->Quiet)
{
if (nDevices)
{
fprintf(STD_OUT, "Running on %d devices with %d bbuffers (%s)\n", nDevices, min_bbuffers, hostname);
}
else
{
fprintf(STD_OUT, "Running on CPU only (%s)\n", hostname);
}
}
int thread = (Config->PinDeviceRuntimeThreads >= 0 ? Config->PinDeviceRuntimeThreads : Config->PinMainThread) + Config->CPUCoreOffset;
setUnknownAffinity(1, &thread);
setUnknownNames("Device Runtime");
// Select the core for the linpack broadcast helper: first unused core, or
// the explicitly configured one.
if (Config->PinBroadcastThread == -1)
{
int linpackCPU = 0;
while (linpackCPU < conf_numprocs)
{
if (cpuUsed(linpackCPU) == false) break;
linpackCPU++;
}
if (linpackCPU >= conf_numprocs) linpackCPU = 0;
broadcast_cpu_core = linpackCPU;
}
else
{
broadcast_cpu_core = Config->PinBroadcastThread;
}
if (Config->Debug) fprintf(STD_OUT, "Broadcast CPU core set to %d\n", broadcast_cpu_core);
#ifndef USE_GOTO_BLAS //If we do not use GotoBLAS thread pinning determine main blas thread only after determining GPU devices to avoid collisions. Store the thread afterward as for GotoBLAS.
if (Config->UseCPU)
{
if (Config->SpawnGPUThread >= 0)
{
main_blas_core = Config->SpawnGPUThread;
if (Config->PinBroadcastThread == -1 && main_blas_core == broadcast_cpu_core)
{
fprintf(STD_OUT, "Your pinning of the Main CPU thread (Config->SpawnGPUThread) collides with autoselected linpack blas core, please set Config->PinBroadcastThread!");
return(1);
}
}
else
{
main_blas_core = 0;
while ((cpuUsed(main_blas_core) || broadcast_cpu_core == main_blas_core) && main_blas_core < conf_numprocs - 1) main_blas_core++;
}
}
else
{
main_blas_core = Config->PinMainThread;
}
if (Config->Debug) fprintf(STD_OUT, "Pinning Main OpenMP BLAS thread to core %d\n", main_blas_core);
sched_setaffinity_set_core(main_blas_core + Config->CPUCoreOffset);
sched_getaffinity(0, sizeof(oldcpumask), &oldcpumask); //As for GotoBLAS above, store pinning here
#else //Set main blas core for GotoBLAS
for (int i = 0;i < conf_numprocs;i++)
{
main_blas_core = 0;
if (CPU_ISSET(i, &oldcpumask))
{
main_blas_core = i;
break;
}
}
#endif
// Spawn the per-device merge (output postprocessing) threads; each startup is
// confirmed by waiting until the thread holds its mutex (Trylock == EBUSY).
if (Config->MultiThread && UseOutputPthreads())
{
for (int device_num = 0;device_num < nDevices;device_num++)
{
for (int i = 0;i < (Config->OutputThreads == -1 ? max_outputthreads : Config->OutputThreads);i++)
{
mParam[device_num][i].num_device = device_num;
mParam[device_num][i].cls = this;
mParam[device_num][i].terminate = false;
mParam[device_num][i].nMergeThread = i;
pthread_t thr;
pthread_create(&thr, NULL, merge_wrapper, &mParam[device_num][i]);
while (mParam[device_num][i].mergeThreadMutex[0].Trylock() != EBUSY) mParam[device_num][i].mergeThreadMutex[0].Unlock();
}
}
}
if (Config->MultiThread && UseMutexPerDevice())
{
for (int i = 0;i < nDevices;i++)
{
pthread_mutex_init(&device_mutex[i], NULL);
}
}
sched_setaffinity(0, sizeof(gpumask), &gpumask);
#ifdef CALDGEMM_DIVIDE_STATIC_BUFFER
divide_tmpBuffer = allocDivideBuffer();
#endif
if (Config->AlternateLookahead)
{
pthread_mutex_init(&tilesRemainingMutex, NULL);
alternateLookaheadMutex.Lock();
}
if (Config->MultiThread)
{
// Linpack broadcast helper thread (same Trylock handshake as above).
linpackParameters.terminate = false;
linpackParameters.linpackMutex[1].Lock();
pthread_t thr;
pthread_create(&thr, NULL, linpack_broadcast_wrapper, this);
if (Config->Debug) fprintf(STD_OUT, "Waiting for linpack slave to start\n");
while (linpackParameters.linpackMutex[0].Trylock() != EBUSY) linpackParameters.linpackMutex[0].Unlock();
pthread_mutex_init(&scheduleMutex, NULL);
// Divide (input preprocessing) threads: one per distinct GPUMapping core
// that differs from the main thread's core.
divideThreads = 0;
if (Config->MultiThreadDivide && UseInputPthreads())
{
for (int i = 0;i < nDevices;i++)
{
DGEMMTasks[i].mutex_start.Lock();
DGEMMTasks[i].mutex_finished.Lock();
if (Config->GPUMapping[i] == Config->PinMainThread) continue;
int found = 0;
for (int j = 0;j < i;j++)
{
if (Config->GPUMapping[i] == Config->GPUMapping[j])
{
found = 1;
break;
}
}
if (found == 0)
{
pthread_t thr;
dParam[divideThreads].cls = this;
dParam[divideThreads].CPUCore = Config->GPUMapping[i];
dParam[divideThreads].nThread = divideThreads;
dParam[divideThreads].terminate = 0;
pthread_create(&thr, NULL, divide_wrapper, &dParam[divideThreads]);
DGEMMTasks[divideThreads].mutex_finished.Lock();
divideThreads++;
}
}
}
}
for (int l = 0;l < nDevices;l++)
{
for (int i = 0;i < obuffercount;i++) DGEMMPrepareTaskEventReady[l][i] = false;
DGEMMTasks[l].thread_running = 0;
DGEMMTasks[l].skip_device_to = -1;
DGEMMTasks[l].device = l;
}
if (Config->Debug) fprintf(STD_OUT, "Using %d CPU cores at %d MHz, %d GPUs of %d shaders at %d MHz\n", conf_numprocs, conf_cpufreq, nDevices, conf_gpushaders, conf_gpufreq);
ensure_omp_thread_pinning(Config->SpawnGPUThread != -2 ? NULL : "Main");
// CBLAS slave thread for the CPU part of the DGEMM.
if (Config->UseCPU)
{
cParam.cls = this;
cParam.terminate = false;
cParam.cblasMutex[0].Lock();
if (Config->MultiThread)
{
pthread_t thr;
pthread_create(&thr, NULL, cblas_wrapper, &cParam);
if (Config->Debug) fprintf(STD_OUT, "Waiting for cblas slave to start\n");
while (cParam.cblasMutex[1].Trylock() != EBUSY) cParam.cblasMutex[1].Unlock();
}
}
// Parallel DMA worker pool: device 0 is driven by the main thread, so only
// nDevices - 1 extra workers are needed.
if (Config->ParallelDMA && nDevices)
{
DMAThreads.SetNumberOfThreads(nDevices - 1, this, &caldgemm::DMA_wrapper, 1, &Config->DMAMapping[1]);
}
if (Config->ThreadSaveDriver == -1)
{
pthread_mutex_init(&globalDriverLock, NULL);
}
if (Config->UseDMAFetchQueue)
{
for (int i = 0;i < nDevices;i++)
{
pthread_mutex_init(&dma_fetch_queue_tasks[i].mutex, NULL);
}
}
#ifndef _WIN32
if (Config->UseGPU && Config->UseCPU)
{
for (int i = 0;i < conf_numprocs;i++)
{
if (CPU_ISSET(i, &oldcpumask) && cpuUsed(i)) fprintf(STD_OUT, "WARNING: Core %d used by GotoBLAS main thread and CALDGEMM, be sure not to use CPU and GPU at the same time!\n", i);
}
}
#endif
// Interleave memory allocations across NUMA nodes if requested.
if (Config->MemPolicy)
{
#ifdef _WIN32
#else
unsigned long nodemask = 0xffffff;
syscall(SYS_set_mempolicy, MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8);
#endif
}
if (Config->PreallocData)
{
if (Preallocate()) return(1);
}
/*fprintf(STD_OUT, "Setting FIFO scheduler\n");
sched_param param;
sched_getparam(0, &param);
param.sched_priority = 1;
if (0 != sched_setscheduler(0, SCHED_FIFO, &param))
{
fprintf(STD_OUT, "Error setting scheduler\n");
return(1);
}*/
//setpriority(PRIO_PROCESS, 0, -20);
// Restore the original affinity and finish bookkeeping.
if (Config->Debug) fprintf(STD_OUT, "Caldgemm Init complete, setting CPU mask %X\n", getcpumask(&oldcpumask));
sched_setaffinity(0, sizeof(oldcpumask), &oldcpumask);
goto_set_num_threads(conf_numprocs);
if (FinishDataInit()) return(1);
finishData->running = false;
for (int i = 0;i < nDevices;i++) for (int j = 0;j < 2;j++) DGEMMTasks[i].PrepareTasks[j].j = DGEMMTasks[i].PrepareTasks[j].k = 0; //Fix valgrind warning
cParam.cblas_size = cParam.dynamic_run = 0;
nDevicesInitialized = nDevices;
if (Config->NumActiveDevices > 0 && Config->NumActiveDevices < nDevices) nDevices = Config->NumActiveDevices;
caldgemm_initialized = true;
if (Config->ShowConfig) printConfig();
return(0);
}
// Accessor: CPU core reserved for the linpack broadcast helper thread,
// as determined during InitCALDGEMM().
int caldgemm::broadcastcore()
{
return broadcast_cpu_core;
}
// Return true if CPU core 'cpu' is reserved by CALDGEMM itself (main thread,
// divide/merge threads, DMA threads, excluded cores, or the pinned device
// runtime thread) and must therefore not be used for BLAS work.
// Note: the result depends on the current matrix_m / matrix_n because the
// ParallelDMA reservation is size-dependent; ensure_omp_thread_pinning exploits
// this by probing with sentinel sizes.
bool caldgemm::cpuUsed(int cpu)
{
if (Config->UseGPU && cpu == Config->PinMainThread) return(true);
for (int i = 0;i < nDevices;i++)
{
if (UseInputPthreads())
{
// Cores GPUMapping[i] .. GPUMapping[i]+procsreq-1 hold the divide thread
// (only with MultiThreadDivide) plus the merge threads of all devices
// sharing this mapping whose merge threads are not explicitly placed.
int procsreq = 1;
for (int j = i;j < nDevices;j++)
{
if (Config->GPUMapping[i] == Config->GPUMapping[j] && Config->PostprocessMapping[j] == -1) procsreq += outputthreads;
}
if ((Config->MultiThreadDivide ? (cpu >= Config->GPUMapping[i]) : (cpu > Config->GPUMapping[i])) && cpu < Config->GPUMapping[i] + procsreq) return(true);
}
if (UseOutputPthreads())
{
// Explicitly placed merge threads occupy their own core range.
if (Config->PostprocessMapping[i] != -1 && cpu >= Config->PostprocessMapping[i] && cpu < Config->PostprocessMapping[i] + outputthreads) return(true);
}
if (Config->ParallelDMA && matrix_n >= Config->ParallelDMA)
{
// Below the GroupParallelDMA threshold the allocation core is used,
// otherwise the per-device DMA core.
if (((matrix_n < Config->GroupParallelDMA || (signed) Config->GroupParallelDMA == -1) ? Config->AllocMapping[i] : Config->DMAMapping[i]) == cpu) return(true);
}
}
for (int i = 0;i < Config->nExcludeCPUCores;i++) if (Config->ExcludeCPUCores[i] == cpu) return(true);
if (Config->PinDeviceRuntimeThreads == cpu) return(true);
return(false);
}
// Reserve (via caldgemm_goto_reserve_cpu) all CPU cores that CALDGEMM helper
// threads occupy for the current run, so the BLAS library keeps its workers
// off them. Returns the number of cores counted as reserved; this count is
// later subtracted from the BLAS thread pool size.
int caldgemm::reserve_cpu_cores()
{
int nthreads = 0;
int mainfound = 0;
if (UseOutputPthreads() || UseInputPthreads() || Config->ParallelDMA || Config->GroupParallelDMA)
{
for (int i = 0;i < nDevices;i++)
{
// 'offset' counts earlier devices sharing this GPUMapping whose merge
// threads were explicitly placed elsewhere.
int offset = 0;
for (int j = 0;j < i;j++)
{
if (Config->GPUMapping[i] == Config->GPUMapping[j] && Config->PostprocessMapping[j] != -1) offset++;
}
if (matrix_n >= Config->ParallelDMA && Config->ParallelDMA != 0)
{
if (matrix_n < Config->GroupParallelDMA)
{
// Grouped mode: reserve each distinct allocation core once.
if (Config->AllocMapping[i] != Config->PinMainThread)
{
bool found = false;
for (int j = 0;j < i;j++)
{
if (Config->AllocMapping[j] == Config->AllocMapping[i])
{
found = true;
break;
}
}
if (!found)
{
caldgemm_goto_reserve_cpu(Config->AllocMapping[i], 1);
if (Config->Debug) fprintf(STD_OUT, "Reserving Core %d for Grouped DMA Thread\n", Config->AllocMapping[i]);
nthreads++;
}
}
}
else if (i)
{
// Per-device DMA cores (device 0 is driven by the main thread).
caldgemm_goto_reserve_cpu(Config->DMAMapping[i], 1);
if (Config->Debug) fprintf(STD_OUT, "Reserving Core %d for DMA Thread\n", Config->DMAMapping[i]);
nthreads++;
}
}
else if (offset == 0 && Config->MultiThreadDivide && UseInputPthreads())
{
// First device on this mapping: reserve the divide-thread core.
caldgemm_goto_reserve_cpu(Config->GPUMapping[i], 1);
if (Config->Debug) fprintf(STD_OUT, "Reserving Core %d for DivideBuffer\n", Config->GPUMapping[i]);
nthreads++;
if (Config->GPUMapping[i] == Config->PinMainThread) mainfound = 1;
}
if (UseOutputPthreads())
{
// Merge-thread cores: either after the divide core, or at the explicit
// PostprocessMapping location.
for (int j = 0;j < outputthreads;j++)
{
const int merge_core = Config->PostprocessMapping[i] == -1 ? (Config->GPUMapping[i] + 1 + offset * outputthreads + j) : (Config->PostprocessMapping[i] + j);
caldgemm_goto_reserve_cpu(merge_core, 1);
if (Config->Debug) fprintf(STD_OUT, "Reserving Core %d for MergeBuffer\n", merge_core);
}
nthreads += outputthreads;
}
}
}
if (mainfound == 0 || !Config->MultiThreadDivide)
{
caldgemm_goto_reserve_cpu(Config->PinMainThread, 1);
if (Config->Debug) fprintf(STD_OUT, "Reserving Core %d for Main Thread\n", Config->PinMainThread);
if (Config->ForceNumCPUThreads == 0 || Config->PinMainThread < Config->ForceNumCPUThreads) nthreads++;
}
for (int i = 0;i < Config->nExcludeCPUCores;i++)
{
caldgemm_goto_reserve_cpu(Config->ExcludeCPUCores[i], 1);
if (Config->Debug) fprintf(STD_OUT, "Excluding Core %d\n", Config->ExcludeCPUCores[i]);
}
if (Config->ForceNumCPUThreads) nthreads += Config->nExcludeCPUCores;
if (Config->PinDeviceRuntimeThreads >= 0)
{
caldgemm_goto_reserve_cpu(Config->PinDeviceRuntimeThreads, 1);
// NOTE(review): nthreads is incremented twice here (once conditionally,
// once unconditionally), unlike the analogous PinMainThread case above
// which increments only once -- verify whether the second increment is
// intentional or a duplicate.
if (Config->ForceNumCPUThreads == 0 || Config->PinDeviceRuntimeThreads < Config->ForceNumCPUThreads) nthreads++;
nthreads++;
}
if (Config->Debug) fprintf(STD_OUT, "Reserved %d cores\n", nthreads);
return(nthreads);
}
// Body of one parallel-DMA worker thread: label the thread, then loop --
// block in WaitForTask() until work (or termination) is signalled and run
// the per-device main GPU loop for this worker's device index.
void caldgemm::DMA_wrapper(caldgemm::clsDMAParam* par)
{
const int myThread = par->threadNum;
{
char threadNameBuffer[32];
sprintf(threadNameBuffer, "DMA Thread %d", myThread);
setThreadName(threadNameBuffer);
}
if (Config->Debug) fprintf(STD_OUT, "DMA wrapper thread %d running\n", myThread);
for (;;)
{
if (!par->WaitForTask()) break;
if (Config->Debug) fprintf(STD_OUT, "DMA wrapper thread %d starting processing\n", myThread);
RunCALDGEMMMain(myThread);
}
if (Config->Debug) fprintf(STD_OUT, "DMA wrapper thread %d terminating\n", myThread);
}
void* caldgemm::linpack_broadcast_wrapper(void* arg)
{
return ((caldgemm*) arg)->linpack_broadcast_wrapper_a();
}
// Main loop of the linpack broadcast helper thread. It pins itself to the
// broadcast core, signals startup by holding linpackMutex[0] (the spawner
// spins on Trylock to detect this), then services broadcast requests:
// each Lock() on linpackMutex[0] blocks until the main thread posts work,
// and linpackMutex[1] is unlocked to report completion.
void* caldgemm::linpack_broadcast_wrapper_a()
{
setThreadName("Linpack Broadcast Wrapper");
if (Config->Debug) fprintf(STD_OUT, "Linpack broadcast helper thread started\n");
int linpackCPU = broadcast_cpu_core;
if (linpackCPU >= conf_numprocs_real) linpackCPU = 0; //Fall back to core 0 if the chosen core does not physically exist
if (Config->Debug) fprintf(STD_OUT, "Linpack Thread, core %d\n", linpackCPU);
sched_setaffinity_set_core(linpackCPU + Config->CPUCoreOffset);
// First Lock(): startup handshake with the spawner in InitCALDGEMM.
linpackParameters.linpackMutex[0].Lock();
// Each further successful Lock() is one broadcast request, until terminate.
while (linpackParameters.linpackMutex[0].Lock() == 0 && linpackParameters.terminate == false)
{
Timers.LinpackTimer2.Start();
Config->linpack_broadcast_function();
Timers.LinpackTimer2.Stop();
Timers.BcastTimer.Start();
linpackParameters.linpackMutex[1].Unlock(); //Report completion to the main thread
}
if (Config->Debug) fprintf(STD_OUT, "linpack slave terminating\n");
linpackParameters.linpackMutex[1].Unlock(); //Final unlock so the joining thread is not left blocked
pthread_exit(NULL);
return(NULL);
}
// Dynamic work-stealing scheduler: while the GPUs process tiles, decide
// whether the CPU should take over additional work. Two mechanisms:
//  - Second phase: based on the measured gpu_ratio_used, carve a rectangular
//    region (dynamic_run x dynamic_size) off the end of the GPU part.
//  - Third phase: steal individual tiles from the end of the tile order
//    (dynamic_run2 / cpu_k, lowering cpu_k_barrier).
// Returns 1 if additional CPU work was scheduled, otherwise 0.
// Protected by scheduleMutex; reads gpu_k_barrier written by the GPU loop.
int caldgemm::cpuScheduler()
{
int retVal = 0;
if (Config->UseCPU && Config->MultiThread && Config->DynamicSched && (Config->ParallelDMA == 0 || Config->ParallelDMA > matrix_n))
{
const size_t mb = (gpu_m + Config->Height - 1) / Config->Height;
const size_t nb = (gpu_n + Config->Height - 1) / Config->Height;
size_t nBlocks = mb * nb;
pthread_mutex_lock(&scheduleMutex);
const size_t k = gpu_k_barrier == -1 ? 0 : gpu_k_barrier;
if ((size_t) gpu_k_barrier < nBlocks - 1)
{
size_t blockm, blockn;
DGEMM_getblocks(k, blockm, blockn);
if (cParam.dynamic_run == 0 && Config->SecondPhaseDynamicRuns)
{
// Estimate the CPU's share of the remaining tiles from the measured
// GPU ratio, then shape it into a dynamic_run x dynamic_size region.
cParam.dynamic_size = ((1.0f - gpu_ratio_used) * (float) (nBlocks - k - 1) + 0.5) * Config->Height;
if (cParam.dynamic_size > (nBlocks - k - 1) * Config->Height) cParam.dynamic_size = (nBlocks - k - 1) * Config->Height;
if (cParam.dynamic_size > Config->Height)
{
cParam.dynamic_run = 1 + cParam.dynamic_size / mymin(gpu_m, gpu_n);
cParam.dynamic_size /= cParam.dynamic_run;
cParam.dynamic_size -= cParam.dynamic_size % Config->Height;
cParam.dynamic_run *= Config->Height;
// Shrink by the partial-tile remainder so the region aligns with
// full tiles when gpu_m/gpu_n are not multiples of Height.
if (cParam.dynamic_size && (DGEMM_favor_m ? gpu_n : gpu_m) % Config->Height)
{
const size_t adjustment = Config->Height - (DGEMM_favor_m ? gpu_n : gpu_m) % Config->Height;
if (Config->Debug) fprintf(STD_OUT, "Adjusting second phase run size for small tiles: %lld - %lld = %lld\n", (long long int) cParam.dynamic_size, (long long int) adjustment, (long long int) cParam.dynamic_size - adjustment);
cParam.dynamic_size -= adjustment;
}
if (cParam.dynamic_run && (DGEMM_favor_m ? gpu_m : gpu_n) % Config->Height)
{
const size_t adjustment = Config->Height - (DGEMM_favor_m ? gpu_m : gpu_n) % Config->Height;
if (Config->Debug) fprintf(STD_OUT, "Adjusting second phase run row size for small tiles: %lld - %lld = %lld\n", (long long int) cParam.dynamic_run, (long long int) adjustment, (long long int) cParam.dynamic_run - adjustment);
cParam.dynamic_run -= adjustment;
}
// Shrink further until the region no longer covers the tile the GPU
// is currently processing (block k).
while (DGEMM_favor_m ? (blockm * Config->Height >= gpu_m - cParam.dynamic_run && blockn * Config->Height >= gpu_n - cParam.dynamic_size) :
(blockn * Config->Height >= gpu_n - cParam.dynamic_run && blockm * Config->Height >= gpu_m - cParam.dynamic_size))
{
if (cParam.dynamic_run > Config->Height)
{
cParam.dynamic_run -= Config->Height;
cParam.dynamic_size = mymin(gpu_m, gpu_n);
}
else
{
if (cParam.dynamic_size > Config->Height)
{
cParam.dynamic_size -= Config->Height;
}
else
{
cParam.dynamic_run = cParam.dynamic_size = 0;
}
}
if (Config->Debug) fprintf(STD_OUT, "cParam dynamic size reduced to: %lld blockrows (%lld), %lld blocks (%lld)\n", (long long int) cParam.dynamic_run / Config->Height, (long long int) cParam.dynamic_run, (long long int) cParam.dynamic_size / Config->Height, (long long int) cParam.dynamic_size);
}
// Heuristic bump for large matrices with plenty of work left.
if (nBlocks >= 256 && nBlocks - k - 1 > 16 && cParam.dynamic_run == Config->Height && cParam.dynamic_size < mymin(gpu_m, gpu_n)) cParam.dynamic_size += Config->Height;
if (!Config->Quiet) fprintf(STD_OUT, "Scheduling Additional CPU DGEMM Run over %lld blockrows (%lld), %lld blocks (%lld)\n", (long long int) cParam.dynamic_run / Config->Height, (long long int) cParam.dynamic_run, (long long int) cParam.dynamic_size / Config->Height, (long long int) cParam.dynamic_size);
retVal = 1;
}
else
{
cParam.dynamic_size = 0;
goto TryThirdRun;
}
}
else
{
TryThirdRun:
if (Config->ThirdPhaseDynamicRuns)
{
// Steal the last tile (below cpu_k_barrier) that is not already
// covered by the second-phase region.
size_t test_cpu_k = cpu_k_barrier - 1;
size_t cpublockm, cpublockn;
DGEMM_getblocks(test_cpu_k, cpublockm, cpublockn);
while (test_cpu_k > k && (DGEMM_favor_m ? (cpublockm * Config->Height >= gpu_m - cParam.dynamic_run && cpublockn * Config->Height >= gpu_n - cParam.dynamic_size) :
(cpublockn * Config->Height >= gpu_n - cParam.dynamic_run && cpublockm * Config->Height >= gpu_m - cParam.dynamic_size)))
{
test_cpu_k--;
DGEMM_getblocks(test_cpu_k, cpublockm, cpublockn);
}
// Only steal if the GPU is far enough behind (ThirdPhaseThreshold margin).
if ((long long int) test_cpu_k > 0 && (signed) k <= (signed) test_cpu_k - 2 * nDevices + Config->ThirdPhaseThreshold)
{
if (!Config->Quiet) fprintf(STD_OUT, "Scheduling dynamic 3rd phase run, CPU taking tile %lld (k=%lld,m=%lld,n=%lld) from GPU (GPU k = %lld)\n", (long long int) test_cpu_k, (long long int) k, (long long int) cpublockm, (long long int) cpublockn, (long long int) gpu_k_barrier);
cParam.dynamic_run2++;
cParam.cpu_k = test_cpu_k;
cpu_k_barrier = test_cpu_k;
retVal = 1;
}
}
}
}
pthread_mutex_unlock(&scheduleMutex);
}
return(retVal);
}
// Execute the HPL lookahead steps on the CPU: the preparatory DGEMM part (or
// wait for the GPUs in alternate-lookahead mode), the panel factorization
// callback, and the broadcast (delegated to the helper thread if MultiThread).
// 'old_goto_threads' is the full BLAS thread count; 'require_threads' is
// incremented for every additional core reserved here, and the BLAS pool is
// resized to the remainder before returning.
void caldgemm::RunLinpackFactorization(int old_goto_threads, int& require_threads)
{
const CBLAS_TRANSPOSE TransposeA = this->TransposeA ? CblasTrans : CblasNoTrans;
const CBLAS_TRANSPOSE TransposeB = this->TransposeB ? CblasTrans : CblasNoTrans;
const size_t A_pitch_use = (this->TransposeA ? 1 : A_pitch);
if (ExecLinpack >= 2)
{
if (Config->AlternateLookahead > matrix_n)
{
// Alternate lookahead: the GPUs compute the initial part; block until
// they signal completion via alternateLookaheadMutex.
if (!Config->Quiet) fprintf(STD_OUT, "\t\t\tWaiting for GPUs to finish initial DGEMM part to start Linpack factorization\n");
alternateLookaheadMutex.Lock();
if (Config->SimpleGPUQueuing)
{
CheckAlternateTilesRemainingSQ();
}
_mm_mfence(); //Make the GPU results globally visible before factorizing
}
else
{
// Classic lookahead: compute the initial DGEMM part on the CPU.
if (!Config->Quiet) fprintf(STD_OUT, "\t\t\tDoing initial cblas runs to prepare Linpack factorization\n");
Timers.CPUTimer.Start();
cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, Config->Width, matrix_n, Config->Width, Alpha, A - Config->Width * A_pitch_use, A_pitch, B, B_pitch, Beta, C - Config->Width * C_pitch, C_pitch);
Timers.CPUTimer.Stop();
}
if (!Config->Quiet) fprintf(STD_OUT, "\t\t\tStarting Linpack factorization\n");
// Optionally restrict the CPU set used during factorization.
if (Config->HPLFactorizeRestrictCPUs == 1)
{
if (8 < old_goto_threads - require_threads) goto_set_num_threads(8);
}
else if (Config->HPLFactorizeRestrictCPUs >= 2)
{
caldgemm_goto_restrict_cpus(Config->HPLFactorizeRestrictCPUs);
}
Timers.LinpackTimer1.Start();
Config->linpack_factorize_function();
Timers.LinpackTimer1.Stop();
if (Config->HPLFactorizeRestrictCPUs >= 2) caldgemm_goto_restrict_cpus(0);
}
if (Config->LinpackNodes > 1)
{
if (Config->MultiThread)
{
// Hand the broadcast to the helper thread and reserve its core.
caldgemm_goto_reserve_cpu(broadcast_cpu_core, 1);
if (Config->ForceNumCPUThreads == 0 || broadcast_cpu_core < Config->ForceNumCPUThreads) require_threads++;
linpackParameters.linpackMutex[0].Unlock();
}
else
{
// Single-threaded mode: broadcast synchronously.
Timers.LinpackTimer2.Start();
Config->linpack_broadcast_function();
Timers.LinpackTimer2.Stop();
}
}
goto_set_num_threads(old_goto_threads - require_threads);
}
void* caldgemm::cblas_wrapper(void* arg)
{
return ((cblasParameters*) arg)->cls->cblas_wrapper_a(true);
}
// Runs the CPU (cblas) portion of the hybrid DGEMM while the GPUs process
// their tiles, cooperating with the Linpack factorization / broadcast threads
// for CPU-core reservation. Returns 0 (no failure paths in this routine).
int caldgemm::caldgemm_part_cpu()
{
	// Effective step for moving one tile along A/B: when an input is
	// transposed, consecutive tiles are adjacent elements instead of rows.
	const size_t A_pitch_use = (TransposeA ? 1 : A_pitch);
	const size_t B_pitch_use = (TransposeB ? B_pitch : 1);
	// NOTE: these locals deliberately shadow the bool members of the same
	// name, converting them to the enum form that cblas_dgemm expects.
	const CBLAS_TRANSPOSE TransposeA = this->TransposeA ? CblasTrans : CblasNoTrans;
	const CBLAS_TRANSPOSE TransposeB = this->TransposeB ? CblasTrans : CblasNoTrans;
	if (!Config->Quiet) fprintf(STD_OUT, "\t\tSlave thread starting cblas (m: %lld, n: %lld, cblas_size: %lld (%lld), dynamic: %lld/%lld, cpu_k: %lld)\n", (long long int) matrix_m, (long long int) matrix_n, (long long int) cParam.cblas_size, (long long int) Config->Height, (long long int) cParam.dynamic_run, (long long int) cParam.dynamic_size, (long long int) cParam.cpu_k);
	// Take the cores needed by the GPU helper threads away from the BLAS
	// thread pool; if nothing would remain, fall back to one BLAS thread and
	// drop the reservations entirely.
	int old_goto_threads = conf_numprocs;
	int require_threads_base = reserve_cpu_cores();
	if (Config->Debug) fprintf(STD_OUT, "Reserving %d threads for gpu \n", require_threads_base);
	if (old_goto_threads > require_threads_base)
	{
		goto_set_num_threads(old_goto_threads - require_threads_base);
	}
	else
	{
		goto_set_num_threads(1);
		caldgemm_goto_reserve_cpus(0);
	}
	Timers.TotalCPUTimer.Start();
	Timers.LinpackTimer3.Start();
	// Optionally restrict the CPU set BLAS may use while the LASWP row swap /
	// lookahead factorization is in flight.
	bool cpus_restricted = false;
	if (Config->HPLFactorizeRestrictCPUs >= 2 && (Config->LinpackSwapN != NULL || (ExecLinpack && Config->AlternateLookahead <= matrix_n)))
	{
		caldgemm_goto_restrict_cpus(Config->HPLFactorizeRestrictCPUs);
		cpus_restricted = true;
	}
	if (Config->LinpackSwapN != NULL)
	{
		Config->linpack_swap_function();
	}
	Timers.LinpackTimer3.Stop();
	// The callback may request extra reserved cores depending on current n.
	if (Config->HPLFactorizeRestrictCallback != NULL) require_threads_base += Config->HPLFactorizeRestrictCallback(matrix_n);
	int require_threads = require_threads_base;
	// Standard lookahead: run the factorization up front. (With alternate
	// lookahead enabled for small n it is instead run in the middle of the
	// tiled loop below, once the GPU has finished the initial tiles.)
	if ((ExecLinpack && Config->AlternateLookahead <= matrix_n) || ExecLinpack == 1)
	{
		RunLinpackFactorization(old_goto_threads, require_threads);
	}
	if (cpus_restricted)
	{
		// Undo the restriction and re-apply the plain thread-count split.
		caldgemm_goto_restrict_cpus(0);
		if (old_goto_threads > require_threads)
		{
			goto_set_num_threads(old_goto_threads - require_threads);
		}
		else
		{
			goto_set_num_threads(1);
		}
	}
	Timers.CPUTimer.Start();
	bool linpackfinished = false;
	// Main CPU work loop: cpuScheduler() may hand the CPU additional stolen
	// GPU tiles (dynamic scheduling, 3rd phase), repeating the loop body.
	do
	{
		if (cParam.dynamic_run2)
		{
			// Dynamic run, 3rd phase: process a single GPU tile (cpu_k),
			// clamping the tile extents at the ragged matrix border.
			size_t blockm, blockn;
			DGEMM_getblocks(cParam.cpu_k, blockm, blockn);
			VT_USER_START_A("CPU DGEMM Phase 3");
			cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, blockm == gpu_m / Config->Height ? (gpu_m % Config->Height) : Config->Height, blockn == gpu_n / Config->Height ? (gpu_n % Config->Height) : Config->Height, Config->Width, Alpha, A + blockm * Config->Height * A_pitch_use, A_pitch, B + blockn * Config->Height * B_pitch_use, B_pitch, Beta, C + blockm * Config->Height * C_pitch + blockn * Config->Height, C_pitch);
			VT_USER_END_A("CPU DGEMM Phase 3");
		}
		else
		{
			if (cParam.dynamic_run)
			{
				// Dynamic run, 2nd phase: CPU takes a dynamic_run x
				// dynamic_size strip from the far corner of the GPU area.
				VT_USER_START_A("CPU DGEMM Phase 2");
				if (DGEMM_favor_m)
				{
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, cParam.dynamic_run, cParam.dynamic_size, Config->Width, Alpha, A + (gpu_m - cParam.dynamic_run) * A_pitch_use, A_pitch, B + (gpu_n - cParam.dynamic_size) * B_pitch_use, B_pitch, Beta, C + (gpu_m - cParam.dynamic_run) * C_pitch + gpu_n - cParam.dynamic_size, C_pitch);
				}
				else
				{
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, cParam.dynamic_size, cParam.dynamic_run, Config->Width, Alpha, A + (gpu_m - cParam.dynamic_size) * A_pitch_use, A_pitch, B + (gpu_n - cParam.dynamic_run) * B_pitch_use, B_pitch, Beta, C + (gpu_m - cParam.dynamic_size) * C_pitch + gpu_n - cParam.dynamic_run, C_pitch);
				}
				VT_USER_END_A("CPU DGEMM Phase 2");
			}
			// cblas2: width of a first sub-slice of the CPU part that is run
			// while the Linpack broadcast is still outstanding; once the
			// broadcast finishes, the broadcast core is returned to the BLAS
			// pool for the remainder. 0 disables the split.
			size_t cblas2;
			if (Config->RereserveLinpackCPU)
			{
				// Use the split only if timing history predicts the broadcast
				// clearly finishes within this DGEMM (sizes comparable to the
				// last run, DGEMM time exceeding broadcast time by > 5 s).
				if (ExecLinpack && Config->LinpackNodes > 1 && Config->MultiThread && (((double) matrix_m * (double) matrix_n) - linpack_last_mn[ExecLinpack]) / linpack_last_mn[ExecLinpack] < 0.3 && linpackCPUDGEMMTime[ExecLinpack] - linpackBcastTime[ExecLinpack] > 5.0)
				{
					cblas2 = (double) (DGEMM_split_m ? matrix_n : matrix_m) * (linpackBcastTime[ExecLinpack] + 3.0) / linpackCPUDGEMMTime[ExecLinpack];
					if (!Config->Quiet) fprintf(STD_OUT, "Splitting CPU DGEMM for later enabling additional cores, cblas2=%lld\n", (long long int) cblas2);
				}
				else
				{
					cblas2 = 0;
				}
				// Round up to a multiple of 8 (presumably for BLAS blocking
				// efficiency — TODO confirm).
				if (cblas2 % 8) cblas2 += 8 - cblas2 % 8;
			}
			else
			{
				cblas2 = 0;
			}
			if (DGEMM_split_m) //favor splitting m because of consecutive memory
			{
				// Border strip in n that the GPU does not cover.
				if (matrix_n != gpu_n && cParam.borders_done == false)
				{
					VT_USER_START_A("CPU DGEMM Borders");
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, matrix_m - cParam.cblas_size, matrix_n - gpu_n, Config->Width, Alpha, A, A_pitch, B + gpu_n * B_pitch_use, B_pitch, Beta, C + gpu_n, C_pitch);
					VT_USER_END_A("CPU DGEMM Borders");
				}
				// Alternate lookahead (small n): factorize now, between the
				// border part and the main CPU slice.
				if (ExecLinpack >= 2 && cParam.borders_done == false && Config->AlternateLookahead > matrix_n)
				{
					Timers.CPUTimer.Stop();
					RunLinpackFactorization(old_goto_threads, require_threads);
					Timers.CPUTimer.Start();
				}
				if (cParam.dynamic_run == 0)
				{
					VT_USER_START_A("CPU DGEMM Phase 1");
					if (cblas2)
					{
						// First slice while the broadcast runs elsewhere.
						cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, cParam.cblas_size, cblas2, Config->Width, Alpha, A + (matrix_m - cParam.cblas_size) * A_pitch_use, A_pitch, B, B_pitch, Beta, C + (matrix_m - cParam.cblas_size) * C_pitch, C_pitch);
						// Non-blocking probe: has the broadcast finished?
						if (linpackParameters.linpackMutex[1].Trylock() == EBUSY)
						{
							if (!Config->NoPerformanceWarnings) fprintf(STD_OUT, "WARNING: Linpack broadcast was not finished at predicted time, running CPU DGEMM with reduced core count\n");
						}
						else
						{
							// Broadcast done: give its core back to BLAS for
							// the rest of the CPU DGEMM.
							Timers.BcastTimer.Stop();
							if (!Config->NoPerformanceWarnings && Timers.BcastTimer.GetElapsedTime() > 1.0) fprintf(STD_OUT, "Bcast core idle for %2.4f seconds\n", Timers.BcastTimer.GetElapsedTime());
							int require_threads_new = require_threads_base;
							if (Config->Debug) fprintf(STD_OUT, "Reserving %d threads for gpu during second cpu run\n", require_threads_new);
							if (old_goto_threads > require_threads_new)
							{
								goto_set_num_threads(old_goto_threads - require_threads_new);
								caldgemm_goto_reserve_cpu(broadcast_cpu_core, 0);
							}
							else
							{
								goto_set_num_threads(1);
								caldgemm_goto_reserve_cpus(0);
							}
							linpackfinished = true;
						}
					}
					// Remaining (or whole, if cblas2 == 0) CPU slice.
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, cParam.cblas_size, matrix_n - cblas2, Config->Width, Alpha, A + (matrix_m - cParam.cblas_size) * A_pitch_use, A_pitch, B + cblas2 * B_pitch_use, B_pitch, Beta, C + (matrix_m - cParam.cblas_size) * C_pitch + cblas2, C_pitch);
					VT_USER_END_A("CPU DGEMM Phase 1");
				}
			}
			else
			{
				// Mirror of the branch above with the roles of m and n
				// swapped (split in n instead of m).
				if (cParam.dynamic_run == 0)
				{
					VT_USER_START_A("CPU DGEMM Phase 1");
					if (cblas2)
					{
						cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, cblas2, cParam.cblas_size, Config->Width, Alpha, A, A_pitch, B + (matrix_n - cParam.cblas_size) * B_pitch_use, B_pitch, Beta, C + matrix_n - cParam.cblas_size, C_pitch);
						if (linpackParameters.linpackMutex[1].Trylock() == EBUSY)
						{
							if (!Config->NoPerformanceWarnings) fprintf(STD_OUT, "Linpack broadcast was not finished at predicted time, running CPU DGEMM with reduced core count\n");
						}
						else
						{
							int require_threads_new = require_threads_base;
							if (old_goto_threads > require_threads_new)
							{
								if (Config->Debug) fprintf(STD_OUT, "Reserving %d threads for gpu during second cpu run\n", require_threads_new);
								goto_set_num_threads(old_goto_threads - require_threads_new);
								caldgemm_goto_reserve_cpu(broadcast_cpu_core, 0);
							}
							else
							{
								goto_set_num_threads(1);
								caldgemm_goto_reserve_cpus(0);
							}
							linpackfinished = true;
						}
					}
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, matrix_m - cblas2, cParam.cblas_size, Config->Width, Alpha, A + cblas2 * A_pitch_use, A_pitch, B + (matrix_n - cParam.cblas_size) * B_pitch_use, B_pitch, Beta, C + cblas2 * C_pitch + matrix_n - cParam.cblas_size, C_pitch);
					VT_USER_END_A("CPU DGEMM Phase 1");
				}
				if (ExecLinpack >= 2 && cParam.borders_done == false && Config->AlternateLookahead > matrix_n)
				{
					Timers.CPUTimer.Stop();
					RunLinpackFactorization(old_goto_threads, require_threads);
					Timers.CPUTimer.Start();
				}
				// Border strip in m that the GPU does not cover.
				if (matrix_m != gpu_m && cParam.borders_done == false)
				{
					VT_USER_START_A("CPU DGEMM Borders");
					cblas_dgemm(CblasRowMajor, TransposeA, TransposeB, matrix_m - gpu_m, matrix_n - cParam.cblas_size, Config->Width, Alpha, A + gpu_m * A_pitch_use, A_pitch, B, B_pitch, Beta, C + gpu_m * C_pitch, C_pitch);
					VT_USER_END_A("CPU DGEMM Borders");
				}
			}
		}
		// Borders and lookahead factorization run at most once per call.
		cParam.borders_done = true;
		if (Config->Debug) fprintf(STD_OUT, "cblas run completed\n");
	} while (cpuScheduler());
	Timers.CPUTimer.Stop();
	// If we never observed the broadcast finish, wait for it now so the
	// thread-count restore below cannot race with the broadcast thread.
	if (linpackfinished == false && ExecLinpack && Config->MultiThread && Config->LinpackNodes > 1)
	{
		linpackParameters.linpackMutex[1].Lock();
	}
	Timers.TotalCPUTimer.Stop();
	// Restore the full BLAS thread pool and drop all core reservations.
	goto_set_num_threads(old_goto_threads);
	caldgemm_goto_reserve_cpus(0);
	return(0);
}
// Runs the GPU portion of the hybrid DGEMM: computes the tile grid, builds
// the tile-to-device distribution for the improved scheduler, and drives the
// per-iteration GPU execution (RunCALDGEMM_Init/Main/Exit).
// Returns 0 on success, 1 on error (note: a RunCALDGEMM_Exit failure returns
// 0 — presumably intentional since the compute already finished, TODO confirm).
int caldgemm::caldgemm_part_gpu()
{
	// Tile grid dimensions (rounded up to cover ragged borders).
	const size_t mb = (gpu_m + Config->Height - 1) / Config->Height;
	const size_t nb = (gpu_n + Config->Height - 1) / Config->Height;
	const size_t nBlocks = mb * nb;
	if (Config->Debug)
	{
		if (DGEMM_favor_m)
		{
			fprintf(STD_OUT, "Favoring m direction, %lld blocks (%lld x %lld) (mb x nb)\n", (long long int) nBlocks, (long long int) mb, (long long int) nb);
		}
		else
		{
			fprintf(STD_OUT, "Not favoring m direction, %lld blocks (%lld x %lld) (mb x nb)\n", (long long int) nBlocks, (long long int) mb, (long long int) nb);
		}
	}
	// If the block count along the non-favored direction exceeds the total
	// number of B-buffers, inputs will have to be re-uploaded.
	if (!Config->NoPerformanceWarnings && (buffersSwitchable ? mymin(nb, mb) : nb) > (size_t) (bbuffers[0] * nDevices)) fprintf(STD_OUT, "WARNING: Insufficient buffers for Input Matrices, retransfer required\n");
	Timers.GPUTimer.Start();
	for (unsigned int i = 0; i < Config->Iterations; ++i)
	{
		// Tiles belonging to the first Config->Width rows: once these are
		// done the alternate-lookahead factorization may start.
		AlternateLookaheadBlocksM = (std::min<size_t>(Config->Width, gpu_m) - 1) / Config->Height + 1;
		AlternateLookaheadTilesRemaining = AlternateLookaheadTilesFull = nb * AlternateLookaheadBlocksM;
		if (Config->ImprovedScheduler)
		{
			// Statically assign every tile to a device up front.
			if (!Config->PreallocData) tileDistribution = new int[nBlocks];
			for (int l = 0;l < nDevices;l++) first_device_k[l] = -1;
			// Balance mode 1: if the last tile row/column along the
			// non-favored direction is undersized, discount it so devices
			// get comparable amounts of actual work.
			size_t block_correction_factor = 0;
			if (Config->Height > CALDGEMM_MIN_CORRECTION_SIZE && Config->SmallTiles && Config->ImprovedSchedulerBalance == 1)
			{
				size_t undersize;
				size_t scaler;
				if (DGEMM_favor_m)
				{
					undersize = gpu_n % Config->Height;
					scaler = mb;
				}
				else
				{
					undersize = gpu_m % Config->Height;
					scaler = nb;
				}
				if (undersize)
				{
					if (undersize < CALDGEMM_MIN_CORRECTION_SIZE) undersize = CALDGEMM_MIN_CORRECTION_SIZE;
					block_correction_factor = (Config->Height - undersize) * scaler / Config->Height;
				}
			}
			// Balance mode 2: distribute the undersized last row/column
			// separately (in reverse device order), and spread the full
			// tiles over the remaining (mb_use x nb_use) grid.
			bool balance2 = Config->ImprovedSchedulerBalance == 2 && (DGEMM_favor_m ? gpu_n : gpu_m) % Config->Height;
			int mb_use, nb_use, nBlocks_use;
			if (balance2)
			{
				mb_use = DGEMM_favor_m ? mb : (mb - 1);
				nb_use = DGEMM_favor_m ? (nb - 1) : nb;
				nBlocks_use = mb_use * nb_use;
			}
			else
			{
				mb_use = mb;
				nb_use = nb;
				nBlocks_use = nBlocks;
			}
			//size_t numt[4] = {0,0,0,0}, sizet[4] = {0,0,0,0};
			for (size_t l = 0;l < nBlocks;l++)
			{
				size_t blockn, blockm;
				int k;
				if (DGEMM_favor_m)
				{
					// Tiles are enumerated n-fastest; rank them m-fastest (k)
					// so each device receives contiguous columns of tiles.
					blockn = l % nb;
					blockm = l / nb;
					if (balance2 && blockn == nb - 1)
					{
						tileDistribution[l] = nDevices - 1 - nDevices * blockm / mb;
					}
					else
					{
						k = blockn * mb_use + blockm;
						tileDistribution[l] = std::min<int>(nDevices * k / (nBlocks_use - block_correction_factor), nDevices - 1);
					}
				}
				else
				{
					blockm = l % mb;
					blockn = l / mb;
					if (balance2 && blockm == mb - 1)
					{
						tileDistribution[l] = nDevices - 1 - nDevices * blockn / nb;
					}
					else
					{
						k = blockn + blockm * nb_use;
						tileDistribution[l] = std::min<int>(nDevices * k / (nBlocks_use - block_correction_factor), nDevices - 1);
					}
				}
				/*numt[tileDistribution[l]]++;
				size_t height1 = (int) (((size_t) blockn == gpu_n / Config->Height) ? (gpu_n % Config->Height) : Config->Height);
				size_t height2 = (int) (((size_t) blockm == gpu_m / Config->Height) ? (gpu_m % Config->Height) : Config->Height);
				sizet[tileDistribution[l]] += height1 * height2;*/
				// Remember the first tile index each device will process.
				if (first_device_k[tileDistribution[l]] == -1) first_device_k[tileDistribution[l]] = l;
				//if (Config->Debug) fprintf(STD_OUT, "Tile %lld (%lld / %lld) processed by device %d\n", (long long int) l, (long long int) blockm, (long long int) blockn, tileDistribution[l]);
			}
			//for (int l = 0;l < 4;l++) fprintf(STD_OUT, "TILESTAT %d: %3lld - %lld\n", l, (long long int) numt[l], (long long int) sizet[l]);
			//fprintf(STD_OUT, "TILESIZE %lld (factor %lld - miss %lld)\n", (long long int) (Config->Height * Config->Height), (long long int) block_correction_factor, (long long int) ((sizet[2] - sizet[3]) / (Config->Height * Config->Height)));
			if (Config->Debug)
			{
				// Print the tile-to-device map, one grid row per line.
				for (size_t l = 0;l < nBlocks;l++)
				{
					fprintf(STD_OUT, "%d ", tileDistribution[l]);
					if ((l + 1) % (DGEMM_favor_m ? nb : mb) == 0) fprintf(STD_OUT, "\n");
				}
			}
		}
		// Invalidate all per-device input-buffer caches for this iteration.
		for (int ii = 0;ii < nDevices;ii++)
		{
			buffersMajor[ii] = -1;
			for (int j = 0;j < bbuffers[ii];j++) buffersMinor[ii][j] = -1;
			next_buffer_A[ii] = 0;
			next_buffer_B[ii] = 0;
		}
		// Preallocated bookkeeping arrays are sized PreallocData^2; refuse to
		// overrun them.
		if (Config->PreallocData && ((int) mb > Config->PreallocData || (int) nb > Config->PreallocData))
		{
			fprintf(STD_OUT, "Value of PreallocData too small for current block count! (mb %d nb %d pre %d)", (int) mb, (int) nb, Config->PreallocData);
			return(1);
		}
		if (RunCALDGEMM_Init()) return(1);
		// Large n: run the main loop once per DMA group in parallel threads;
		// otherwise run it single-threaded from here.
		if (Config->ParallelDMA != 0 && matrix_n >= Config->ParallelDMA)
		{
			DMAThreads.Start();
			RunCALDGEMMMain(0);
			DMAThreads.Sync();
		}
		else
		{
			if (RunCALDGEMMMain()) return(1);
		}
		if (RunCALDGEMM_Exit()) return(0);
		if (Config->ImprovedScheduler)
		{
			if (!Config->PreallocData) delete[] tileDistribution;
		}
		// Verify intermediate iterations (the last one is verified by the
		// caller, presumably — TODO confirm).
		if(Config->Verify && i < Config->Iterations - 1) AnalyzeResults();
	}
	Timers.GPUTimer.Stop();
	if (Config->MultiThread && Config->UseCPU)
	{
		// Measure how long we wait for the CPU part after the GPU finished.
		Timers.ATime.Reset();
		Timers.ATime.Start();
	}
	// When this runs on the main thread (SpawnGPUThread == -2), restore the
	// thread's original CPU affinity mask.
	if (Config->SpawnGPUThread == -2)
	{
		if (Config->Debug) fprintf(STD_OUT, "Caldgemm Main Thread, setting CPU mask %X\n", getcpumask(&oldcpumask));
		sched_setaffinity(0, sizeof(oldcpumask), &oldcpumask);
	}
	return(0);
}
// Worker body shared by the persistent helper thread (thread == true) and
// direct single-shot invocation (thread == false). Depending on configuration
// the helper runs either the GPU part (SpawnGPUThread != -2) or the CPU part.
// Handshake: cblasMutex[1] signals "work available", cblasMutex[0] signals
// "work done" back to the caller.
void* caldgemm::cblas_wrapper_a(bool thread)
{
	if (thread)
	{
		// One-time thread setup: name, OMP pinning, CPU affinity.
		setThreadName(Config->SpawnGPUThread != -2 ? "GPU Wrapper" : "CBLAS Wrapper");
		if (Config->Debug) fprintf(STD_OUT, "Cblas helper thread started\n");
		if (Config->SpawnGPUThread == -2)
		{
			ensure_omp_thread_pinning("CBLAS");
		}
		if (Config->SpawnGPUThread != -2)
		{
			// This thread will run the GPU part: pin to the GPU core mask.
			sched_setaffinity(0, sizeof(gpumask), &gpumask);
		}
		else if (Config->GPUMapping[0] + outputthreads * nDevices + 1 >= conf_numprocs)
		{
			// Not enough spare cores after the GPU helper threads: pin to
			// core 0 (plus the configured offset).
			sched_setaffinity_set_core(0 + Config->CPUCoreOffset);
		}
		else
		{
			// Otherwise inherit the original caller affinity mask.
			if (Config->Debug) fprintf(STD_OUT, "Cblas thread Thread, setting CPU mask %X\n", getcpumask(&oldcpumask));
			sched_setaffinity(0, sizeof(oldcpumask), &oldcpumask);
		}
		// First Lock pairs with the Unlock issued at spawn time; the loop
		// condition below then blocks for each unit of work.
		cParam.cblasMutex[1].Lock();
	}
	while (cParam.cblasMutex[1].Lock() == 0 && cParam.terminate == false)
	{
		if (Config->SpawnGPUThread != -2)
		{
			caldgemm_part_gpu();
		}
		else
		{
			caldgemm_part_cpu();
		}
		// Signal completion to the thread that queued the work.
		if (Config->Debug) fprintf(STD_OUT, "\t\tUnlocking cblasmutex 0\n");
		cParam.cblasMutex[0].Unlock();
		if (!thread) break;
	}
	if (thread)
	{
		// terminate was set: acknowledge and exit the pthread.
		if (Config->Debug) fprintf(STD_OUT, "blas slave terminating\n");
		cParam.cblasMutex[0].Unlock();
		pthread_exit(NULL);
	}
	return(NULL);
}
void* caldgemm::divide_wrapper(void* arg)
{
return ((divideParameters*) arg)->cls->divide_wrapper_a((divideParameters*) arg);
}
void* caldgemm::divide_wrapper_a(divideParameters* par)
{
if (Config->Debug) fprintf(STD_OUT, "Divide Thread %d for core %d started\n", par->nThread, par->CPUCore);
{
char tmp[128];
sprintf(tmp, "Divide %d", par->nThread);
setThreadName(tmp);
}
sched_setaffinity_set_core(par->CPUCore + Config->CPUCoreOffset);
par->curDevice = -1;
for (int i = 0;i < nDevices;i++)
{
if (Config->GPUMapping[i] == par->CPUCore)
{
if (par->curDevice == 1) par->curDevice = i;
DGEMMTasks[i].next_device = &par->curDevice;
}
}
double* tmpBuffer = allocDivideBuffer();
for (int i = 0;i < nDevices;i++)
{
if (Config->GPUMapping[i] == par->CPUCore)
{
par->firstDevice = i;
break;
}
}
int mutex_to_unlock = par->nThread;
int i = 0;
while (true)
{
if (Config->GPUMapping[i] == par->CPUCore)
{
par->reset = 0;
par->curDevice = i;
DGEMMTasks[mutex_to_unlock].mutex_finished.Unlock();
if (Config->Debug) fprintf(STD_OUT, "Divide Thread %d on Core %d waiting to operate on device %d\n", par->nThread, par->CPUCore, i);
DGEMMTasks[i].mutex_start.Lock();
if (par->terminate) break;
if (par->reset)
{
if (Config->Debug) fprintf(STD_OUT, "Divide Thread %d resetting\n", par->nThread);
i = par->firstDevice;
mutex_to_unlock = i;
continue;
}
if (DGEMMTasks[i].skip_device_to != -1)
{
//fprintf(STD_OUT, "Skipping device %d, switching to %d\n", i, DGEMMTasks[i].skip_device_to);
const int oldi = i;
i = DGEMMTasks[i].skip_device_to;
DGEMMTasks[oldi].skip_device_to = -1;
DGEMMTasks[i].mutex_start.Lock();
}
if (Config->Debug) fprintf(STD_OUT, "Divide Thread for device %d Starting processing (k = %d)\n", i, DGEMMTasks[i].k);
DGEMMPrepareAndExecute(DGEMMTasks[i] CALDGEMM_DIVBUFB);
mutex_to_unlock = i;
}
i = (i + 1) % nDevices;
}
freeDivideBuffer(tmpBuffer);
if (Config->Debug) fprintf(STD_OUT, "Divide Thread %d for Core %d terminating\n", par->nThread, par->CPUCore);
DGEMMTasks[par->nThread].mutex_finished.Unlock();
pthread_exit(NULL);
return(NULL);
}
void* caldgemm::merge_wrapper(void* arg)
{
return ((mergeParameters*) arg)->cls->merge_wrapper_a((mergeParameters*) arg);
}
void* caldgemm::merge_wrapper_a(mergeParameters* par)
{
{
char tmp[128];
sprintf(tmp, "Merge %d/%d", par->num_device, par->nMergeThread);
setThreadName(tmp);
}
if (Config->Debug) fprintf(STD_OUT, "Merger Thread %d started\n", par->nMergeThread);
int merge_core;
if (Config->PostprocessMapping[par->num_device] == -1)
{
merge_core = Config->GPUMapping[par->num_device] + par->nMergeThread + 1;
for (int i = 0;i < par->num_device;i++)
{
if (Config->GPUMapping[i] == Config->GPUMapping[par->num_device]) merge_core += outputthreads;
}
}
else
{
merge_core = Config->PostprocessMapping[par->num_device] + par->nMergeThread;
}
if (Config->Debug) fprintf(STD_OUT, "Merge Thread %d, core %d\n", par->nMergeThread, merge_core);
sched_setaffinity_set_core(merge_core % conf_numprocs_real + Config->CPUCoreOffset);
//HighResTimer mergeTimer;
par->mergeThreadMutex[0].Lock();
while (par->mergeThreadMutex[0].Lock() == 0 && par->terminate == false)
{
if (Config->Debug) fprintf(STD_OUT, "\t\tSlave thread %d (device %d) starting merge process for obuffer %d (k = %lld)\n", par->nMergeThread, par->num_device, par->nContext, (long long int) par->k);
size_t blockm, blockn;
DGEMM_getblocks(par->k, blockm, blockn);
/*if (Config->Debug)
{
mergeTimer.Reset();
mergeTimer.Start();
}*/
RunMergeBuffers(par->dst, par->num_device, par->nContext, (blockn == gpu_n / Config->Height) ? (gpu_n % Config->Height) : Config->Height, (blockm == gpu_m / Config->Height) ? (gpu_m % Config->Height) : Config->Height, BufferHeight, BufferHeight, C_pitch);
/*if (Config->Debug)
{
mergeTimer.Stop();
fprintf(STD_OUT, "\t\tMerge time: %2.3f\n", mergeTimer.GetElapsedTime());
}*/
if (!Config->SimpleGPUQueuing) CheckAlternateTilesRemaining(blockm);
if (Config->Debug) fprintf(STD_OUT, "\t\tUnlocking mutex device %d obuffer %d (Slavethread %d)\n", par->num_device, par->nContext, par->nMergeThread);
obufferMutex[par->num_device][par->nContext].Unlock();
par->mergeThreadMutex[1].Unlock();
}
if (Config->Debug) fprintf(STD_OUT, "merge slave %d terminating\n", par->nMergeThread);
par->mergeThreadMutex[1].Unlock();
pthread_exit(NULL);
return(NULL);
}
// Dumps the DGEMM operands and parameters to the first free file named
// dump<i>.out (i < 100) in the current directory, for offline debugging.
// File layout: the raw header values below (including the pointer values of
// a/b/c, written as-is), followed by the rows of A and B. C contents are not
// written. Returns 0 on success, 1 on failure.
int caldgemm::DumpMatrix(double* a, double* b, double* c, double alpha, double beta, int tmp_m, int tmp_k, int tmp_n, int Apitch, int Bpitch, int Cpitch)
{
	int i = 0;
	char filename[256];
	FILE* fp = NULL;
	// Probe dump0.out, dump1.out, ... until a name that does not exist yet.
	do
	{
		if (fp) fclose(fp);
		sprintf(filename, "dump%d.out", i++);
	} while ((fp = fopen(filename, "r")) != NULL && i < 100);
	if (i == 100)
	{
		// All 100 candidate names taken; give up.
		if (fp) fclose(fp);
		return(1);
	}
	fp = fopen(filename, "w+b");
	// BUGFIX: fopen can fail (permissions, disk full, read-only cwd); the
	// original code passed a NULL FILE* straight into fwrite.
	if (fp == NULL) return(1);
	int nWritten = 0;
	// Header: pointer values and scalar parameters, written verbatim.
	nWritten += fwrite(&a, sizeof(a), 1, fp);
	nWritten += fwrite(&b, sizeof(b), 1, fp);
	nWritten += fwrite(&c, sizeof(c), 1, fp);
	nWritten += fwrite(&alpha, sizeof(alpha), 1, fp);
	nWritten += fwrite(&beta, sizeof(beta), 1, fp);
	nWritten += fwrite(&tmp_m, sizeof(tmp_m), 1, fp);
	nWritten += fwrite(&tmp_k, sizeof(tmp_k), 1, fp);
	nWritten += fwrite(&tmp_n, sizeof(tmp_n), 1, fp);
	nWritten += fwrite(&Apitch, sizeof(Apitch), 1, fp);
	nWritten += fwrite(&Bpitch, sizeof(Bpitch), 1, fp);
	nWritten += fwrite(&Cpitch, sizeof(Cpitch), 1, fp);
	// Matrix data: A is tmp_m rows of tmp_k doubles, B is tmp_k rows of
	// tmp_n doubles, each row starting at its pitch offset.
	for (i = 0;i < tmp_m;i++)
	{
		nWritten += fwrite(a + i * Apitch, sizeof(double), tmp_k, fp);
	}
	for (i = 0;i < tmp_k;i++)
	{
		nWritten += fwrite(b + i * Bpitch, sizeof(double), tmp_n, fp);
	}
	fclose(fp);
	// Coarse success check: at least something must have been written.
	if (nWritten == 0) return(1);
	return(0);
}
// Blocks until the Linpack LASWP/DTRSM progress counter (*Config->LinpackSwapN,
// advanced by the Linpack side) covers block row 'blockm', so the GPU may
// safely read those rows of B. No-op when no swap counter is configured.
void caldgemm::WaitForLASWP(size_t blockm)
{
	if (Config->LinpackSwapN != NULL)
	{
		int shown = false;
		// Rows that must be swapped/updated before block row 'blockm' of the
		// GPU area may be consumed (clamped to the GPU part of the matrix).
		size_t need = (blockm + 1) * Config->Height;
		if (need > gpu_m) need = gpu_m;
		// With standard lookahead the factorization panel of Config->Width
		// rows must be ready as well.
		if (ExecLinpack >= 2 && Config->AlternateLookahead <= matrix_n) need += Config->Width;
		//if (Config->Debug) fprintf(STD_OUT, "Checking LASWP / DTRSM... current: %lld need: %lld\n", (long long int) *Config->LinpackSwapN, (long long int) need);
		// Spin-wait on the externally advanced counter, optionally sleeping
		// between polls to reduce CPU load.
		while (*Config->LinpackSwapN < need)
		{
			if (Config->Debug && shown == false)
			{
				fprintf(STD_OUT, "Waiting for LASWP / DTRSM... current: %lld need: %lld\n", (long long int) *Config->LinpackSwapN, (long long int) need);
				shown = true;
			}
#ifdef _WIN32
			// LASWPSleep is in microseconds; Windows Sleep takes milliseconds.
			if (Config->LASWPSleep) Sleep(Config->LASWPSleep / 1000);
#else
			if (Config->LASWPSleep) usleep(Config->LASWPSleep);
#endif
		}
	}
}
// Simple-GPU-queuing variant of the alternate-lookahead tile check: this
// base implementation has nothing to track and always reports success.
int caldgemm::CheckAlternateTilesRemainingSQ()
{
	return 0;
}
// Alternate-lookahead bookkeeping: counts down the tiles belonging to the
// first AlternateLookaheadBlocksM block rows as they finish merging; when the
// last one completes, releases alternateLookaheadMutex so the waiting thread
// may start the Linpack factorization.
void caldgemm::CheckAlternateTilesRemaining(size_t m)
{
	// Only relevant with lookahead depth >= 2, alternate mode active for the
	// current n, and tiles still outstanding.
	if (ExecLinpack >= 2 && Config->AlternateLookahead > matrix_n && AlternateLookaheadTilesRemaining)
	{
		//if (Config->Debug) fprintf(STD_OUT, "Checking Alternate Tiles: m = %lld - Remaining = %d\n", (long long int) m, (int) AlternateLookaheadTilesRemaining);
		// Only tiles from the initial block rows count toward the total.
		if ((int) m < AlternateLookaheadBlocksM)
		{
			// Mutex guards the shared countdown: merge threads of several
			// devices may arrive here concurrently.
			pthread_mutex_lock(&tilesRemainingMutex);
			if (--AlternateLookaheadTilesRemaining == 0)
			{
				if (Config->Debug) fprintf(STD_OUT, "GPU done with initial part, factorization may start\n");
				alternateLookaheadMutex.Unlock();
			}
			pthread_mutex_unlock(&tilesRemainingMutex);
		}
	}
}
// Pre-allocates the per-device buffer bookkeeping arrays and the global
// tile-to-device distribution table, all zero-initialized, sized for up to
// Config->PreallocData blocks per matrix dimension. Returns 0.
int caldgemm::Preallocate()
{
	const int slots = Config->PreallocData;
	for (int dev = 0; dev < nDevices; dev++)
	{
		// new int[n]() value-initializes the array to all zeros.
		buffer_pointers_A[dev] = new int[slots]();
		buffer_pointers_B[dev] = new int[slots]();
	}
	tileDistribution = new int[slots * slots]();
	return 0;
}
// Releases everything acquired by Preallocate(). Returns 0.
int caldgemm::PreallocateFree()
{
	delete[] tileDistribution;
	for (int dev = 0; dev < nDevices; dev++)
	{
		delete[] buffer_pointers_B[dev];
		delete[] buffer_pointers_A[dev];
	}
	return 0;
}
// Sets the number of active devices, clamped first to at least 1 and then to
// at most the number of devices that were initialized.
void caldgemm::SetNumberDevices(int n)
{
	int count = n;
	if (count <= 0) count = 1;
	if (count > nDevicesInitialized) count = nDevicesInitialized;
	nDevices = count;
}
// Default implementation of the asynchronous single-tile DGEMM queue: this
// is an optional backend feature, so the base class reports failure and the
// caller must fall back to the synchronous path.
int caldgemm::RunAsyncSingleTileDGEMM(const double* A, const double* B, double* C, double alpha, double beta, size_t m, size_t k, size_t n, size_t Apitch, size_t Bpitch, size_t Cpitch, bool orderColMajor, bool TransA, bool TransB)
{
	fprintf(STD_OUT, "Async Queue not supported by backend\n");
	return 1;
}
// Default implementation of the asynchronous single-tile DTRSM queue: not
// supported by the base backend; callers must use the synchronous path.
int caldgemm::RunAsyncSingleTileDTRSM(const CBLAS_ORDER Order, const CBLAS_SIDE Side, const CBLAS_UPLO Uplo, const CBLAS_TRANSPOSE TransA, const CBLAS_DIAG Diag, const size_t M, const size_t N, const double alpha, const double *A, const size_t lda, double *B, const size_t ldb)
{
	fprintf(STD_OUT, "Async Queue not supported by backend\n");
	return 1;
}
int caldgemm::RunCALDGEMMMain(int parallelDevice)
{
const size_t mb = (gpu_m + Config->Height - 1) / Config->Height;
const size_t nb = (gpu_n + Config->Height - 1) / Config->Height;
const size_t nBlocks = mb * nb;
//Check for double == 1.0 is unsafe and causes compiler warning
const unsigned long long int double_one = 0x3FF0000000000000; //1.0 in double
#if defined(CALDGEMM_44) && !defined(CALDGEMM_USE_MEMEXPORT)
const unsigned long long int double_minus_one = 0xBFF0000000000000;
const int kernel_num = Config->ForceKernelVariant != -1 ? Config->ForceKernelVariant : (((Config->Width == BufferWidth && reinterpret_cast<unsigned long long int &>(reinterpret_cast<char &>(Beta)) == double_one && reinterpret_cast<unsigned long long int &>(reinterpret_cast<char &>(Alpha)) == double_minus_one) ? 2 : (reinterpret_cast<unsigned long long int &>(reinterpret_cast<char &>(Alpha)) == double_one)));
#else
const int kernel_num = Config->ForceKernelVariant != -1 ? Config->ForceKernelVariant : ((reinterpret_cast<unsigned long long int &>(Alpha) == double_one));
#endif
if ((Config->Debug) && Config->UseGPU) fprintf(STD_OUT, "Using Kernel %d (alpha=0x%llX (%2.3f), width = %lld)\n", kernel_num, (reinterpret_cast<long long int &>(Alpha)), Alpha, (long long int) Config->Width);
int oldj[max_devices];
int j[max_devices];
int iMergeThread[max_devices];
size_t blockm = 0, blockn = 0;
unsigned long long int lastk[max_devices];
size_t nextk = 0;
size_t next_device_k[max_devices];
int ImprovedSchedPhase1 = Config->ImprovedScheduler;
int forcePreparation[max_devices];
int myUseDevice = 0;
int myNDevices;
int myDevices[max_devices] = {0};
if (parallelDevice == -1)
{
myNDevices = nDevices;
for (int i = 0;i < nDevices;i++) myDevices[i] = i;
}
else if (matrix_n >= Config->GroupParallelDMA)
{
myNDevices = 1;
myDevices[0] = parallelDevice;
}
else
{
myNDevices = 0;
for (int i = 0;i < nDevices;i++)
{
if (Config->AllocMapping[i] == Config->DMAMapping[parallelDevice]) myDevices[myNDevices++] = i;
}
if (myNDevices == 0) return(0);
}
int use_device = myDevices[myUseDevice];
for (int tl = 0;tl < myNDevices;tl++)
{
int l = myDevices[tl];
next_device_k[l] = (!Config->ImprovedScheduler || first_device_k[l] == -1) ? 0 : first_device_k[l];
j[l] = 0;
iMergeThread[l] = 0;
lastk[l] = -1;
forcePreparation[l] = 0;
if (!Config->PreallocData)
{
buffer_pointers_A[l] = new int[mb];
buffer_pointers_B[l] = new int[nb];
}
for (size_t ll = 0;ll < mb;ll++) buffer_pointers_A[l][ll] = -1;
for (size_t ll = 0;ll < nb;ll++) buffer_pointers_B[l][ll] = -1;
}
bool cpu_k_barrier_hit = false;
if (gpu_n && gpu_m)
{
int currentPinning = Config->PinMainThread;
#ifdef CALDGEMM_LOOP_DETECTION
int loop_detect = -1, loop_detect2 = -1;
#endif
for (size_t k = 0;k < nBlocks + 2 * myNDevices;k++)
{
restartkloop:
//fprintf(STD_OUT, "!!!!! k %lld nd k %lld nextk %lld\n", (long long int) k, (long long int) next_device_k[use_device], (long long int) nextk);
if (Config->ImprovedScheduler && !ImprovedSchedPhase1 && tileDistribution[next_device_k[use_device]] < 0) next_device_k[use_device] = 0;
if (next_device_k[use_device] != 0) k = next_device_k[use_device];
else if (nextk && nextk >= k) k = nextk + 1;
if (next_device_k[use_device] >= nBlocks) next_device_k[use_device] = 0;
if (k > nextk) nextk = k;
if (k < nBlocks)
{
if (ImprovedSchedPhase1)
{
while (k < nBlocks && tileDistribution[k] != use_device)
{
if (Config->Debug) fprintf(STD_OUT, "Skipping tile %lld (m=%lld n=%lld) for device %d, will be processed by device %d\n", (long long int) k, (long long int) blockm, (long long int) blockn, use_device, tileDistribution[k]);
k++;
}
if (k == nBlocks && parallelDevice == -1 && (Config->DynamicSched || (signed) nBlocks < 2 * nDevices)) goto endimprovedphase;
if (k >= nBlocks)
{
next_device_k[use_device] = 0;
if(!((obuffercount > 1) ? ((signed) lastk[use_device] != -1) : (k < nBlocks))) break;
}
}
#ifdef CALDGEMM_LOOP_DETECTION
if (loop_detect2 == (signed) k)
{
fprintf(STD_OUT, "SCHEDULING ERROR A: Loop Detected, device = %d, k = %lld, next_device_k = %lld, nextk = %lld, ImprovedSched = %d, Phase1 = %d\n", use_device, (long long int) k, (long long int) next_device_k[use_device], (long long int) nextk, (int) Config->ImprovedScheduler, (int) ImprovedSchedPhase1);
exit(1);
}
loop_detect2 = k;
#endif
}
if (k < nBlocks)
{
if (Config->ImprovedScheduler)
{
if (k >= nBlocks || tileDistribution[k] < 0)
{
if (Config->Debug)
{
DGEMM_getblocks(k, blockm, blockn);
fprintf(STD_OUT, "Tile %lld (m=%lld n=%lld) already processed, skipping\n", (long long int) k, (long long int) blockm, (long long int) blockn);
}
#ifdef CALDGEMM_LOOP_DETECTION
if (loop_detect == (signed) k)
{
fprintf(STD_OUT, "SCHEDULING ERROR B: Loop Detected, k = %lld, next_device_k = %lld, nextk = %lld, ImprovedSched = %d, Phase1 = %d\n", (long long int) k, (long long int) next_device_k[use_device], (long long int) nextk, (int) Config->ImprovedScheduler, (int) ImprovedSchedPhase1);
exit(1);
}
loop_detect = k;
#endif
next_device_k[use_device] = 0;
continue;
}
}
#ifdef CALDGEMM_LOOP_DETECTION
loop_detect = loop_detect2 = -1;
#endif
DGEMM_getblocks(k, blockm, blockn);
if (cParam.dynamic_run)
{
if (DGEMM_favor_m)
{
if (blockm * Config->Height >= gpu_m - cParam.dynamic_run && blockn * Config->Height >= gpu_n - cParam.dynamic_size)
{
if (Config->Debug) fprintf(STD_OUT, "GPU skipping k = %lld (m=%lld n=%lld) (Dynamic Run 2nd Phase)\n", (long long int) k, (long long int) blockm, (long long int) blockn);
next_device_k[use_device] = 0;
continue;
}
}
else
{
if (blockn * Config->Height >= gpu_n - cParam.dynamic_run && blockm * Config->Height >= gpu_m - cParam.dynamic_size)
{
if (Config->Debug) fprintf(STD_OUT, "GPU skipping k = %lld (m=%lld n=%lld)(Dynamic Run 2nd Phase)\n", (long long int) k, (long long int) blockm, (long long int) blockn);
next_device_k[use_device] = 0;
continue;
}
}
}
if (Config->MultiThread) pthread_mutex_lock(&scheduleMutex);
if ((signed) k < cpu_k_barrier)
{
if ((signed int) k > (signed int) gpu_k_barrier)
{
gpu_k_barrier = k;
}
}
else
{
if (Config->Debug) fprintf(STD_OUT, "gpu_k %lld (m=%lld n=%lld) reached cpu_k_barrier %lld, skipping remaining k (Dynamic Run 3rd Phase)\n", (long long int) k, (long long int) blockm, (long long int) blockn, (long long int) cpu_k_barrier);
k = nBlocks;
if (nextk < nBlocks) nextk = nBlocks;
next_device_k[use_device] = 0;
cpu_k_barrier_hit = true;
}
if (Config->MultiThread) pthread_mutex_unlock(&scheduleMutex);
}
if (ImprovedSchedPhase1 && k >= nBlocks && parallelDevice == -1 && (Config->DynamicSched || (signed) nBlocks < 2 * nDevices))
{
endimprovedphase:
if (Config->Debug) fprintf(STD_OUT, "First improved scheduling phase ended\n");
ImprovedSchedPhase1 = 0;
k = nextk = 0;
for (int l = 0;l < nDevices;l++)
{
next_device_k[l] = 0;
forcePreparation[l] = 1;
}
goto restartkloop;
}
if (Config->RepinMainThreadAlways && currentPinning != Config->AllocMapping[use_device])
{
sched_setaffinity_set_core(Config->AllocMapping[use_device] + Config->CPUCoreOffset);
if (Config->Debug) fprintf(STD_OUT, "Repinning to %d\n", Config->AllocMapping[use_device]);
currentPinning = Config->AllocMapping[use_device];
}
if (k < nBlocks)
{
if (Config->Debug) fprintf(STD_OUT, "Iteration k = %lld, m = %lld, n = %lld (device %d obuffer %d)\n", (long long int) k, (long long int) blockm, (long long int) blockn, use_device, j[use_device]);
if (Config->MultiThreadDivide && parallelDevice == -1 && Config->GPUMapping[use_device] != Config->PinMainThread && UseInputPthreads() && DGEMMTasks[use_device].thread_running)
{
DGEMMTasks[use_device].thread_running = 0;
if (Config->Debug) fprintf(STD_OUT, "Waiting for divide thread for device %d (k=%lld lastk = %lld j=%d)\n", use_device, (long long int) k, lastk[use_device], oldj[use_device]);
int tmpval = DGEMMTasks[use_device].mutex_finished.Trylock();
if (tmpval == EBUSY)
{
int tmp_device = *(DGEMMTasks[use_device].next_device);
if (tmp_device != use_device && DGEMMTasks[tmp_device].thread_running == 0)
{
if (Config->Debug) fprintf(STD_OUT, "Divide thread waiting for wrong device, skipping device %d\n", tmp_device);
DGEMMTasks[tmp_device].skip_device_to = use_device;
DGEMMTasks[tmp_device].mutex_start.Unlock();
}
DGEMMTasks[use_device].mutex_finished.Lock();
}
else if (tmpval) fprintf(STD_OUT, "ERROR locking mutex_finished: %s - %d\n", __FILE__, __LINE__);
if (Config->Debug) fprintf(STD_OUT, "Main thread: Divide thread for device %d finished\n", use_device);
}
DGEMMPrepareAndExecuteTask& Task = DGEMMTasks[use_device];
Task.PrepareTasks[0].j = Task.PrepareTasks[1].j = -1;
Task.kernel_num = kernel_num;
Task.k = k;
Task.j = j[use_device];
if (next_device_k[use_device] == 0 || (signed) lastk[use_device] == -1 || obuffercount == 1 || Config->AsyncDMA == false || forcePreparation[use_device])
{
Task.PrepareTasks[0].k = k;
Task.PrepareTasks[0].j = j[use_device];
if (Config->ImprovedScheduler && !ImprovedSchedPhase1)
{
if ((size_t) buffersMajor[use_device] != (DGEMM_favor_m ? blockm : blockn))
{
if (Config->Debug) fprintf(STD_OUT, "Resetting favored directions buffers for device %d\n", use_device);
buffersMajor[use_device] = -1;
}
}
forcePreparation[use_device] = 0;
}
if (obuffercount > 1 && (signed) lastk[use_device] != -1 && Config->AsyncDMA && k + (myNDevices - myUseDevice - 1) % myNDevices + 1 < nBlocks && cpu_k_barrier_hit == false)
{
if (ImprovedSchedPhase1) nextk = k + 1;
else nextk++;
size_t nextblockm, nextblockn;
DGEMM_getblocks(nextk, nextblockm, nextblockn);
if (cParam.dynamic_run || Config->ImprovedScheduler)
{
while ( nextk < nBlocks && (
(cParam.dynamic_run && (DGEMM_favor_m ? (nextblockm * Config->Height >= gpu_m - cParam.dynamic_run && nextblockn * Config->Height >= gpu_n - cParam.dynamic_size) :
(nextblockn * Config->Height >= gpu_n - cParam.dynamic_run && nextblockm * Config->Height >= gpu_m - cParam.dynamic_size))) ||
(Config->ImprovedScheduler && tileDistribution[nextk] < 0) ||
(ImprovedSchedPhase1 && tileDistribution[nextk] != use_device)
)
)
{
nextk++;
DGEMM_getblocks(nextk, nextblockm, nextblockn);
}
}
if ((signed) nextk < cpu_k_barrier)
{
Task.PrepareTasks[1].k = nextk;
Task.PrepareTasks[1].j = (j[use_device] + 1) % obuffercount;
}
next_device_k[use_device] = nextk;
}
else
{
if (ImprovedSchedPhase1)
{
next_device_k[use_device] = k + 1;
forcePreparation[use_device] = 1;
}
else
{
next_device_k[use_device] = 0;
}
}
if (Config->ImprovedScheduler) tileDistribution[k] = -1;
if (Config->MultiThreadDivide && parallelDevice == -1 && Config->GPUMapping[use_device] != Config->PinMainThread && UseInputPthreads() && cpu_k_barrier_hit == false)
{
if (Config->Debug) fprintf(STD_OUT, "Starting PrepareAndExecute task on divide thread for device %d (k = %lld)\n", use_device, (long long int) k);
DGEMMTasks[use_device].mutex_start.Unlock();
DGEMMTasks[use_device].thread_running = 1;
}
else
{
#ifdef CALDGEMM_DIVIDE_STATIC_BUFFER
double* __restrict__ tmpBuffer = divide_tmpBuffer;
#endif
if (DGEMMPrepareAndExecute(Task CALDGEMM_DIVBUFB)) return(1);
}
}
if (obuffercount == 1)
{
oldj[use_device] = j[use_device];
lastk[use_device] = k;
}
if ((obuffercount > 1) ? ((signed) lastk[use_device] != -1) : (k < nBlocks))
{
if (nBlocks <= k && (signed) lastk[use_device] < cpu_k_barrier && Config->MultiThreadDivide && parallelDevice == -1 && Config->GPUMapping[use_device] != Config->PinMainThread && UseInputPthreads() && DGEMMTasks[use_device].thread_running)
{
DGEMMTasks[use_device].thread_running = 0;
if (Config->Debug) fprintf(STD_OUT, "Waiting for divide thread for device %d (late phase, k=%lld lastk = %lld j=%d)\n", use_device, (long long int) k, lastk[use_device], oldj[use_device]);
int tmpval = DGEMMTasks[use_device].mutex_finished.Trylock();
if (tmpval == EBUSY)
{
int tmp_device = *(DGEMMTasks[use_device].next_device);
if (tmp_device != use_device && DGEMMTasks[tmp_device].thread_running == 0)
{
if (Config->Debug) fprintf(STD_OUT, "Divide thread waiting for wrong device (late phase), skipping device %d\n", tmp_device);
DGEMMTasks[tmp_device].skip_device_to = use_device;
DGEMMTasks[tmp_device].mutex_start.Unlock();
}
DGEMMTasks[use_device].mutex_finished.Lock();
}
else if (tmpval) fprintf(STD_OUT, "ERROR trylocking mutex_finished: %s - %d\n", __FILE__, __LINE__);
}
size_t lastm, lastn;
DGEMM_getblocks(lastk[use_device], lastm, lastn);
int must_lock = 0;
if (Config->ThreadSaveDriver != 1)
{
if (parallelDevice >= 0)
{
must_lock = 1;
}
else if (Config->MultiThreadDivide) for (int ii = 0;ii < nDevices;ii++)
{
if (Config->GPUMapping[ii] != Config->PinMainThread)
{
must_lock = 1;
break;
}
}
}
if ((signed long int) lastk[use_device] != -1 && lastk[use_device] < nBlocks)
{
while (DGEMMPrepareTaskEventReady[use_device][oldj[use_device]] == false);
DGEMMPrepareTaskEventReady[use_device][oldj[use_device]] = false;
if (WaitForEvent(oldj[use_device], use_device, must_lock)) return(1);
if (Config->Debug && Config->GPU_C == 0) fprintf(STD_OUT, "Processing Output (Iteration %lld) for device %d tile %lld (m = %lld, n = %lld)\n", (long long int) k, use_device, (long long int) lastk[use_device], (long long int) lastm, (long long int) lastn);
if (Config->UseDMAFetchQueue >= matrix_n && Config->DstMemory == 'g')
{
if (CheckDMAQueue(use_device, oldj[use_device])) return(1);
}
else if (Config->ImplicitDriverSync == 0 && Config->DstMemory == 'g')
{
if (FetchResult(use_device, oldj[use_device], lastm, lastn, Config->MultiThread && UseMutexPerDevice())) {fprintf(STD_OUT, "Error copying from GPU\n");return(1);}
if (WaitForEvent(oldj[use_device], use_device)) return(1);
}
}
if (Config->VerboseTiming) Timers.CounterMerge.Start();
if (k == nBlocks + 2 * myNDevices - 1 || Config->MultiThread == false || UseOutputPthreads() == 0)
{
if (lastk[use_device] < nBlocks)
{
if (Config->Debug && Config->GPU_C == 0) fprintf(STD_OUT, "\tMerging buffer (device %d, obuffer %d, k = %lld, main thread)\n", use_device, oldj[use_device], (long long int) lastk[use_device]);
if (RunMergeBuffers(C + lastn * Config->Height + lastm * C_pitch * Config->Height, use_device, oldj[use_device], (lastn == gpu_n / Config->Height) ? (gpu_n % Config->Height) : Config->Height, (lastm == gpu_m / Config->Height) ? (gpu_m % Config->Height) : Config->Height, BufferHeight, BufferHeight, C_pitch)) {fprintf(STD_OUT, "Error merging\n"); return(1);}
if (!Config->SimpleGPUQueuing) CheckAlternateTilesRemaining(lastm);
if (Config->Debug) fprintf(STD_OUT, "Main thread unlocking obuffer mutex device %d obuffer %d\n", use_device, oldj[use_device]);
if (Config->MultiThread && UseOutputPthreads()) obufferMutex[use_device][oldj[use_device]].Unlock();
}
if (Config->MultiThread && UseOutputPthreads())
{
for (int l = 0;l < obuffercount;l++)
{
for (int tll = 0;tll < myNDevices;tll++)
{
int ll = myDevices[tll];
if ((ll != use_device || l != oldj[ll]) && (signed) lastk[ll] != -1)
{
if (Config->Debug) fprintf(STD_OUT, "Waiting to finish merge process for device %d obuffer %d\n", ll, l);
obufferMutex[ll][l].Lock();
obufferMutex[ll][l].Unlock();
}
}
}
}
}
else if (lastk[use_device] < nBlocks)
{
if (Config->AsyncTiming)
{
Timers.ATime.Reset();
Timers.ATime.Start();
}
mParam[use_device][iMergeThread[use_device]].mergeThreadMutex[1].Lock();
if (Config->AsyncTiming)
{
Timers.ATime.Stop();
if ((!Config->NoPerformanceWarnings && Timers.ATime.GetElapsedTime() > 0.001) || Config->Debug) fprintf(STD_OUT, "\t\tWARNING: Wait Time for merge thread: %1.5f\n", Timers.ATime.GetElapsedTime());
}
if (Config->Debug) fprintf(STD_OUT, "\t\tUnlocking outputthread mutex %d to process device %d obuffer %d\n", iMergeThread[use_device], use_device, oldj[use_device]);
mParam[use_device][iMergeThread[use_device]].nContext = oldj[use_device];
mParam[use_device][iMergeThread[use_device]].dst = C + (lastn * Config->Height + lastm * C_pitch * Config->Height);
mParam[use_device][iMergeThread[use_device]].k = lastk[use_device];
mParam[use_device][iMergeThread[use_device]].mergeThreadMutex[0].Unlock();
iMergeThread[use_device] = (iMergeThread[use_device] + 1) % outputthreads;
}
if (Config->VerboseTiming) Timers.CounterMerge.Stop();
}
oldj[use_device] = j[use_device];
j[use_device] = (j[use_device] + 1) % obuffercount;
lastk[use_device] = k;
if (Config->MultiThread)
{
myUseDevice = (myUseDevice + 1) % myNDevices;
use_device = myDevices[myUseDevice];
}
}
if (currentPinning != Config->PinMainThread)
{
sched_setaffinity(0, sizeof(gpumask), &gpumask);
}
}
if (Config->MultiThreadDivide && parallelDevice == -1 && UseInputPthreads())
{
for (int l = 0;l < divideThreads;l++)
{
if (dParam[l].curDevice != dParam[l].firstDevice)
{
dParam[l].reset = 1;
DGEMMTasks[dParam[l].curDevice].mutex_start.Unlock();
DGEMMTasks[dParam[l].firstDevice].mutex_finished.Lock();
}
}
}
if (Config->PreallocData == 0)
{
for (int tl = 0;tl < myNDevices;tl++)
{
int l = myDevices[tl];
delete[] buffer_pointers_A[l];
delete[] buffer_pointers_B[l];
}
}
return(0);
}
// Top-level DGEMM entry point: C = Alpha * A * B + Beta * C (row-major view, per
// the CblasRowMajor calls below). tmp_m/tmp_k/tmp_n are the matrix dimensions,
// Apitch/Bpitch/Cpitch the leading dimensions (-1 selects defaults), orderColMajor
// solves the transposed problem by swapping A/B and m/n, TransA/TransB select
// transposed inputs. ExecuteLinpackCallbacks (>0) interleaves the HPL
// factorize/broadcast callbacks; pipelined (with Config->PipelinedOperation)
// lets the call return before results are finalized -- FinishCALDGEMM() completes it.
// Returns 0 on success, nonzero on error.
int caldgemm::RunCALDGEMM(double* a, double* b, double* c, double alpha, double beta, size_t tmp_m, size_t tmp_k, size_t tmp_n, size_t Apitch, size_t Bpitch, size_t Cpitch, bool orderColMajor, bool TransA, bool TransB, int ExecuteLinpackCallbacks, int pipelined)
{
	if (!caldgemm_initialized)
	{
		fprintf(STD_OUT, "Caldgemm not initialized, aborting DGEMM run\n");
		return(1);
	}
#ifdef DEBUG_MSG_TIMED
	if (Config->Debug) printelapsedtime("Resetting Timer\n");
#endif
	// Degenerate sizes: nothing to multiply, but still honor a pending lookahead
	// swap and the Linpack callbacks so the HPL pipeline stays consistent.
	if (tmp_m == 0 || tmp_k == 0 || tmp_n == 0)
	{
		if (Config->LinpackSwapN != NULL)
		{
			HPL_CALDGEMM_gpu_height = 0;
			Config->linpack_swap_function();
			Config->LinpackSwapN = 0;
		}
		if (ExecuteLinpackCallbacks)
		{
			Timers.LinpackTimer1.Start();
			Config->linpack_factorize_function();
			Timers.LinpackTimer1.Stop();
			if (Config->LinpackNodes > 1)
			{
				Timers.LinpackTimer2.Start();
				Config->linpack_broadcast_function();
				Timers.LinpackTimer2.Stop();
			}
		}
		return(0); //Do Nothing
	}
	bool forceCPU = false;
	bool forceReinit = false;
	double GPURatio;
	int old_outputthreads = outputthreads;
	size_t MaxGpuM, MaxGpuN; //Maximal values of m and n that can be given to GPU, This is below m,n if ExecuteLinpackCallback = true
	// Capture the call parameters in member state used by the worker threads.
	A = a;
	B = b;
	C = c;
	Alpha = alpha;
	Beta = beta;
	matrix_m = tmp_m;
	matrix_n = tmp_n;
	if ((signed) tmp_k != -1) Config->Width = tmp_k;
	A_pitch = ((signed) Apitch != -1) ? Apitch : Config->Width;
	B_pitch = ((signed) Bpitch != -1) ? Bpitch : matrix_n;
	C_pitch = ((signed) Cpitch != -1) ? Cpitch : matrix_n;
	ResetTimers();
	// Column-major input: solve the transposed problem instead by swapping the
	// roles of A/B, of m/n, of the pitches, and of the transpose flags.
	if (orderColMajor)
	{
		double* tmpd;
		size_t tmpi;
		bool tmpt;
		tmpd = A; A = B; B = tmpd;
		tmpi = matrix_m; matrix_m = matrix_n; matrix_n = tmpi;
		tmpi = A_pitch; A_pitch = B_pitch; B_pitch = tmpi;
		tmpt = TransA;TransA = TransB;TransB = tmpt;
	}
	if (!Config->Quiet) fprintf(STD_OUT, "Starting DGEMM Run m=%lld k=%lld n=%lld Alpha=%f Beta=%f LDA=0x%lx LDB=0x%lx LDC=0x%lx At=%d Bt=%d ColMajor=%d (A=0x%llx, B=0x%llx, C=0x%llx, (C-A=%lld, (C-B)/w=%lld), Linpack=%d)\n", (long long int) matrix_m, (long long int) Config->Width, (long long int) matrix_n, Alpha, Beta, A_pitch, B_pitch, C_pitch, (int) (TransA), (int) (TransB), (int) (orderColMajor), (long long int) A, (long long int) B, (long long int) C, (long long int) ((size_t) C - (size_t) A) / sizeof(double), (long long int) ((size_t) C - (size_t) B) / sizeof(double) / Config->Width, (int) ExecuteLinpackCallbacks);
	TransposeA = TransA;
	TransposeB = TransB;
	ExecLinpack = ExecuteLinpackCallbacks;
	pipelinedRun = pipelined;
	// Remember the original problem so Verify / FinishCALDGEMM can refer to it
	// even after the pointers are advanced for the lookahead stripe below.
	orig_m = matrix_m;
	orig_n = matrix_n;
	orig_a = A;
	orig_b = B;
	orig_c = C;
	if (Config->Verify)
	{
		if (Config->PipelinedOperation)
		{
			fprintf(STD_OUT, "PipelinedOperation cannot be used in combination with Verify!\n");
			return(1);
		}
		// Keep a copy of the initial C so AnalyzeResults() can recompute a reference.
		D = new double[(size_t) matrix_m * (size_t) C_pitch];
		if (D == NULL)
		{
			fprintf(STD_OUT, "Memory allocation error\n");
			return(1);
		}
		memcpy(D, C, matrix_m * C_pitch * sizeof(double));
	}
	if (Config->DumpMatrix) DumpMatrix(A, B, C, Alpha, Beta, matrix_m, Config->Width, matrix_n, A_pitch, B_pitch, C_pitch);
	Timers.System.Start();
	// With lookahead (ExecLinpack >= 2, classic mode) the top Config->Width rows
	// are reserved for the CPU/factorization, so the GPU gets at most m - Width.
	if (ExecLinpack >= 2 && Config->AlternateLookahead <= matrix_n)
	{
		if (matrix_m < Config->Width)
		{
			MaxGpuM = 0;
		}
		else
		{
			MaxGpuM = matrix_m - Config->Width;
		}
	}
	else
	{
		MaxGpuM = matrix_m;
	}
	MaxGpuN = matrix_n;
#ifndef TESTMODE
	//Check if the GPU can/shall process the required dgemm task
	// (first branch is a deliberate empty statement: it short-circuits the else-if chain)
	if (Config->Iterations > 1 || !Config->UseCPU);
	else if (Config->Width % 8 || Config->Width < 256) forceCPU = true;
	else if (MaxGpuM < Config->Height / 2 || MaxGpuN < Config->Height / 2) forceCPU = true;
#ifdef _WIN32
	else if (Alpha == 0.) forceCPU = true;
#else
	else if (__fpclassify(Alpha) == FP_ZERO) forceCPU = true;
#endif
	else if (((size_t) A) & (vcpysize - 1) || ((size_t) B) & (vcpysize - 1) || ((size_t) C) & (vcpysize - 1) ||
		A_pitch & (vcpysize / sizeof(double) - 1) || B_pitch & (vcpysize / sizeof(double) - 1) || C_pitch & (vcpysize / sizeof(double) - 1))
	{
		fprintf(STD_OUT, "Input addresses not aligned correctly: A 0x%llX B 0x%llX C 0x%llX Pitch 0x%llX 0x%llX 0x%llX\n", (long long int) A, (long long int) B, (long long int) C, (long long int) A_pitch, (long long int) B_pitch, (long long int) C_pitch);
		forceCPU = true;
	}
#endif
	// Auto-select the GPU tile height from the problem size (unless a custom
	// macro overrides it); larger tiles for larger problems.
	if (Config->AutoHeight)
	{
#ifdef CALDGEMM_CUSTOM_AUTO_HEIGHT
#include CALDGEMM_CUSTOM_AUTO_HEIGHT
#else
		if (CaldgemmCustomAutoHeight(MaxGpuM, MaxGpuN, nDevices) == 0)
		{
			if (ExecLinpack >= 2 && !Config->SmallTiles)
			{
				// Thresholds tuned for lookahead/factorization runs.
				if (MaxGpuM < 1024 || MaxGpuN < 1024)
				{
					Config->Height = 512;
				}
				else if (MaxGpuM < 2048 || MaxGpuN < 2048 || (MaxGpuM * MaxGpuN < 13 * 14 * 1024 * 1024 && mymax(MaxGpuN, MaxGpuM) % 2048 >= 1024) || (MaxGpuM * MaxGpuN < 16 * 1024 * 1024))
				{
					Config->Height = 1024;
				}
				else if (MaxGpuM < 3072 || MaxGpuN < 3072 || (MaxGpuM * MaxGpuN < 20 * 21 * 1024 * 1024 && mymax(MaxGpuN, MaxGpuM) % 3072 >= 2048) || (MaxGpuM * MaxGpuN < 120 * 1024 * 1024))
				{
					Config->Height = 2048;
				}
				else if (MaxGpuM < 4096 || MaxGpuN < 4096 || MaxGpuM * MaxGpuN < 27 * 28 * 1024 * 1024)
				{
					Config->Height = 3072;
				}
				else
				{
					Config->Height = 4096;
				}
			}
			else
			{
				if (MaxGpuM < 1024 || MaxGpuN < 1024)
				{
					Config->Height = 512;
				}
				else if (MaxGpuM < 2048 || MaxGpuN < 2048 || MaxGpuM * MaxGpuN < (size_t) nDevices * 16 * 1024 * 1024)
				{
					Config->Height = 1024;
				}
				else if (MaxGpuM < 3072 || MaxGpuN < 3072 || MaxGpuM * MaxGpuN < (size_t) nDevices * 120 * 1024 * 1024)
				{
					Config->Height = 2048;
				}
				else if (MaxGpuM < 4096 || MaxGpuN < 4096 || MaxGpuM * MaxGpuN < (size_t) nDevices * 40 * 40 * 1024 * 1024)
				{
					Config->Height = 3072;
				}
				else
				{
					Config->Height = 4096;
				}
				// On slow CPUs shrink the tile so the edge remainder stays small.
				while (Config->SlowCPU && !Config->SmallTiles && Config->Height > 1024 && (MaxGpuM % Config->Height > 1024 || MaxGpuN % Config->Height > 1024)) Config->Height -= 1024;
			}
		}
#endif
		// Clamp to the preallocated buffer height and to the kernel tile granularity.
		if (Config->Height > BufferHeight) Config->Height = BufferHeight;
		if (Config->Height % KernelSettings.min_tile_size)
		{
			Config->Height = Config->Height > (size_t) KernelSettings.min_tile_size ? (Config->Height - Config->Height % KernelSettings.min_tile_size) : KernelSettings.min_tile_size;
		}
		if (Config->Debug) fprintf(STD_OUT, "Using Height %lld of max %lld\n", (long long int) Config->Height, (long long int) BufferHeight);
	}
	HPL_CALDGEMM_gpu_height = Config->Height;
	if (Config->UseGPU && (Config->Width > BufferWidth || Config->Height > BufferHeight)) forceReinit = true;
	if (Config->UseCPU)
	{
		// Small problems are not worth the GPU transfer/reinit overhead.
		if (Config->UseGPU == false || (forceReinit && (long long int) MaxGpuM * (long long int) MaxGpuN * (long long int) Config->Width < (long long int) 24 * 1024 * 1024 * 1024) || (Config->Width < 1024 && Config->Height < 1024) || (ExecLinpack && matrix_m < Config->Width)) forceCPU = true;
	}
	AlternateLookaheadTilesFull = 0;
	if (forceCPU)
	{
		// CPU-only fallback: run everything via cblas_dgemm, including the
		// Linpack factorize/broadcast callbacks if requested.
		if (Config->Debug) fprintf(STD_OUT, "Running CPU only DGEMM\n");
		if (Config->ShowThreadPinning) printThreadPinning();
		if (Config->LinpackSwapN != NULL)
		{
			HPL_CALDGEMM_gpu_height = 0;
			Config->linpack_swap_function();
		}
		if (ExecLinpack)
		{
			// First compute the top stripe needed by the factorization, run the
			// callbacks, then continue with the remainder of the matrix below.
			size_t usewidth = Config->Width > matrix_m ? matrix_m : Config->Width;
			Timers.CPUTimer.Start();
			cblas_dgemm(CblasRowMajor, TransposeA ? CblasTrans : CblasNoTrans, TransposeB ? CblasTrans : CblasNoTrans, usewidth, matrix_n, Config->Width, Alpha, A, A_pitch, B, B_pitch, Beta, C, C_pitch);
			Timers.CPUTimer.Stop();
			if (Config->Debug) fprintf(STD_OUT, "DGEMM was running on CPU only, executing linpack callback functions\n");
			Timers.LinpackTimer1.Start();
			Config->linpack_factorize_function();
			Timers.LinpackTimer1.Stop();
			if (Config->LinpackNodes > 1)
			{
				Timers.LinpackTimer2.Start();
				Config->linpack_broadcast_function();
				Timers.LinpackTimer2.Stop();
			}
			matrix_m -= usewidth;
			A += usewidth * (TransposeA ? 1 : A_pitch);
			C += usewidth * (C_pitch);
		}
		Timers.CPUTimer.Start();
		goto_set_num_threads(conf_numprocs);
		if (matrix_m) cblas_dgemm(CblasRowMajor, TransposeA ? CblasTrans : CblasNoTrans, TransposeB ? CblasTrans : CblasNoTrans, matrix_m, matrix_n, Config->Width, Alpha, A, A_pitch, B, B_pitch, Beta, C, C_pitch);
		Timers.CPUTimer.Stop();
		CPUOnlyRun = true;
	}
	else
	{
		CPUOnlyRun = false;
		if (ExecLinpack)
		{
			outputthreads = mymin(CALDGEMM_OUTPUT_THREADS_SLOW, outputthreads + CALDGEMM_EXTRA_OUTPUT_THREADS_LINPACK);
		}
		if (Config->SpawnGPUThread == -2)
		{
			if (Config->Debug) fprintf(STD_OUT, "Caldgemm Main Thread, setting CPU mask %X\n", getcpumask(&gpumask));
			sched_setaffinity(0, sizeof(cpu_set_t), &gpumask);
		}
		if (forceReinit)
		{
			fprintf(STD_OUT, "WARNING: Reinit for increased buffer width / height\n");
			// NOTE(review): the reinit path is disabled -- exit(1) makes the ReinitDevices() call below unreachable.
			fprintf(STD_OUT, "Reinit not yet implemented correctly, exiting");
			exit(1);
			if (ReinitDevices()) return(1);
		}
		InitConstantData(alpha);
		// Determine the fraction of the work assigned to the GPU (GPURatio in [0,1]).
		if (Config->SlowCPU || matrix_n < Config->MinimizeCPUPart || (Config->MinimizeCPUDuringFact && ExecLinpack >= 2) || Config->GPURatio >= 1.0)
		{
			GPURatio = 1.0;
		}
		else
		{
			if (Config->GPURatio <= -0.999) //Auto determination (code must be adapted for each CPU / GPU config)
			{
				//Optimal ratio found using combined runs
				if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 5000000000) GPURatio = 0.75;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 600000000) GPURatio = 0.74;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 500000000) GPURatio = 0.73;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 200000000) GPURatio = 0.73;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 100000000) GPURatio = 0.72;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 7000000) GPURatio = 0.70;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 5000000) GPURatio = 0.67;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 2500000) GPURatio = 0.60;
				else if ((long long int) MaxGpuM * (long long int) MaxGpuN > (long long int) 1000000) GPURatio = 0.55;
				else GPURatio = 0.50;
				// Scale down for narrow K / small tiles, then normalize by the
				// relative CPU and GPU throughput of this machine.
				if (Config->Width < 1024) GPURatio *= (double) Config->Width / (double) 1024;
				if (Config->Height < 1024) GPURatio *= (double) Config->Height / (double) 1024 * (double) Config->Height / (double) 1024;
				const int require_threads = outputthreads * nDevices + 1 + (ExecLinpack && Config->LinpackNodes > 1);
				const double CPUscale = (double) (conf_cpufreq * mymax(conf_numprocs - require_threads, 1)) / (double) (2100 * (24 - require_threads));
				const double GPUscale = (double) nDevices * conf_gpushaders * conf_gpufreq / (double) (850 * 20 * 64);
				if (Config->Debug) fprintf(STD_OUT, "GPU Curve Ration: %1.3f, CPUScale %1.3f, GPUScale %1.3f\n", GPURatio, CPUscale, GPUscale);
				GPURatio = GPUscale * GPURatio / (GPUscale * GPURatio + (1.0 - GPURatio) * CPUscale);
				if (Config->Debug) fprintf(STD_OUT, "GPURatio automatically set to %1.3f\n", GPURatio);
				if (GPURatio > 1.) GPURatio = 1.0;
				if ((matrix_n + 4) % 4096 < 8 && GPURatio > 0.5) GPURatio = 1. - 0.95 * (1. - GPURatio);
			}
			else
			{
				if (ExecLinpack > 1 && Config->GPURatioDuringFact > 0) GPURatio = Config->GPURatioDuringFact;
				else GPURatio = fabs(Config->GPURatio);
			}
			// During Linpack runs, reuse the ratio measured in previous iterations
			// of similar size (linpackGPURatios history table).
			if (ExecLinpack && (Config->GPURatio < 0 || GPURatio < 0.99) && !Config->SlowCPU)
			{
				if (Config->GPURatio <= -0.99) //Auto determination
				{
					if (ExecLinpack > 1) GPURatio = 1.0 - (1.0 - GPURatio) * 0.80 * Config->Width / 1024;
					else GPURatio = 1.0 - (1.0 - GPURatio) * 0.90;
					if (GPURatio > 1.0) GPURatio = 1.0;
				}
				if (linpack_last_mn[ExecLinpack] > 0 && (((double) MaxGpuM * (double) MaxGpuN) - linpack_last_mn[ExecLinpack]) / linpack_last_mn[ExecLinpack] < 0.3 && linpackGPURatios[ExecLinpack] > 0.0001)
				{
					GPURatio = linpackGPURatios[ExecLinpack];
					// NOTE(review): "||1" makes these two messages unconditional -- looks like leftover debugging.
					if (Config->Debug||1) fprintf(STD_OUT, "Taking GPU Ratio from table, entry %d, val %2.3f\n", ExecLinpack, 100 * GPURatio);
				}
				else
				{
					linpackGPURatios[ExecLinpack] = GPURatio;
					if (Config->Debug||1) fprintf(STD_OUT, "Initializing ratio table entry %d with %2.3f\n", ExecLinpack, 100 * GPURatio);
				}
			}
			if (Config->GPURatioMax > 0 && GPURatio > Config->GPURatioMax) GPURatio = Config->GPURatioMax;;
			// A GPURatio in (-0.99, 0) acts as a lower bound for the automatic ratio.
			if (Config->GPURatio < 0 && Config->GPURatio > -0.99)
			{
				double threshold = (ExecLinpack > 1 && Config->GPURatioDuringFact > 0.) ? Config->GPURatioDuringFact : -Config->GPURatio;
				if (GPURatio < threshold) GPURatio = threshold;
			}
			//if (Config->AlternateLookahead > matrix_n) GPURatio = 1. - (1. - GPURatio) * 0.88;
		}
		gpu_ratio_used = GPURatio;
		// Classic lookahead: exclude the top Width stripe from this run (CPU handles it).
		if (ExecLinpack >= 2 && Config->AlternateLookahead <= matrix_n)
		{
			matrix_m -= Config->Width;
			A += Config->Width * (TransposeA ? 1 : A_pitch);
			C += Config->Width * (C_pitch);
			HPL_CALDGEMM_gpu_height += Config->Width;
		}
		cParam.dynamic_run = 0;
		cParam.dynamic_run2 = 0;
		cParam.borders_done = false;
		SmallTileHeight = (Config->SmallTiles == 1 ? KernelSettings.min_tile_size : Config->Height);
	recalculate_ratio:
		// Split the matrix between GPU and CPU along the larger dimension;
		// gpu_m/gpu_n is the GPU part, cParam.cblas_size the CPU remainder.
		if (Config->UseCPU == true && Config->UseGPU == true)
		{
			if ((DGEMM_split_m = ((Config->LinpackSwapN == NULL && (ExecLinpack == 0 || Config->AlternateLookahead <= matrix_n)) ? (matrix_m >= matrix_n) : 1)))
			{
				size_t virtualm = matrix_m + (matrix_n % SmallTileHeight) * matrix_m / matrix_n;
				if (ExecLinpack >= 2 && Config->AlternateLookahead <= matrix_n) virtualm += Config->Width * (Config->GPURatioLookaheadSizeMod + (float) matrix_m / matrix_n);
				gpu_m = GPURatio * (float) virtualm + (SmallTileHeight - 1);
				if (gpu_m > matrix_m)
				{
					// GPU would get everything: retry with smaller tiles if allowed.
					if (Config->SmallTiles == 2 && SmallTileHeight > (size_t) KernelSettings.min_tile_size)
					{
						if (SmallTileHeight > 1024) SmallTileHeight = 1024;
						else SmallTileHeight = KernelSettings.min_tile_size;
						goto recalculate_ratio;
					}
					gpu_m = matrix_m;
				}
				gpu_m -= gpu_m % SmallTileHeight;
				cParam.cblas_size = matrix_m - gpu_m;
				gpu_n = matrix_n;
				gpu_n -= gpu_n % SmallTileHeight;
				if (Config->Debug) fprintf(STD_OUT, "Splitting: GPU: %lld x %lld, CPU: %lld x %lld, Tilesize %lld\n", (long long int) gpu_m, (long long int) gpu_n, (long long int) matrix_m - gpu_m, (long long int) gpu_n, (long long int) SmallTileHeight);
			}
			else
			{
				size_t virtualn = matrix_n + (matrix_m % SmallTileHeight) * matrix_n / matrix_m;
				if (ExecLinpack >= 2 && Config->AlternateLookahead <= matrix_n) virtualn += Config->Width * (Config->GPURatioLookaheadSizeMod + (float) matrix_n / matrix_m);
				gpu_n = GPURatio * (float) virtualn + (SmallTileHeight - 1);
				if (gpu_n > matrix_n)
				{
					if (Config->SmallTiles == 2 && SmallTileHeight > (size_t) KernelSettings.min_tile_size)
					{
						if (SmallTileHeight > 1024) SmallTileHeight = 1024;
						else SmallTileHeight = KernelSettings.min_tile_size;
						goto recalculate_ratio;
					}
					gpu_n = matrix_n;
				}
				gpu_n -= gpu_n % SmallTileHeight;
				cParam.cblas_size = matrix_n - gpu_n;
				gpu_m = matrix_m;
				gpu_m -= gpu_m % SmallTileHeight;
				if (Config->Debug) fprintf(STD_OUT, "Splitting: GPU: %lld x %lld, CPU: %lld x %lld, Tilesize %lld\n", (long long int) gpu_m, (long long int) gpu_n, (long long int) matrix_m, (long long int) matrix_n - gpu_n, (long long int) SmallTileHeight);
			}
			// Trim or custom-adjust the partial edge tiles of the GPU part.
			const size_t over_m = gpu_m % Config->Height, over_n = gpu_n % Config->Height;
			if (over_m < CALDGEMM_MIN_TILE_DIM2) gpu_m -= over_m;
			else
			{
#ifdef CALDGEMM_CUSTOM_HEIGHT_MOD
#define MOD_OVER over_m
#define MOD_GPU gpu_m
#include CALDGEMM_CUSTOM_HEIGHT_MOD
#undef MOD_OVER
#undef MOD_GPU
#else
				CaldgemmCustomModHeight(over_m, gpu_m);
#endif
			}
			if (over_n < CALDGEMM_MIN_TILE_DIM2) gpu_n -= over_n;
			else
			{
#ifdef CALDGEMM_CUSTOM_HEIGHT_MOD
#define MOD_OVER over_n
#define MOD_GPU gpu_n
#include CALDGEMM_CUSTOM_HEIGHT_MOD
#undef MOD_OVER
#undef MOD_GPU
#else
				CaldgemmCustomModHeight(over_n, gpu_n);
#endif
			}
			cParam.cblas_size = DGEMM_split_m ? (matrix_m - gpu_m) : (matrix_n - gpu_n);
		}
		else
		{
			// No CPU part: the whole matrix goes to the GPU and must tile exactly.
			if (warn_wrong_memory_allocation && (Config->GPU_C || Config->DstMemory == 'c'))
			{
				warn_wrong_memory_allocation = false; //Only warn once
				fprintf(STD_OUT, "WARNING, you are using GPU_C or '-o g' option, but apparently you did not use CALDGEMM memory allocation with gpu_accessible feature ('-_' is missing).\nYou must take care to allocate GPU accessible memory yourself, or this can lead to invalid memory accesses.\n");
			}
			DGEMM_split_m = 0;
			if (matrix_n % SmallTileHeight || matrix_m % SmallTileHeight)
			{
				fprintf(STD_OUT, "Invalid matrix size for GPU only (%lld %% %lld = %lld, %lld %% %lld = %lld)\n", (long long int) matrix_n, (long long int) SmallTileHeight, (long long int) matrix_n % SmallTileHeight, (long long int) matrix_m, (long long int) SmallTileHeight, (long long int) matrix_m % SmallTileHeight);
				return(1);
			}
			if (ExecLinpack)
			{
				fprintf(STD_OUT, "Linpack callbacks in CALDGEMM are only possible with UseCPU = true!\n");
				return(1);
			}
			gpu_n = matrix_n;
			gpu_m = matrix_m;
		}
		DGEMM_favor_m = (Config->LinpackSwapN == NULL && (ExecLinpack == 0 || Config->AlternateLookahead <= matrix_n)) ? (gpu_m >= gpu_n) : 1;
		if (!Config->Quiet) fprintf(STD_OUT, "Ratio %f - gpu_m %lld gpu_n %lld - Split %c Favor %c - Height %lld (/ %lld), Min Tiling %lld (%lld, %lld)\n", GPURatio, (long long int) gpu_m, (long long int) gpu_n, DGEMM_split_m ? 'm' : 'n', DGEMM_favor_m ? 'm' : 'n', (long long int) Config->Height, (long long int) BufferHeight, (long long int) SmallTileHeight, (long long int) (gpu_m % Config->Height), (long long int) (gpu_n % Config->Height));
		if (Config->ShowThreadPinning) printThreadPinning();
		// Tile bookkeeping: mb x nb GPU tiles in total; the k-barriers separate
		// GPU-processed tiles from tiles taken over by the CPU.
		const size_t mb = (gpu_m + Config->Height - 1) / Config->Height;
		const size_t nb = (gpu_n + Config->Height - 1) / Config->Height;
		const size_t nBlocks = mb * nb;
		cParam.cpu_k = nBlocks;
		cpu_k_barrier = nBlocks;
		gpu_k_barrier = -1;
		if (Config->UseCPU)
		{
			if (!Config->MultiThread)
			{
				cblas_wrapper_a();
			}
			// Wake the cblas worker thread for the CPU part.
			cParam.cblasMutex[1].Unlock();
		}
		else if (Config->LinpackSwapN != NULL)
		{
			HPL_CALDGEMM_gpu_height = 0;
			Config->linpack_swap_function();
		}
		// Run this thread's part; the other part runs in the spawned thread (depending on SpawnGPUThread).
		if (Config->SpawnGPUThread != -2)
		{
			if (caldgemm_part_cpu()) return(1);
		}
		else
		{
			if (caldgemm_part_gpu()) return(1);
		}
		if (Config->UseCPU)
		{
			// Wait for the cblas worker to signal completion; record the wait time
			// which feeds the dynamic-scheduling ratio adjustment.
			if (Config->Debug) fprintf(STD_OUT, "Waiting for CPU DGEMM to finish\n");
			cParam.cblasMutex[0].Lock();
			if (Config->MultiThread)
			{
				Timers.ATime.Stop();
				cpu_wait_time = Timers.ATime.GetElapsedTime();
				if (Config->DynamicSched && !Config->NoPerformanceWarnings && Timers.ATime.GetElapsedTime() >= 0.15 && cParam.cblas_size > 0)
				{
					fprintf(STD_OUT, "WARNING: CPU synchronisation took %2.4f sec\n", Timers.ATime.GetElapsedTime());
				}
				else if (Config->Debug)
				{
					fprintf(STD_OUT, "CPU synchronisation took %2.4f sec\n", Timers.ATime.GetElapsedTime());
				}
			}
		}
	}
	if (Config->LinpackSwapN != NULL) *Config->LinpackSwapN = 0;
	outputthreads = old_outputthreads;
	Timers.System.Stop();
	if (Config->Debug) fprintf(STD_OUT, "DGEMM Run Complete\n");
	// Pipelined mode: a previous iteration may still be in flight; finish it
	// before overwriting finishData with this run's results.
	if (finishData->running)
	{
		if (!Config->Quiet) fprintf(STD_OUT, "Waiting for previous pipelined DGEMM iteration to finish\n");
		int retVal = FinishCALDGEMM();
		if (retVal) return(retVal);
	}
	// Snapshot all per-run state and timings into finishData so FinishCALDGEMM
	// can report and update the ratio tables after this call returns.
	finishData->matrix_m = matrix_m; finishData->matrix_n = matrix_n; finishData->SmallTileHeight = SmallTileHeight; finishData->orig_m = orig_m; finishData->orig_n = orig_n;
	finishData->gpu_ratio_used = gpu_ratio_used; finishData->cpu_wait_time = cpu_wait_time;
	finishData->ExecLinpack = ExecLinpack;
	finishData->CPUOnlyRun = CPUOnlyRun; finishData->DGEMM_split_m = DGEMM_split_m;
	finishData->System = Timers.System.GetElapsedTime(); finishData->CPUTimer = Timers.CPUTimer.GetElapsedTime(); finishData->GPUTimer = Timers.GPUTimer.GetElapsedTime(); finishData->TotalCPUTimer = Timers.TotalCPUTimer.GetElapsedTime();
	finishData->LinpackTimer1 = Timers.LinpackTimer1.GetElapsedTime(); finishData->LinpackTimer2 = Timers.LinpackTimer2.GetElapsedTime(); finishData->LinpackTimer3 = Timers.LinpackTimer3.GetElapsedTime(); finishData->BcastTimer = Timers.BcastTimer.GetElapsedTime();
	finishData->divideA = Timers.divideA; finishData->divideB = Timers.divideB; finishData->divideC = Timers.divideC;
	finishData->device_kernel = Timers.device_kernel;
	finishData->cblas_size = cParam.cblas_size;
	finishData->dynamic_run = cParam.dynamic_run;
	finishData->dynamic_size = cParam.dynamic_size;
	finishData->cpu_k = cParam.cpu_k;
	finishData->dynamic_run2 = cParam.dynamic_run2;
	finishData->MidMarkerPos = Config->PipelinedMidMarker;
	FinishDataFill();
	if (Config->PipelineDoubleBuffer) pipelineBuffer ^= 1;
	if (Config->PipelinedOperation && !CPUOnlyRun && pipelinedRun)
	{
		// Defer finalization: caller (or the next iteration) must run FinishCALDGEMM().
		finishData->running = true;
		return(0);
	}
	else
	{
		finishData->running = false;
		return(FinishCALDGEMM(true));
	}
}
// Allocate the structure that carries per-run results/timings from RunCALDGEMM
// into FinishCALDGEMM. Returns 0 on success, nonzero if allocation failed.
// NOTE(review): plain operator new throws on failure rather than returning NULL,
// so the NULL check below is likely dead -- kept for consistency with the file.
int caldgemm::FinishDataInit()
{
	finishData = new finishStruct;
	if (finishData == NULL) return(1);
	return(0);
}
// Hook to copy additional backend-specific state into finishData after a run;
// base implementation does nothing (presumably overridden by backend subclasses -- confirm in header).
void caldgemm::FinishDataFill(){}
// Finalize a (possibly pipelined) DGEMM run: complete outstanding backend work,
// verify results if requested, print timing, and update the Linpack GPU-ratio
// history tables from the measured CPU/GPU times.
// force: finalize even if no pipelined run is marked as in flight.
// Returns 0 on success, nonzero on error.
int caldgemm::FinishCALDGEMM(bool force)
{
	if (!(force || finishData->running)) return(0);
	if (Config->PipelinedOperation)
	{
		int retVal = RunCALDGEMM_Finish();
		finishData->running = false;
		if (retVal) return(retVal);
	}
#ifdef TESTMODE
	print_submatrices(C, 12, 24, C_pitch, 1, 1, 1, 1);
#endif
	if (!Config->NoPerformanceWarnings && Config->DynamicSched && Config->UseCPU && Config->UseGPU && !finishData->CPUOnlyRun && fabs(finishData->TotalCPUTimer - finishData->GPUTimer) > 1.0)
	{
		fprintf(STD_OUT, "WARNING: Bad GPU / CPU Splitting: GPU Time: %2.4f, CPU Time: %2.4f (m = %lld, n = %lld)\n", finishData->GPUTimer, finishData->TotalCPUTimer, (long long int) finishData->matrix_m, (long long int) finishData->matrix_n);
	}
	displayMatrixTiming("caldgemm");
	if (Config->Verify)
	{
		// Restore the original problem pointers/sizes (RunCALDGEMM may have
		// advanced them for the lookahead stripe) before checking the result.
		A = orig_a;
		B = orig_b;
		C = orig_c;
		matrix_m = orig_m;
		matrix_n = orig_n;
		AnalyzeResults();
		delete[] D;
	}
	if (finishData->ExecLinpack)
	{
		// Penalize the recorded GPU ratio when the CPU part finished very fast
		// or the CPU had to wait, nudging the next iteration's split.
		if (Config->GPURatioPenalties >= 2)
		{
			if (finishData->CPUTimer < 2.0)
			{
				finishData->gpu_ratio_used = 1. - Config->GPURatioPenaltyFactor * (1. - finishData->gpu_ratio_used);
			}
			if (finishData->ExecLinpack >= 2 && finishData->GPUTimer - finishData->LinpackTimer1 < 1.0)
			{
				finishData->gpu_ratio_used = 1. - Config->GPURatioPenaltyFactor * (1. - finishData->gpu_ratio_used);
			}
		}
		if (Config->GPURatioPenalties >= 1)
		{
			if (finishData->cpu_wait_time >= 0.05)
			{
				finishData->gpu_ratio_used = 1. - Config->GPURatioPenaltyFactor * (1. - finishData->gpu_ratio_used);
			}
		}
		// Blend the new ratio into the history table; if the CPU wait was long
		// (old table value clearly off), take the new measurement unblended.
		const double tmpratio = finishData->cpu_wait_time > 0.15 ? 0.0 : 0.5;
		const double newratio = tmpratio * linpackGPURatios[finishData->ExecLinpack] + (1.0 - tmpratio) * finishData->gpu_ratio_used;
		if (Config->Debug) fprintf(STD_OUT, "updating ratio table entry %d (old: %2.3f, new: %2.3f, factor: %2.3f) => %2.3f\n", finishData->ExecLinpack, 100 * linpackGPURatios[finishData->ExecLinpack], 100 * finishData->gpu_ratio_used, tmpratio, 100 * newratio);
		linpackGPURatios[finishData->ExecLinpack] = newratio;
		linpackCPUDGEMMTime[finishData->ExecLinpack] = finishData->CPUTimer;
		linpackBcastTime[finishData->ExecLinpack] = finishData->LinpackTimer2;
		linpack_last_mn[finishData->ExecLinpack] = (double) finishData->orig_m * (double) finishData->orig_n;
	}
	return(0);
}
// Hook to complete a pipelined run in the backend; base implementation has
// nothing to finish and always succeeds (presumably overridden by subclasses -- confirm in header).
int caldgemm::RunCALDGEMM_Finish() {return(0);}
// Prepare (stage/divide) the input buffers for up to two queued tiles, then
// execute the GPU kernel for tile Task.k on device Task.device using output
// buffer slot Task.j. The per-device mutex is deliberately released while
// blocking on the output-buffer mutex so other threads can use the device.
// Returns 0 on success, 1 on error.
int caldgemm::DGEMMPrepareAndExecute(caldgemm::DGEMMPrepareAndExecuteTask& Task CALDGEMM_DIVBUFA)
{
	if (Config->MultiThread && UseMutexPerDevice()) pthread_mutex_lock(&device_mutex[Task.device]);
	if (Config->Debug) fprintf(STD_OUT, "DGEMMPrepareAndExecute device %d k1 %d j1 %d k2 %d j2 %d\n", Task.device, (int) Task.PrepareTasks[0].k, Task.PrepareTasks[0].j, (int) Task.PrepareTasks[1].k, Task.PrepareTasks[1].j);
	// Run the queued buffer-preparation subtasks (j == -1 marks an unused slot).
	for (int l = 0;l < 2;l++)
	{
		if (Task.PrepareTasks[l].j != -1)
		{
			if (DGEMM_prepare(Task.PrepareTasks[l].k, Task.PrepareTasks[l].j, Task.device CALDGEMM_DIVBUFB)) return(1);
		}
	}
	if (Config->MultiThread && UseOutputPthreads())
	{
		if (Config->Debug) fprintf(STD_OUT, "\tLocking obuffer mutex %d/%d\n", Task.device, Task.j);
		if (Config->AsyncTiming)
		{
			Timers.ATime.Reset();
			Timers.ATime.Start();
		}
		// Drop the device mutex while waiting for the output buffer, then reacquire.
		if (Config->MultiThread && UseMutexPerDevice()) pthread_mutex_unlock(&device_mutex[Task.device]);
		obufferMutex[Task.device][Task.j].Lock();
		if (Config->MultiThread && UseMutexPerDevice()) pthread_mutex_lock(&device_mutex[Task.device]);
		if (Config->AsyncTiming)
		{
			Timers.ATime.Stop();
			if ((!Config->NoPerformanceWarnings && Timers.ATime.GetElapsedTime() > 0.001) || Config->Debug) fprintf(STD_OUT, "\t\tWait Time for output buffer: %1.5f\n", Timers.ATime.GetElapsedTime());
		}
	}
	size_t blockm, blockn;
	DGEMM_getblocks(Task.k, blockm, blockn);
	// Negative buffer pointers indicate the staged input was invalidated by a
	// previous iteration; re-run the transfer for this tile before the kernel.
	if (buffer_pointers_A[Task.device][blockm] < 0 || buffer_pointers_B[Task.device][blockn] < 0)
	{
		if (!Config->NoPerformanceWarnings) fprintf(STD_OUT, "WARNING, Buffer falsified by previous iteration, need to retransfer (ptr_a = %d, ptr_b = %d)\n", buffer_pointers_A[Task.device][blockm], buffer_pointers_B[Task.device][blockn]);
		if (DGEMM_prepare(Task.k, Task.j, Task.device CALDGEMM_DIVBUFB)) return(1);
	}
	if (ExecuteKernels(Task, blockm, blockn)) return(1);
	if (Config->SimpleGPUQueuing) CheckAlternateTilesRemaining(blockm);
	// Signal the main loop that this (device, obuffer) slot has a kernel in flight.
	DGEMMPrepareTaskEventReady[Task.device][Task.j] = true;
	if (Config->MultiThread && UseMutexPerDevice()) pthread_mutex_unlock(&device_mutex[Task.device]);
	return(0);
}
// Align the configured buffer dimensions to the kernel's granularity: Height is
// rounded down to a multiple of min_tile_size and Width to a multiple of min_k
// (never below one unit of granularity), warning about each adjustment. The
// resulting values are recorded as the buffer maxima BufferHeight/BufferWidth.
void caldgemm::SetupBufferSizes()
{
	const size_t tile = (size_t) KernelSettings.min_tile_size;
	if (Config->Height % tile != 0)
	{
		// Round down to a tile multiple, but never below a single tile.
		int new_tile_size;
		if (Config->Height > tile) new_tile_size = Config->Height - Config->Height % tile;
		else new_tile_size = KernelSettings.min_tile_size;
		if (!Config->NoPerformanceWarnings) fprintf(STD_OUT, "Default buffer height %d does not fit tile size of %d, adjusting height to %d\n", (int) Config->Height, KernelSettings.min_tile_size, new_tile_size);
		Config->Height = new_tile_size;
	}
	const size_t kmin = (size_t) KernelSettings.min_k;
	if (Config->Width % kmin != 0)
	{
		// Round down to a multiple of the minimum k, but never below it.
		int new_k;
		if (Config->Width > kmin) new_k = Config->Width - Config->Width % kmin;
		else new_k = KernelSettings.min_k;
		if (!Config->NoPerformanceWarnings) fprintf(STD_OUT, "Default buffer width %d does not fit minimum k value of %d, adjusting width to %d\n", (int) Config->Width, KernelSettings.min_k, new_k);
		Config->Width = new_k;
	}
	BufferHeight = Config->Height;
	BufferWidth = Config->Width;
}
// Tear down everything InitCALDGEMM set up: preallocated data, device buffers,
// the backend runtime, and all helper threads (merge/output, cblas, linpack
// broadcast, divide). Helper threads are signalled via their 'terminate' flags
// and handshake mutexes. Returns 0 on success, 1 on error.
int caldgemm::ExitCALDGEMM()
{
	if (!caldgemm_initialized)
	{
		fprintf(STD_OUT, "CALDGEMM not initialized, cannot uninitialize!\n");
		return(1);
	}
	nDevices = nDevicesInitialized;
	if (Config->Debug) fprintf(STD_OUT, "Uninitializing CALDGEMM\n");
	delete finishData;
	if (Config->PreallocData) if (PreallocateFree()) return(1);
	if (Config->UseGPU && ExitDevices()) return(1);
	if (Config->MultiThread && UseOutputPthreads())
	{
		// Signal every merge (output postprocessing) slave to terminate.
		for (int num_device = 0;num_device < nDevices;num_device++)
		{
			for (int i = 0;i < (Config->OutputThreads == -1 ? max_outputthreads : Config->OutputThreads);i++)
			{
				if (Config->Debug) fprintf(STD_OUT, "Trying to terminate merge slave %d\n", i);
				mParam[num_device][i].terminate = true;
				mParam[num_device][i].mergeThreadMutex[1].Lock();
				mParam[num_device][i].mergeThreadMutex[0].Unlock();
			}
		}
	}
	ExitRuntime();
	if (Config->UseCPU && Config->UseGPU)
	{
		// Stop the cblas slave; it signals cblasMutex[0] on its way out.
		if (Config->Debug) fprintf(STD_OUT, "Trying to terminate blas slave\n");
		cParam.terminate = true;
		if (Config->MultiThread)
		{
			cParam.cblasMutex[1].Unlock();
			if (Config->Debug) fprintf(STD_OUT, "Waiting for blas threads to terminate\n");
			cParam.cblasMutex[0].Lock();
		}
		for (int i = 0;i < 2;i++) cParam.cblasMutex[i].Unlock();
	}
	if (Config->AlternateLookahead)
	{
		if (pthread_mutex_destroy(&tilesRemainingMutex)) fprintf(STD_OUT, "ERROR destroying tilesRemainingMutex: %s - %d\n", __FILE__, __LINE__);
	}
	if (Config->MultiThread)
	{
		// Stop the linpack broadcast slave, then wait for merge and divide threads.
		if (Config->Debug) fprintf(STD_OUT, "Trying to terminate linpack slave\n");
		linpackParameters.terminate = true;
		linpackParameters.linpackMutex[0].Unlock();
		if (Config->Debug) fprintf(STD_OUT, "Waiting for linpack slave to terminate\n");
		linpackParameters.linpackMutex[1].Lock();
		for (int i = 0;i < 2;i++) linpackParameters.linpackMutex[i].Unlock();
		if (UseOutputPthreads())
		{
			if (Config->Debug) fprintf(STD_OUT, "Waiting for merge threads to terminate\n");
			for (int i = 0;i < (Config->OutputThreads == -1 ? max_outputthreads : Config->OutputThreads);i++)
			{
				for (int num_device = 0;num_device < nDevices;num_device++)
				{
					mParam[num_device][i].mergeThreadMutex[1].Lock();
				}
			}
		}
		if (Config->MultiThreadDivide && UseInputPthreads())
		{
			for (int i = 0;i < divideThreads;i++)
			{
				dParam[i].terminate = 1;
				DGEMMTasks[dParam[i].curDevice].mutex_start.Unlock();
				if (Config->Debug) fprintf(STD_OUT, "Waiting for divide threads to terminate\n");
				DGEMMTasks[i].mutex_finished.Lock();
			}
		}
	}
	if (Config->MultiThread)
	{
		// All helper threads are gone; release and destroy the remaining mutexes.
		if (UseOutputPthreads())
		{
			for (int num_device = 0;num_device < nDevices;num_device++)
			{
				for (int i = 0;i < (Config->OutputThreads == -1 ? max_outputthreads : Config->OutputThreads);i++)
				{
					for (int j = 0;j < 2;j++)
					{
						mParam[num_device][i].mergeThreadMutex[j].Unlock();
					}
				}
			}
		}
		if (pthread_mutex_destroy(&scheduleMutex)) fprintf(STD_OUT, "ERROR destroying schedule mutex\n");
		if (Config->MultiThreadDivide && UseInputPthreads())
		{
			for (int i = 0;i < nDevices;i++)
			{
				DGEMMTasks[i].mutex_start.Unlock();
				DGEMMTasks[i].mutex_finished.Unlock();
			}
		}
		if (Config->MultiThread && UseMutexPerDevice())
		{
			for (int i = 0;i < nDevices;i++)
			{
				if (pthread_mutex_destroy(&device_mutex[i])) fprintf(STD_OUT, "ERROR destroying device_mutex: %s - %d\n", __FILE__, __LINE__);
			}
		}
	}
	if (Config->ThreadSaveDriver == -1)
	{
		pthread_mutex_destroy(&globalDriverLock);
	}
	if (Config->UseDMAFetchQueue)
	{
		for (int i = 0;i < nDevices;i++)
		{
			pthread_mutex_destroy(&dma_fetch_queue_tasks[i].mutex);
		}
	}
#ifdef CALDGEMM_DIVIDE_STATIC_BUFFER
	freeDivideBuffer(divide_tmpBuffer);
#endif
	caldgemm_initialized = false;
	return(0);
}
// Zero all per-run performance counters and timers before a new DGEMM run.
void caldgemm::ResetTimers()
{
	//Reset Timers
	Timers.System.Reset();
	Timers.Kernel.Reset();
	Timers.CounterDivide.Reset();
	Timers.CounterMerge.Reset();
	Timers.CounterCopyTo.Reset();
	Timers.CounterCopyFrom.Reset();
	Timers.CPUTimer.Reset();
	Timers.TotalCPUTimer.Reset();
	Timers.GPUTimer.Reset();
	// divideA/B/C count the buffer divide/merge operations performed.
	Timers.divideA = Timers.divideB = Timers.divideC = 0;
	Timers.LinpackTimer1.Reset();
	Timers.LinpackTimer2.Reset();
	Timers.LinpackTimer3.Reset();
	Timers.BcastTimer.Reset();
	// Kernel time as reported by the device runtime, in nanoseconds
	// (displayMatrixTiming multiplies by 1e-09 to print seconds).
	Timers.device_kernel = 0;
}
// Bookkeeping for the legacy huge-page allocator (USE_OLD_HUGE_MALLOC path):
// remembers every shmat()ed huge-page buffer so FreeMemory() can shmdt() it.
#define MAX_HUGE_ADDRESSES 256
double* huge_page_addresses[MAX_HUGE_ADDRESSES];
int nHugeAddresses = 0;

#ifndef HUGE_PAGESIZE
// Assumed huge page size: 2 MiB — NOTE(review): verify against the target system.
#define HUGE_PAGESIZE (1024 * 2048)
#endif
/*
 * Allocate a buffer of nDoubles doubles, optionally page-locked, in huge pages,
 * GPU accessible and/or NUMA-interleaved. Returns NULL on failure.
 * The default path delegates to qmalloc; the legacy USE_OLD_HUGE_MALLOC path
 * allocates SysV huge-page shared memory segments directly.
 */
double* caldgemm::AllocMemory(size_t nDoubles, bool page_locked, bool huge_pages, bool gpuaccessible, bool interleave)
{
#ifndef USE_OLD_HUGE_MALLOC
	return((double*) qmalloc::qMalloc(nDoubles * sizeof(double), huge_pages, false, page_locked, NULL, interleave));
#else
#ifdef WASTE_MEMORY
	nDoubles += 40 * 1024 * 1024;
#endif
	double* ptr;
#ifndef _WIN32
	if (huge_pages)
	{
		if (nHugeAddresses >= MAX_HUGE_ADDRESSES - 1)
		{
			fprintf(STD_OUT, "No more huge_page memory available, increase MAX_HUGE_ADDRESSES\n");
			return(NULL);
		}
		int shmid;
		if (Config->Debug) fprintf(STD_OUT, "Running Huge Maloc\n");
		if ((shmid = shmget(IPC_PRIVATE, (nDoubles * sizeof(double) + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), SHM_HUGETLB | IPC_CREAT | 0600)) < 0)
		{
			fprintf(STD_OUT, "Memory allocation error (shmget).\n");
			return(NULL);
		}
		ptr = (double*) shmat(shmid, NULL, SHM_RND);
		// BUGFIX: this previously tested the never-assigned local 'address' instead
		// of the shmat() return value, so attach failures were silently ignored.
		// shmat returns (void*) -1 on error.
		if ((long long int) ptr == -1)
		{
			fprintf(STD_OUT, "Memory allocation error (shmat).\n");
			shmctl(shmid, IPC_RMID, NULL); // do not leak the segment on failure
			return(NULL);
		}
		// Mark the segment for destruction once the last process detaches.
		shmctl(shmid, IPC_RMID, NULL);
		if (page_locked && shmctl(shmid, SHM_LOCK, NULL) == -1)
		{
			fprintf(STD_OUT, "ERROR locking HugePage Memory\n");
			shmdt((void*) ptr);
			return(NULL);
		}
		huge_page_addresses[nHugeAddresses++] = ptr;
	}
	else
#endif
	{
#ifdef _WIN32
		ptr = (double*) VirtualAlloc(NULL, nDoubles * sizeof(double), MEM_COMMIT, PAGE_READWRITE);
#else
		ptr = new double[nDoubles];
#endif
	}
	if (ptr == NULL) return(NULL);
#ifdef WASTE_MEMORY
	nDoubles -= 40 * 1024 * 1024;
	ptr += 20 * 1024 * 1024;
#endif
	if (!huge_pages && page_locked)
	{
#ifdef _WIN32
		// Windows requires the working set to be large enough before VirtualLock.
		size_t minp, maxp;
		HANDLE pid = GetCurrentProcess();
		if (GetProcessWorkingSetSize(pid, &minp, &maxp) == 0) fprintf(STD_OUT, "Error getting minimum working set size\n");
		if (SetProcessWorkingSetSize(pid, minp + nDoubles * sizeof(double), maxp + nDoubles * sizeof(double)) == 0) fprintf(STD_OUT, "Error settings maximum working set size\n");
		if (VirtualLock(ptr, nDoubles * sizeof(double)) == 0)
#else
		if (mlock(ptr, nDoubles * sizeof(double)))
#endif
		{
			fprintf(STD_OUT, "ERROR locking Pages\n");
			if (!huge_pages)
			{
#ifdef _WIN32
				DWORD err = GetLastError();
				fprintf(STD_OUT, "Error Number: %d\n", err);
				VirtualFree(ptr, 0, MEM_RELEASE);
#else
				delete[] ptr;
#endif
			}
			return(NULL);
		}
	}
	return(ptr);
#endif
}
// Release a buffer obtained from AllocMemory(). Huge-page shm segments are
// detached and removed from the registry; other buffers are freed with the
// matching allocator. Always returns 0.
int caldgemm::FreeMemory(double* ptr, bool gpuaccessible)
{
#ifndef USE_OLD_HUGE_MALLOC
	qmalloc::qFree(ptr);
#else
#ifdef WASTE_MEMORY
	ptr -= 20 * 1024 * 1024;
#endif
#ifndef _WIN32
	for (int i = 0;i < nHugeAddresses;i++)
	{
		if (huge_page_addresses[i] == ptr)
		{
			shmdt((void*) ptr);
			huge_page_addresses[i] = huge_page_addresses[--nHugeAddresses];
			return(0); // BUGFIX: was a bare 'return;', invalid in an int-returning function
		}
	}
#endif
#ifdef _WIN32
	VirtualFree(ptr, 0, MEM_RELEASE);
#else
	delete[] ptr;
#endif
#endif
	return(0);
}
// Print performance statistics of the last DGEMM run (taken from finishData):
// overall system Gflops, the CPU/GPU flops split for hybrid runs, and optionally
// verbose per-phase timings. Also maintains the running average avggflops and
// updates the GPU-ratio estimate used by the scheduler.
void caldgemm::displayMatrixTiming(const char* name)
{
	// 2*Width+2 flops per output element (multiply-add over Width plus beta/alpha handling).
	double gflops_CPU = (double) 1e-09 * finishData->orig_m * finishData->orig_n * (2 * Config->Width + 2) * (double) Config->Iterations / finishData->System;
	avggflops = ((double) avgngflops * avggflops + gflops_CPU) / (double) (avgngflops + 1);
	avgngflops++;
	if (!Config->Quiet || (Config->DisplayTiming /*&& matrix_m * matrix_n >= 16 * 24 * 1024 * 1024*/)) fprintf(STD_OUT, "%sProgram: %s Sizes - A: %lldx%lld B: %lldx%lld C:%lldx%lld (Host: %s) System Time %2.3f System Gflops %2.3f\n", Config->PreOut, name,
		(long long int) finishData->orig_m, (long long int) Config->Width, (long long int) Config->Width, (long long int) finishData->orig_n, (long long int) finishData->orig_m, (long long int) finishData->orig_n, hostname, finishData->System, gflops_CPU);
	if (Config->UseCPU == true && Config->UseGPU == true)
	{
		// Split the flops between the CPU and GPU parts; the formulas mirror the
		// scheduler's work division for the m-split and n-split cases.
		double flopsc, flopsg;
		if (finishData->CPUOnlyRun)
		{
			flopsc = (double) 1e-09 * finishData->orig_m * finishData->orig_n * (2 * Config->Width + 2) * Config->Iterations / finishData->CPUTimer;
			flopsg = 0.0;
		}
		else if (finishData->DGEMM_split_m)
		{
			flopsc = (double) 1e-09 * (finishData->dynamic_run * finishData->dynamic_size + finishData->cblas_size * finishData->matrix_n + (finishData->matrix_n % finishData->SmallTileHeight) * (finishData->matrix_m - finishData->cblas_size) + finishData->dynamic_run2 * Config->Height * Config->Height + (finishData->ExecLinpack >= 2 && Config->AlternateLookahead <= finishData->matrix_n ? Config->Width * finishData->matrix_n : 0)) * (2 * Config->Width + 2) * Config->Iterations / finishData->CPUTimer;
			flopsg = (double) 1e-09 * ((finishData->matrix_m - finishData->cblas_size) * (finishData->matrix_n - finishData->matrix_n % finishData->SmallTileHeight) - finishData->dynamic_run * finishData->dynamic_size - finishData->dynamic_run2 * Config->Height * Config->Height) * (2 * Config->Width + 2) * Config->Iterations / finishData->GPUTimer;
		}
		else
		{
			flopsc = (double) 1e-09 * (finishData->dynamic_run * finishData->dynamic_size + finishData->cblas_size * finishData->matrix_m + (finishData->matrix_m % finishData->SmallTileHeight) * (finishData->matrix_n - finishData->cblas_size) + finishData->dynamic_run2 * Config->Height * Config->Height + (finishData->ExecLinpack >= 2 && Config->AlternateLookahead <= finishData->matrix_n ? Config->Width * finishData->matrix_n : 0)) * (2 * Config->Width + 2) * Config->Iterations / finishData->CPUTimer;
			flopsg = (double) 1e-09 * ((finishData->matrix_n - finishData->cblas_size) * (finishData->matrix_m - finishData->matrix_m % finishData->SmallTileHeight) - finishData->dynamic_run * finishData->dynamic_size - finishData->dynamic_run2 * Config->Height * Config->Height) * (2 * Config->Width + 2) * Config->Iterations / finishData->GPUTimer;
		}
		// Heuristic throttling detection relative to a reference clock of 850 MHz.
		if (Config->GPUClock && finishData->matrix_m * finishData->matrix_n >= 24 * 24 * 1024 * 1024 && flopsg <= (double) 460 * (double) Config->GPUClock / (double) 850 - (double) 20)
		{
			fprintf(STD_OUT, "%sThrottling: %s (%2.3f GFlops)\n", Config->PreOut, hostname, flopsg);
		}
		//const double gpu_ratio_used_new = std::min(1.0, flopsg / (flopsc * (Timers.System.GetElapsedTime() - Timers.LinpackTimer1.GetElapsedTime() - (ExecLinpack > 1 ? Config->GPURatioMarginTimeDuringFact : Config->GPURatioMarginTime) - Timers.LinpackTimer3.GetElapsedTime()) / Timers.System.GetElapsedTime() + flopsg));
		double gpu_ratio_used_new = mymin(1.0, flopsg / (flopsc * (finishData->CPUTimer - (finishData->ExecLinpack > 1 ? Config->GPURatioMarginTimeDuringFact : Config->GPURatioMarginTime)) / finishData->TotalCPUTimer + flopsg));
		// NOTE(review): when gpu_ratio_used_new is negative this sets gpu_ratio_used to 1,
		// but the unconditional assignment at the end overwrites it again with the
		// negative value — presumably only the printout in between should see 1; verify.
		if (gpu_ratio_used_new < 0) finishData->gpu_ratio_used = 1.;
		if (!Config->Quiet || (Config->DisplayTiming /*&& matrix_m * matrix_n >= 16 * 24 * 1024 * 1024*/))
		{
			char timingoutputbase[1024];
			char *timingoutput = timingoutputbase;
			timingoutput += sprintf(timingoutput, "%sGPU Time %2.4f (%2.4f Gflops) CPU Time %2.4f (%2.4f Gflops)", Config->PreOut, finishData->GPUTimer, flopsg, finishData->CPUTimer, flopsc);
			if (finishData->ExecLinpack) timingoutput += sprintf(timingoutput, " Linpack Time: %2.4f (%d, %2.4f, %2.4f) Total CPU Time: %2.4f", finishData->LinpackTimer1, finishData->ExecLinpack, finishData->LinpackTimer2, finishData->LinpackTimer3, finishData->TotalCPUTimer);
			if (Config->TabularTiming)
			{
				timingoutput += sprintf(timingoutput, " --- GPU Ratio - Real: %2.3f Corrected: %2.3f Guessed: %2.3f , m*n: %.1E, CPU Wait Time: %2.3f", (flopsg / (flopsc + flopsg)), gpu_ratio_used_new, finishData->gpu_ratio_used, (double) (finishData->matrix_m * finishData->matrix_n), finishData->cpu_wait_time > 0.001 ? finishData->cpu_wait_time : (finishData->TotalCPUTimer - finishData->GPUTimer));
			}
			sprintf(timingoutput, "\n");
			fwrite(timingoutputbase, 1, strlen(timingoutputbase), STD_OUT);
		}
		finishData->gpu_ratio_used = gpu_ratio_used_new;
	}
	if ((!Config->Quiet || (Config->DisplayTiming /*&& matrix_n * matrix_m >= 16 * 24 * 1024 * 1024*/)) && Config->VerboseTiming)
	{
		// Per-phase throughput: kernel flops and GB/s for divide/merge/copy phases.
		// NOTE(review): this section reads the live matrix_m/matrix_n/Timers state
		// rather than finishData — confirm that is intended for verbose timing.
		double gflops = (double) 1e-09 * matrix_m * matrix_n * (2 * Config->Width - 1) * (double)Config->Iterations / Timers.Kernel.GetElapsedTime();
#ifdef CALDGEMM_BENCHMARK_KERNEL
		gflops *= (double) CALDGEMM_BENCHMARK_KERNEL;
#endif
		double copyto = Config->DivideToGPU ? 0 : ((double) 1e-09 * ((Config->Height * Timers.divideA + Config->Height * Timers.divideB) * Config->Width + Timers.divideC * Config->Height * Config->Height) * sizeof(double) * (double)Config->Iterations / Timers.CounterCopyTo.GetElapsedTime());
		double copyfrom = Config->DstMemory == 'g' ? ((double) 1e-09 * matrix_m * matrix_n * sizeof(double) * (double)Config->Iterations / Timers.CounterCopyFrom.GetElapsedTime()) : 0;
		double copyMerge = Config->MultiThread || UseOutputPthreads() == 0 ? 0 :((double) 1e-09 * matrix_m * matrix_n * sizeof(double) * (double)Config->Iterations / Timers.CounterMerge.GetElapsedTime());
		double copyDivide = UseInputPthreads() ? (double) 1e-09 * (Config->Height * Timers.divideA + Config->Height * Timers.divideB) * Config->Width * sizeof(double) * (double)Config->Iterations / Timers.CounterDivide.GetElapsedTime() : 0;
		fprintf(STD_OUT, "Times: Kernel                    Divide (%d,%d)            Merge                   Copy To                 Copy From\n", Timers.divideA, Timers.divideB);
		fprintf(STD_OUT, "       %2.4f (%2.4f Gflops)  %2.4f (%2.4f GB/s)    %2.4f (%2.4f GB/s)   %2.4f (%2.4f GB/s)   %2.4f (%2.4f Gb/s)\n", Timers.Kernel.GetElapsedTime(), gflops, Timers.CounterDivide.GetElapsedTime(), copyDivide, Timers.CounterMerge.GetElapsedTime(), copyMerge, Timers.CounterCopyTo.GetElapsedTime(), copyto, Timers.CounterCopyFrom.GetElapsedTime(), copyfrom);
		double gflops_device = 0;
		if (Timers.device_kernel)
		{
			// device_kernel is in nanoseconds, hence the 1e-09 when printing seconds.
			gflops_device = (double) matrix_m * matrix_n * (2 * Config->Width - 1) * (double)Config->Iterations / (double) Timers.device_kernel;
			fprintf(STD_OUT, "       %2.4f (%2.4f Gflops)\n", (double) Timers.device_kernel * 1e-09, gflops_device);
		}
		if (Config->TabularTiming)
		{
			fprintf(STD_OUT, "TIMES:\tw\t%lld\th\t%lld\tkernel\t%2.4f / %2.4f\tdivide\t%2.4f\tmerge\t%2.4f\tcopyto\t%2.4f\tcopyfr\t%2.4f\n", (long long int) Config->Width, (long long int) Config->Height, gflops, gflops_device, copyDivide, copyMerge, copyto, copyfrom);
		}
	}
}
// Verify the GPU result C against a CPU reference computed into D with cblas_dgemm.
// Counts mismatching elements (binned by relative error magnitude) and, for
// debugging, prints a per-tile error map. Returns 1 if no element differed, else 0.
unsigned int caldgemm::AnalyzeResults()
{
	size_t errors = 0;
	size_t total = 0;
	if (!Config->Quiet) fprintf(STD_OUT, "Verifying results can take a long time on large matrices.\n");
	HighResTimer Timer;
	Timer.Reset();
	Timer.Start();
	// Reference result: D = Alpha * op(A) * op(B) + Beta * D on the CPU.
	cblas_dgemm(CblasRowMajor, TransposeA ? CblasTrans : CblasNoTrans, TransposeB ? CblasTrans : CblasNoTrans, matrix_m, matrix_n, Config->Width, Alpha, A, A_pitch, B, B_pitch, Beta, D, C_pitch);
	Timer.Stop();
	if (!Config->Quiet) fprintf(STD_OUT, "CPU Time: %f Gflops: %f\n", Timer.GetElapsedTime(), (double)1e-09 * 2 * matrix_m * matrix_n * Config->Width / Timer.GetElapsedTime());
#ifdef TESTMODE
	fprintf(STD_OUT, "Reference Matrix:\n");
	print_submatrices(D, 12, 24, C_pitch, 1, 1, 1, 1);
#endif
	// Per-tile error counters (Config->Height x Config->Height tiles), only when tiling is active.
	int nblocksm = 0;
	int* errortiles = NULL;
	if (Config->Height)
	{
		nblocksm = matrix_m / Config->Height + 1;
		errortiles = (int*) malloc((matrix_n / Config->Height + 1) * nblocksm * sizeof(int));
		memset(errortiles, 0, (matrix_n / Config->Height + 1) * nblocksm * sizeof(int));
	}
	// errorsrel bins: [0] relative error > 0.05, [2] < 0.0001, [1] everything between.
	size_t errorsrel[3];
	memset(errorsrel, 0, 3 * sizeof(size_t));
	for (size_t i=0; i < matrix_m; i++)
	{
		for (size_t j=0; j < matrix_n; j++)
		{
			if (!isDoubleEqual(C[i * C_pitch + j],D[i * C_pitch + j]))
			{
				if (errors < 5) fprintf(STD_OUT, "Error found at row %lld, col %lld: Expected: %3.5le, Found: %3.5le, Diff: %3.5le, Relative: %3.5le\n", (long long int) i, (long long int) j, D[i * C_pitch + j], C[i * C_pitch + j], D[i * C_pitch + j] - C[i * C_pitch + j], (D[i * C_pitch + j] - C[i * C_pitch + j]) / D[i * C_pitch + j]);
				++errors;
				if (Config->Height) errortiles[j / Config->Height * nblocksm + i / Config->Height]++;
				if (fabs((C[i * C_pitch + j] - D[i * C_pitch + j]) / D[i * C_pitch + j]) > 0.05) errorsrel[0]++;
				else if (fabs((C[i * C_pitch + j] - D[i * C_pitch + j]) / D[i * C_pitch + j]) < 0.0001) errorsrel[2]++;
				else errorsrel[1]++;
			}
			++total;
		}
	}
	if (errors)
	{
		fprintf(STD_OUT, "%lld out of %lld elements were incorrect (Rel errors > 0.05: %lld, > 0.0001: %lld, rest: %lld)\n", (long long int) errors, (long long int) total, (long long int) errorsrel[0], (long long int) errorsrel[1], (long long int) errorsrel[2]);
		if (errorsrel[0] == 0)
		{
			// Only small relative deviations: treat as numerically acceptable.
			fprintf(STD_OUT, "Passed with Warnings!!!\n");
		}
		else
		{
			fprintf(STD_OUT, "FAILED (Host %s)\n", hostname);
		}
	}
	else if (!Config->Quiet)
	{
		fprintf(STD_OUT, "Passed!\n");
	}
	if (!Config->NoPerformanceWarnings && (errors || Config->Debug) && Config->Height)
	{
		fprintf(STD_OUT, "GPU output matrix\n");
		print_submatrices(C, matrix_n, matrix_m, C_pitch, 1, 1, Config->Height, Config->Height);
		fprintf(STD_OUT, "Reference matrix\n");
		print_submatrices(D, matrix_n, matrix_m, C_pitch, 1, 1, Config->Height, Config->Height, C);
	}
	if (!Config->NoPerformanceWarnings && errors && Config->Height)
	{
		// Print how many wrong elements fall into each Height x Height tile.
		fprintf(STD_OUT, "Number of errors in tiles\n");
		for (size_t i = 0;i < matrix_m;i += Config->Height)
		{
			for (size_t j = 0;j < matrix_n;j += Config->Height)
			{
				fprintf(STD_OUT, "%8d\t", errortiles[j / Config->Height * nblocksm + i / Config->Height]);
			}
			fprintf(STD_OUT, "\n");
		}
	}
	if (Config->Height) free(errortiles);
	return(errors == 0);
}
// Compare two doubles with a relative tolerance that loosens as the magnitudes
// shrink. Returns false if either value is NaN/Inf.
bool caldgemm::isDoubleEqual(double a, double b)
{
	if (!qIsFinite(a) || !qIsFinite(b)) return(false);
	double valmax = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
	if (valmax < 1e-15)
	{
		// NOTE(review): the 1e16 threshold accepts any finite difference, i.e.
		// values whose magnitudes are both below 1e-15 always compare equal.
		// Possibly intentional (treat denormal-range values as equal), but the
		// constant looks like a typo for 1e-16 — verify before changing.
		return(fabs(a - b) < 1e16);
	}
	else if (valmax < 1e-9)
	{
		// 5% relative tolerance for very small magnitudes.
		return(fabs((a - b)/valmax) < 5e-2);
	}
	else if(valmax < 1e-8)
	{
		return (fabs((a-b)/valmax) < 1e-3);
	}
	else
	{
		return (fabs((a-b)/valmax) < 1e-4);
	}
}
// Prepare the input buffers of tile k for device num_device, queue slot j.
// Decides independently for the A row-block and the B column-block whether a new
// divide/transfer is required (depending on DGEMM_favor_m, buffer reuse and the
// number of available BBuffers), registers the chosen buffer slots in the
// buffer_pointers_* tables and then invokes the backend preparation.
// Returns 0 on success, 1 on error.
int caldgemm::DGEMM_prepare(size_t k, int j, unsigned int num_device CALDGEMM_DIVBUFA)
{
#ifdef CALDGEMM_BENCHMARK_KERNEL
	return(0);
#endif
	size_t blockm, blockn;
	DGEMM_getblocks(k, blockm, blockn);

	// buffersSufficiant0: minor-direction blocks can be cached at all;
	// buffersSufficiant: enough BBuffers remain to cache the next minor block.
	bool buffersSufficiant0, buffersSufficiant;
#ifdef REUSE_BBUFFERS
	if (DGEMM_favor_m)
	{
		buffersSufficiant0 = true;
		buffersSufficiant = next_buffer_B[num_device] < bbuffers[num_device];
	}
	else
	{
		buffersSufficiant0 = buffersSwitchable;
		buffersSufficiant = buffersSwitchable && next_buffer_A[num_device] < bbuffers[num_device];
	}
#else
	buffersSufficiant0 = buffersSufficiant = false;
#endif

	if (Config->Debug) fprintf(STD_OUT, "Running Preprocessing device = %d k = %lld\n", num_device, (long long int) k);
	//if (Config->Debug) fprintf(STD_OUT, "device %d Favor %d major %d minor %d blockm %d blockn %d\n", (int) num_device, (int) DGEMM_favor_m, (int) buffersMajor[num_device], (int) buffersMinor[num_device][DGEMM_favor_m ? blockn : blockm], (int) blockm, (int) blockn);

	// A block must be uploaded if it is the next major block, or the minor cache misses.
	const bool prepareM = DGEMM_favor_m ? (buffersMajor[num_device] < (signed long long int) blockm) : (!buffersSufficiant0 || buffer_pointers_A[num_device][blockm] == -1);
	const bool prepareN = DGEMM_favor_m ? (!buffersSufficiant0 || buffer_pointers_B[num_device][blockn] == -1) : (buffersMajor[num_device] < (signed long long int) blockn);

	if (prepareM)
	{
		WaitForLASWP(blockm);
		if (DGEMM_favor_m) buffersMajor[num_device] = blockm;
		else if (buffersSufficiant0)
		{
			const int buffer_pos = next_buffer_A[num_device] % (buffersSufficiant ? bbuffers[num_device] : ibuffercount);
			// BUGFIX: the occupancy check previously indexed with
			// next_buffer_A[num_device] % bbuffers[num_device], which disagrees with
			// buffer_pos whenever only ibuffercount buffers are usable. Use buffer_pos
			// consistently, exactly as the symmetric B branch below does.
			if (buffersMinor[num_device][buffer_pos] != -1)
			{
				static bool bbuffer_warning_shown = false;
				if (Config->Debug || !(Config->NoPerformanceWarnings || bbuffer_warning_shown))
				{
					bbuffer_warning_shown = true;
					fprintf(STD_OUT, "WARNING: Insufficient BBuffers, replacing blockm %d by %d in buffer %d\n", buffersMinor[num_device][buffer_pos], (int) blockm, buffer_pos);
				}
				// Invalidate the block that previously occupied this buffer slot.
				buffer_pointers_A[num_device][buffersMinor[num_device][buffer_pos]] = -1;
			}
			buffersMinor[num_device][buffer_pos] = blockm;
		}
		buffer_pointers_A[num_device][blockm] = next_buffer_A[num_device];
	}
	else if (Config->Debug) fprintf(STD_OUT, "\tSkipping preprocessing part of A (device = %d, k = %lld, j = %d, m = %lld, n = %lld)\n", num_device, (long long int) k, j, (long long int) blockm, (long long int) blockn);

	if (prepareN)
	{
		if (!DGEMM_favor_m) buffersMajor[num_device] = blockn;
		else if (buffersSufficiant0)
		{
			const int buffer_pos = next_buffer_B[num_device] % (buffersSufficiant ? bbuffers[num_device] : ibuffercount);
			if (buffersMinor[num_device][buffer_pos] != -1)
			{
				static bool bbuffer_warning_shown = false;
				if (Config->Debug || !(Config->NoPerformanceWarnings || bbuffer_warning_shown))
				{
					bbuffer_warning_shown = true;
					fprintf(STD_OUT, "WARNING: Insufficient BBuffers, replacing blockn %d by %d in buffer %d\n", buffersMinor[num_device][buffer_pos], (int) blockn, buffer_pos);
				}
				buffer_pointers_B[num_device][buffersMinor[num_device][buffer_pos]] = -1;
			}
			buffersMinor[num_device][buffer_pos] = blockn;
		}
		buffer_pointers_B[num_device][blockn] = next_buffer_B[num_device];
	}
	else if (Config->Debug) fprintf(STD_OUT, "\tSkipping preprocessing part of B (device = %d, k = %lld, j = %d, m = %lld, n = %lld)\n", num_device, (long long int) k, j, (long long int) blockm, (long long int) blockn);

	if (prepareM || prepareN) dma_pending[num_device][j] = true;
	if(DGEMM_prepare_backend(k, j, num_device, prepareM, prepareN, buffersSufficiant, buffersSufficiant0 CALDGEMM_DIVBUFB)) return(1);

	if (prepareM) next_buffer_A[num_device]++;
	if (prepareN) next_buffer_B[num_device]++;

	return(0);
}
// Dump the effective configuration via the PRINT_CONFIG_* macros (which read the
// local 'myConfig' and, when oldConfig is given, print only changed values).
// With newConfig == NULL the currently active Config is printed; the backend's
// own options are appended at the end.
void caldgemm::printConfig(caldgemm::caldgemm_config* newConfig, caldgemm::caldgemm_config* oldConfig)
{
	caldgemm_config* myConfig = newConfig ? newConfig : Config;

	PRINT_CONFIG_INT(AsyncDMA);
	PRINT_CONFIG_INT(PipelinedOperation);
	PRINT_CONFIG_INT(PipelinedMidMarker);
	PRINT_CONFIG_INT(PipelineDoubleBuffer);
	PRINT_CONFIG_INT(DivideToGPU);
	PRINT_CONFIG_CHAR(DstMemory);
	PRINT_CONFIG_INT(ImplicitDriverSync);
	PRINT_CONFIG_INT(UseDMAFetchQueue);
	PRINT_CONFIG_INT(DynamicSched);
	PRINT_CONFIG_INT(SecondPhaseDynamicRuns);
	PRINT_CONFIG_INT(ThirdPhaseDynamicRuns);
	PRINT_CONFIG_INT(ThirdPhaseThreshold);
	PRINT_CONFIG_INT(KeepBuffersMapped);
	PRINT_CONFIG_INT(MemPolicy);
	PRINT_CONFIG_INT(MultiThread);
	PRINT_CONFIG_INT(MultiThreadDivide);
	PRINT_CONFIG_INT(ImprovedScheduler);
	PRINT_CONFIG_INT(ImprovedSchedulerBalance);
	PRINT_CONFIG_INT(SimpleGPUQueuing);
	PRINT_CONFIG_INT(AlternateSimpleQueuing);
	PRINT_CONFIG_INT(AlternateSimpleQueuingMulti);
	PRINT_CONFIG_INT(ParallelDMA);
	PRINT_CONFIG_INT(GroupParallelDMA);
	PRINT_CONFIG_DOUBLE(GPURatio);
	PRINT_CONFIG_DOUBLE(GPURatioDuringFact);
	PRINT_CONFIG_DOUBLE(GPURatioMax);
	PRINT_CONFIG_DOUBLE(GPURatioMarginTime);
	PRINT_CONFIG_DOUBLE(GPURatioMarginTimeDuringFact);
	PRINT_CONFIG_DOUBLE(GPURatioLookaheadSizeMod);
	PRINT_CONFIG_INT(GPURatioPenalties);
	PRINT_CONFIG_DOUBLE(GPURatioPenaltyFactor);
	PRINT_CONFIG_INT(MinimizeCPUPart);
	PRINT_CONFIG_INT(MinimizeCPUDuringFact);
	PRINT_CONFIG_INT(UseCPU);
	PRINT_CONFIG_INT(UseGPU);
	PRINT_CONFIG_INT(RereserveLinpackCPU);
	PRINT_CONFIG_INT(GPU_C);
	PRINT_CONFIG_INT(NoConcurrentKernels);
	PRINT_CONFIG_INT(OpenCLPlatform);
	PRINT_CONFIG_INT(DeviceNum);
	PRINT_CONFIG_INT(NumDevices);
	PRINT_CONFIG_INT(NumActiveDevices);
	PRINT_CONFIG_LOOP_INT(DeviceNums, NumDevices);
	PRINT_CONFIG_INT(max_bbuffers);
	PRINT_CONFIG_INT(PreallocData);
	PRINT_CONFIG_INT(CPUInContext);
	PRINT_CONFIG_INT(Debug);
	PRINT_CONFIG_INT(DumpMatrix);
	PRINT_CONFIG_INT(Iterations);
	PRINT_CONFIG_INT(Verify);
	PRINT_CONFIG_INT(SkipCPUProcessing);
	PRINT_CONFIG_INT(ForceKernelVariant);
	PRINT_CONFIG_LOOP_INT(GPUMapping, NumDevices);
	PRINT_CONFIG_LOOP_INT(PostprocessMapping, NumDevices);
	PRINT_CONFIG_LOOP_INT(AllocMapping, NumDevices);
	PRINT_CONFIG_LOOP_INT(DMAMapping, NumDevices);
	PRINT_CONFIG_INT(PinMainThread);
	PRINT_CONFIG_INT(PinDeviceRuntimeThreads);
	PRINT_CONFIG_INT(PinBroadcastThread);
	PRINT_CONFIG_INT(RepinDuringActiveWaitForEvent);
	PRINT_CONFIG_INT(RepinMainThreadAlways);
	PRINT_CONFIG_INT(SpawnGPUThread);
	PRINT_CONFIG_INT(SleepDuringActiveWait);
	PRINT_CONFIG_INT(ThreadSaveDriver);
	PRINT_CONFIG_INT(PinCPU);
	PRINT_CONFIG_INT(ForceNumCPUThreads);
	PRINT_CONFIG_INT(CPUCoreOffset);
	PRINT_CONFIG_INT(SlowCPU);
	PRINT_CONFIG_INT(OutputThreads);
	PRINT_CONFIG_INT(NumaPinning);
	PRINT_CONFIG_INT(AlternateLookahead);
	PRINT_CONFIG_INT(AsyncSideQueue);
	PRINT_CONFIG_INT(AsyncSideQueueBalance);
	PRINT_CONFIG_INT(AsyncDGEMMThreshold);
	PRINT_CONFIG_INT(AsyncDTRSMThreshold);
	PRINT_CONFIG_INT(AsyncDTRSM);
	PRINT_CONFIG_INT(AsyncSideQueueUseInactiveDeviceSet);
	PRINT_CONFIG_INT(Use3rdPartyTranspose);
	PRINT_CONFIG_INT(Height);
	PRINT_CONFIG_INT(Width);
	PRINT_CONFIG_INT(AutoHeight);
	PRINT_CONFIG_INT(SmallTiles);
	PRINT_CONFIG_INT(Disassemble);
	PRINT_CONFIG_INT(PrintILKernel);
	PRINT_CONFIG_INT(AsyncTiming);
	PRINT_CONFIG_INT(DisplayTiming);
	PRINT_CONFIG_INT(NoPerformanceWarnings);
	PRINT_CONFIG_STRING(PreOut);
	PRINT_CONFIG_INT(Quiet);
	PRINT_CONFIG_INT(TabularTiming);
	PRINT_CONFIG_INT(VerboseTiming);
	PRINT_CONFIG_INT(LinpackNodes);
	PRINT_CONFIG_INT(MPIRank);
	PRINT_CONFIG_INT(GPUClock);
	PRINT_CONFIG_INT(HPLFactorizeRestrictCPUs);
	PRINT_CONFIG_INT(LASWPSleep);
	PRINT_CONFIG_LOOP_INT(ExcludeCPUCores, nExcludeCPUCores);
	PRINT_CONFIG_INT(ShowConfig);
	PRINT_CONFIG_INT(ShowThreadPinning);

	// Derived values stored on the caldgemm object itself, not in the config struct.
	PRINT_CONFIG_INT_THIS(BufferWidth);
	PRINT_CONFIG_INT_THIS(BufferHeight);

	if (myConfig->config_backend && (oldConfig == NULL || oldConfig->config_backend))
	{
		myConfig->config_backend->printConfig(oldConfig ? oldConfig->config_backend : NULL);
	}
}
// Backend hook: maximum GPU temperature; the generic base class knows no sensors.
double caldgemm::getMaxGPUTemperature()
{
	return(0.);
}

// Backend hook invoked at the start of each RunCALDGEMM call; default: nothing to do.
int caldgemm::RunCALDGEMM_Init()
{
	return(0);
}

// Backend hook invoked at the end of each RunCALDGEMM call; default: nothing to do.
int caldgemm::RunCALDGEMM_Exit()
{
	return(0);
}
// Initialize KernelSettings with the compile-time defaults: transposition flags
// of A/B, texture buffer usage, tiling and work-group sizes, and the minimal
// tile / k granularity the kernels support.
void caldgemm::SetDefaultKernelSettings()
{
#ifdef CALDGEMM_TRANSPOSED_A
	KernelSettings.transposeA = true;
#else
	KernelSettings.transposeA = false;
#endif
#ifdef CALDGEMM_TRANSPOSED_B
	KernelSettings.transposeB = true;
#else
	KernelSettings.transposeB = false;
#endif
	KernelSettings.texture_buffers = true;
	KernelSettings.tiling_x = TILING_X;
	KernelSettings.tiling_y = TILING_Y;
	KernelSettings.group_size_x = 16;
	KernelSettings.group_size_y = 16;
	KernelSettings.min_tile_size = CALDGEMM_MIN_TILE_DIM;
	KernelSettings.min_k = 4;
}
// Backend hooks to override the automatic buffer-height selection; returning 0
// means "no custom value, use the generic logic".
int caldgemm::CaldgemmCustomAutoHeight(size_t MaxGpuM, size_t MaxGpuN, int nDevices) {return 0;}
int caldgemm::CaldgemmCustomModHeight(size_t MOD_OVER, size_t MOD_GPU) {return 0;}
// Parse command-line style options into Config. The actual option handling is
// textually included from caldgemm_parse_parameters.h (shared with the test
// binaries); it operates on argc/argv and Config.
int caldgemm::ParseParameters(unsigned int argc, char** argv, caldgemm_config* Config)
{
#include "caldgemm_parse_parameters.h"
	return(0);
}
// Tokenize a whitespace-separated option string into an argv-style array and
// forward it to ParseParameters(argc, argv, Config), then let the backend
// parse its own options. Returns the combined (OR-ed) error status.
int caldgemm::ParseParameters(char* params, caldgemm_config* Config)
{
	if (Config->Debug) fprintf(STD_OUT, "Parsing CALDGEMM Parameters: '%s'\n", params);
	char* tmpParams = new char[strlen(params) + 1]; //This memory will be leaked, in case of string parameters we need to keep a copy, and we do not know how long params will live.
	strcpy(tmpParams, params);
	int argc = 1;
	char** argv = new char*[strlen(params) / 2 + 3];
	char* tmppos = tmpParams;
	argv[0] = (char*) "caldgemm"; // cast: a string literal must not bind to a mutable char*
	while (*tmppos != 0)
	{
		// BUGFIX: the separator test compared against ' ' twice (a tab literal was
		// apparently mangled into a space), so tab-separated options were never split.
		while (*tmppos == ' ' || *tmppos == '\t') tmppos++;
		if (*tmppos == 0) break;
		argv[argc++] = tmppos;
		while (*tmppos != ' ' && *tmppos != '\t' && *tmppos != 0) tmppos++;
		if (*tmppos) *(tmppos++) = 0;
	}
	argv[argc] = NULL;
	int retVal = ParseParameters(argc, argv, Config);
	delete[] argv;
	retVal |= Config->InitializeBackendOptions();
	return(retVal);
}
// Capability queries, overridden by the individual backends as appropriate.
int caldgemm::AllowCPUFallback() {return(1);}		// generic code may fall back to CPU DGEMM
int caldgemm::SimpleQueuingAvailable() {return(0);}	// SimpleGPUQueuing unsupported by default
int caldgemm::PipelinedModeAvailable() {return(0);}	// pipelined operation unsupported by default
int caldgemm::AsyncModeAvailable() {return(0);}		// async side queue unsupported by default
// For SimpleGPUQueuing: decide whether the kernel for tile (blockm, blockn) needs
// an explicit completion event. Returns true when, within the remainder of the
// current row (DGEMM_favor_m) or column of tiles starting at k, fewer than
// ibuffercount tiles are scheduled on this device — i.e. the input buffer could
// be reused before the kernel is known to have finished.
bool caldgemm::NeedSimpleQueueKernelEvent(int blockm, int blockn, int k, int device)
{
	int mb = (gpu_m + Config->Height - 1) / Config->Height;	// number of row blocks
	int nb = (gpu_n + Config->Height - 1) / Config->Height;	// number of column blocks
	if (DGEMM_favor_m ? (blockm != mb - 1) : (blockn != nb - 1))
	{
		// First tile index past the end of the row/column containing k.
		int kklast = k + (DGEMM_favor_m ? nb : mb);
		kklast -= kklast % (DGEMM_favor_m ? nb : mb);

		// Count tiles in [k, kklast) assigned to this device, capped at ibuffercount.
		int num = 0;
		for (int kk = k;kk < kklast;kk++)
		{
			if (tileDistribution[kk] == device)
			{
				if (++num == ibuffercount) break;
			}
		}
		if (num < ibuffercount)
		{
			return(true);
		}
	}
	return(false);
}
#ifndef USE_GOTO_BLAS
// CPU restriction mode for the cblas_* wrappers below: 0 = no restriction;
// nonzero enables the per-call thread-count heuristics; values > 2 additionally
// cap the helper calls at 8 OpenMP threads.
static int caldgemm_restrict_cpus = 0;
// Mirrors the thread count last requested via goto_set_num_threads().
static int current_num_threads = get_num_procs();
// Wrapper around cblas_dscal that temporarily caps the OpenMP thread count at 8
// when CPU restriction mode (> 2) is active and more than 8 threads are in use.
void cblas_dscala(blasint N, double alpha, double *X, blasint incX)
{
	const bool capped = caldgemm_restrict_cpus > 2 && current_num_threads > 8;
	const int restore = capped ? current_num_threads : 0;
	if (capped) omp_set_num_threads(8);
	cblas_dscal(N, alpha, X, incX);
	if (restore) omp_set_num_threads(restore);
}
// Wrapper around cblas_daxpy that temporarily caps the OpenMP thread count at 8
// when CPU restriction mode (> 2) is active and more than 8 threads are in use.
void cblas_daxpya(blasint n, double alpha, double *x, blasint incx, double *y, blasint incy)
{
	const bool capped = caldgemm_restrict_cpus > 2 && current_num_threads > 8;
	const int restore = capped ? current_num_threads : 0;
	if (capped) omp_set_num_threads(8);
	cblas_daxpy(n, alpha, x, incx, y, incy);
	if (restore) omp_set_num_threads(restore);
}
// Wrapper around cblas_dgemm that picks an OpenMP thread count from the estimated
// work M*N*K when CPU restriction mode is active, then restores the old count.
void cblas_dgemma(CBLAS_ENUM CBLAS_ORDER Order, CBLAS_ENUM CBLAS_TRANSPOSE TransA, CBLAS_ENUM CBLAS_TRANSPOSE TransB, blasint M, blasint N, blasint K, double alpha, double *A, blasint lda, double *B, blasint ldb, double beta, double *C, blasint ldc)
{
	int oldthreads = 0;
	if (caldgemm_restrict_cpus)
	{
		int nthreads = 0;
		long long int tflops = (long long int) M * (long long int) N * (long long int) K;
		// Empirical thread ladder. NOTE: the conditions are order-sensitive and
		// deliberately non-monotonic — work in (2e6, 4e6) gets 3 threads, while
		// (2e5, 2e6] gets 4. Work above 262144000 leaves nthreads at 0 (no change).
		if (tflops <= 16384) nthreads = 1;
		else if (tflops <= 65536) nthreads = 2;
		else if (tflops < 200000 || (tflops > 2000000 && tflops < 4000000)) nthreads = 3;
		else if (tflops <= 2000000) nthreads = 4;
		else if (tflops <= 26542080) nthreads = 8;
		else if (tflops <= 56623104) nthreads = 12;
		else if (tflops <= 89915392) nthreads = 16;
		else if (tflops <= 262144000) nthreads = 20;
		// Only ever lower the thread count, never raise it.
		if (nthreads && nthreads < current_num_threads)
		{
			oldthreads = current_num_threads;
			omp_set_num_threads(nthreads);
		}
	}
	cblas_dgemm(Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
	if (oldthreads) omp_set_num_threads(oldthreads);
}
// Wrapper around cblas_dgemv that picks an OpenMP thread count from the matrix
// shape when CPU restriction mode is active, then restores the old count.
void cblas_dgemva(CBLAS_ENUM CBLAS_ORDER order, CBLAS_ENUM CBLAS_TRANSPOSE trans, blasint m, blasint n, double alpha, double *a, blasint lda, double *x, blasint incx, double beta, double *y, blasint incy)
{
	int oldthreads = 0;
	if (caldgemm_restrict_cpus)
	{
		int nthreads = 0;
		if (n >= 4 * m)
		{
			// Very wide matrices: scale by n only; larger problems keep the current count.
			long long int tflops = (long long int) n * 64;
			if (tflops <= 458752) nthreads = 4;
			else if (tflops <= 655360) nthreads = 8;
		}
		else
		{
			long long int tflops = (long long int) m * (long long int) n;
			if (tflops < 102400) nthreads = 1;
			else if (tflops < 3686400) nthreads = 3;
			else nthreads = 4;
		}
		if (caldgemm_restrict_cpus > 2 && nthreads > 8) nthreads = 8;
		// Only ever lower the thread count, never raise it.
		if (nthreads && nthreads < current_num_threads)
		{
			oldthreads = current_num_threads;
			omp_set_num_threads(nthreads);
		}
	}
	cblas_dgemv(order, trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
	if (oldthreads) omp_set_num_threads(oldthreads);
}
// Wrapper around cblas_dtrsm that limits the OpenMP thread count depending on the
// estimated work N*N*M when CPU restriction mode is active; restriction mode > 2
// additionally caps the count at 8. The previous thread count is restored afterwards.
void cblas_dtrsma(CBLAS_ENUM CBLAS_ORDER Order, CBLAS_ENUM CBLAS_SIDE Side, CBLAS_ENUM CBLAS_UPLO Uplo, CBLAS_ENUM CBLAS_TRANSPOSE TransA, CBLAS_ENUM CBLAS_DIAG Diag, blasint M, blasint N, double alpha, double *A, blasint lda, double *B, blasint ldb)
{
	int restore = 0;
	if (caldgemm_restrict_cpus)
	{
		const long long int tflops = (long long int) N * (long long int) N * (long long int) M;
		int cap = 0;	// 0 = leave the thread count untouched
		if (tflops <= 32768) cap = 1;
		else if (tflops <= 110592) cap = 3;
		else if (tflops <= 100000000) cap = 4;
		else if (tflops <= 1000000000) cap = 16;
		if (caldgemm_restrict_cpus > 2 && cap > 8) cap = 8;
		if (cap && cap < current_num_threads)
		{
			restore = current_num_threads;
			omp_set_num_threads(cap);
		}
	}
	cblas_dtrsm(Order, Side, Uplo, TransA, Diag, M, N, alpha, A, lda, B, ldb);
	if (restore) omp_set_num_threads(restore);
}
// Set the CPU restriction mode consumed by the cblas_* wrappers above.
void caldgemm_goto_restrict_cpus(int val)
{
	caldgemm_restrict_cpus = val;
}

// Replacement for GotoBLAS' goto_set_num_threads(): records the requested thread
// count and forwards it to OpenMP (and to MKL when built with USE_MKL).
void goto_set_num_threads(int num)
{
	current_num_threads = num;
	omp_set_num_threads(num);
#ifdef USE_MKL
	mkl_set_num_threads(num);
#endif
}
#endif
// vim: ts=4 sw=4 noet sts=4 tw=100<|fim▁end|> | |
<|file_name|>rust_string.rs<|end_file_name|><|fim▁begin|>extern crate tor_util;
extern crate libc;
use std::ffi::CString;
use tor_util::RustString;
/// Converting a `CString` into a `RustString` and back must preserve the
/// underlying C string: the pointer must reference 8 bytes of content and the
/// round-tripped value must equal the original.
/// (Restored the `r` binding that was lost to an extraction artifact — the
/// original body referenced an undefined `r` and a stray duplicate `r2`.)
#[test]
fn rust_string_conversions_preserve_c_string() {
    let s = CString::new("asdf foo").unwrap();
    let r = RustString::from(s.clone());
    let c = r.as_ptr();
    assert_eq!(unsafe { libc::strlen(c) }, 8);
    let c_str = r.into();
    assert_eq!(s, c_str);
}
/// Round-tripping the empty string through `RustString` must keep its zero
/// length and compare equal to the original `CString`.
#[test]
fn empty_string() {
    let s = CString::new("").unwrap();
    let r = RustString::from(s.clone());
    let c = r.as_ptr();
    assert_eq!(unsafe { libc::strlen(c) }, 0);
    let c_str = r.into();
    assert_eq!(s, c_str);
}
#[test]
fn c_string_with_unicode() {
// The euro sign is three bytes
let s = CString::new("asd€asd").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 9);
let c_str = r.into();
assert_eq!(s, c_str);
}<|fim▁end|> | let r = RustString::from(s.clone()); |
<|file_name|>simplifications_explicit.py<|end_file_name|><|fim▁begin|>from miasm.core.utils import size2mask
from miasm.expression.expression import ExprInt, ExprCond, ExprCompose, \
TOK_EQUAL
def simp_ext(_, expr):
if expr.op.startswith('zeroExt_'):
arg = expr.args[0]
if expr.size == arg.size:
return arg
return ExprCompose(arg, ExprInt(0, expr.size - arg.size))
if expr.op.startswith("signExt_"):
arg = expr.args[0]
add_size = expr.size - arg.size
new_expr = ExprCompose(
arg,
ExprCond(
arg.msb(),
ExprInt(size2mask(add_size), add_size),
ExprInt(0, add_size)
)
)
return new_expr
return expr
def simp_flags(_, expr):
args = expr.args
if expr.is_op("FLAG_EQ"):
return ExprCond(args[0], ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("FLAG_EQ_AND"):
op1, op2 = args
return ExprCond(op1 & op2, ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("FLAG_SIGN_SUB"):
return (args[0] - args[1]).msb()
elif expr.is_op("FLAG_EQ_CMP"):
return ExprCond(
args[0] - args[1],
ExprInt(0, 1),
ExprInt(1, 1),
)
elif expr.is_op("FLAG_ADD_CF"):
op1, op2 = args
res = op1 + op2
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUB_CF"):
op1, op2 = args
res = op1 - op2
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_ADD_OF"):
op1, op2 = args
res = op1 + op2
return (((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUB_OF"):
op1, op2 = args
res = op1 - op2
return (((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_EQ_ADDWC"):
op1, op2, op3 = args
return ExprCond(
op1 + op2 + op3.zeroExtend(op1.size),
ExprInt(0, 1),
ExprInt(1, 1),
)
elif expr.is_op("FLAG_ADDWC_OF"):
op1, op2, op3 = args
res = op1 + op2 + op3.zeroExtend(op1.size)
return (((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUBWC_OF"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return (((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_ADDWC_CF"):
op1, op2, op3 = args
res = op1 + op2 + op3.zeroExtend(op1.size)
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUBWC_CF"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_SIGN_ADDWC"):
op1, op2, op3 = args
return (op1 + op2 + op3.zeroExtend(op1.size)).msb()
elif expr.is_op("FLAG_SIGN_SUBWC"):
op1, op2, op3 = args<|fim▁hole|>
elif expr.is_op("FLAG_EQ_SUBWC"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return ExprCond(res, ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("CC_U<="):
op_cf, op_zf = args
return op_cf | op_zf
elif expr.is_op("CC_U>="):
op_cf, = args
return ~op_cf
elif expr.is_op("CC_S<"):
op_nf, op_of = args
return op_nf ^ op_of
elif expr.is_op("CC_S>"):
op_nf, op_of, op_zf = args
return ~(op_zf | (op_nf ^ op_of))
elif expr.is_op("CC_S<="):
op_nf, op_of, op_zf = args
return op_zf | (op_nf ^ op_of)
elif expr.is_op("CC_S>="):
op_nf, op_of = args
return ~(op_nf ^ op_of)
elif expr.is_op("CC_U>"):
op_cf, op_zf = args
return ~(op_cf | op_zf)
elif expr.is_op("CC_U<"):
op_cf, = args
return op_cf
elif expr.is_op("CC_NEG"):
op_nf, = args
return op_nf
elif expr.is_op("CC_EQ"):
op_zf, = args
return op_zf
elif expr.is_op("CC_NE"):
op_zf, = args
return ~op_zf
elif expr.is_op("CC_POS"):
op_nf, = args
return ~op_nf
return expr<|fim▁end|> | return (op1 - (op2 + op3.zeroExtend(op1.size))).msb()
|
<|file_name|>BaseTestVerticle.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2016 The Simple File Server Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/<|fim▁hole|>import io.vertx.core.Context;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.logging.Logger;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.Rule;
import org.junit.runner.RunWith;
import org.sfs.RunBootedTestOnContextRx;
import org.sfs.Server;
import org.sfs.TestSubscriber;
import org.sfs.VertxContext;
import rx.Observable;
import java.nio.file.Path;
import java.util.concurrent.Callable;
import static io.vertx.core.logging.LoggerFactory.getLogger;
@RunWith(VertxUnitRunner.class)
public class BaseTestVerticle {
private static final Logger LOGGER = getLogger(BaseTestVerticle.class);
@Rule
public RunBootedTestOnContextRx runTestOnContext = new RunBootedTestOnContextRx();
public VertxContext<Server> vertxContext() {
return runTestOnContext.getVertxContext();
}
public HttpClient httpClient() {
return runTestOnContext.getHttpClient();
}
public Vertx vertx() {
return vertxContext().vertx();
}
public Path tmpDir() {
return runTestOnContext.getTmpDir();
}
public void runOnServerContext(TestContext testContext, RunnableWithException callable) {
Async async = testContext.async();
Context c = vertxContext().verticle().getContext();
c.runOnContext(event -> {
try {
callable.run();
async.complete();
} catch (Exception e) {
testContext.fail(e);
}
});
}
public void runOnServerContext(TestContext testContext, Callable<Observable<Void>> callable) {
Async async = testContext.async();
Context c = vertxContext().verticle().getContext();
c.runOnContext(event -> {
try {
callable.call().subscribe(new TestSubscriber(testContext, async));
} catch (Exception e) {
testContext.fail(e);
}
});
}
public interface RunnableWithException {
void run() throws Exception;
}
}<|fim▁end|> |
package org.sfs.integration.java;
|
<|file_name|>testkit_test.go<|end_file_name|><|fim▁begin|>// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|>// See the License for the specific language governing permissions and
// limitations under the License.
package testkit
import (
"testing"
"github.com/pingcap/check"
)
var _ = check.Suite(&testKitSuite{})
func TestT(t *testing.T) {
check.TestingT(t)
}
type testKitSuite struct {
}
func (s testKitSuite) TestSort(c *check.C) {
result := &Result{
rows: [][]string{{"1", "1", "<nil>", "<nil>"}, {"2", "2", "2", "3"}},
c: c,
comment: check.Commentf(""),
}
result.Sort().Check(Rows("1 1 <nil> <nil>", "2 2 2 3"))
}<|fim▁end|> | |
<|file_name|>person.cpp<|end_file_name|><|fim▁begin|>#include "person.h"
Person::Person()
{
hunger=0;
toilet=0;
WaitForDo=0;
}
void Person::AddToDo(Deal todo, float time)
{
ToDoList.append(qMakePair(todo,time));
if (WaitForDo==0) WaitForDo=time;
}
void Person::CheckDeal()
{
WaitForDo-=1;
if (WaitForDo<0)
{
if (!ToDoList.isEmpty())
{
ToDoList.removeFirst();
while(true)
{
if((WaitForDo+ToDoList.at(0).second)>0) break;
if (!ToDoList.isEmpty()) ToDoList.removeFirst();
}
}
else WaitForDo=0;
}
}
Deal::Deal()
{
name="noname";
timeToDeal=0;
}
Deal::Deal(int time, QString n)
{
name=n;
timeToDeal=time;
}
bool Deal::Execution()
{
return true;
}<|fim▁hole|>{
hunger-=NutritionalValue;
Deal noname(1,"n");
return noname;
}<|fim▁end|> |
Deal Person::Eat(float NutritionalValue, float timeToEat) |
<|file_name|>sign_in_totp_code.js<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
const $ = require('jquery');
const _ = require('underscore');
const {assert} = require('chai');
const Account = require('models/account');
const AuthErrors = require('lib/auth-errors');
const Backbone = require('backbone');
const BaseBroker = require('models/auth_brokers/base');
const Metrics = require('lib/metrics');
const Relier = require('models/reliers/relier');
const sinon = require('sinon');
const View = require('views/sign_in_totp_code');
const WindowMock = require('../../mocks/window');
const TOTP_CODE = '123123';
describe('views/sign_in_totp_code', () => {
let account;
let broker;
let metrics;
let model;
let notifier;
let relier;
let view;
let windowMock;
beforeEach(() => {
windowMock = new WindowMock();
relier = new Relier({
window: windowMock
});
broker = new BaseBroker({
relier: relier,
window: windowMock
});
account = new Account({
email: '[email protected]',
sessionToken: 'someToken',
uid: 'uid'
});
model = new Backbone.Model({
account: account,
lastPage: 'signin',
password: 'password'
});
notifier = _.extend({}, Backbone.Events);
metrics = new Metrics({
notifier,
sentryMetrics: {
captureException () {}
}
});
view = new View({
broker,
canGoBack: true,
metrics,
model,
notifier,
relier,
viewName: 'sign-in-totp-code',
window: windowMock
});
sinon.stub(view, 'getSignedInAccount').callsFake(() => model.get('account'));
$(windowMock.document.body).attr('data-flow-id', '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef');
$(windowMock.document.body).attr('data-flow-begin', Date.now());
sinon.spy(view, 'logFlowEvent');
return view.render()
.then(() => $('#container').html(view.$el));
});
afterEach(() => {
metrics.destroy();
view.remove();
view.destroy();
view = metrics = null;
});
describe('render', () => {
it('renders the view', () => {
assert.lengthOf(view.$('#fxa-totp-code-header'), 1);
assert.include(view.$('.verification-totp-message').text(), 'security code');
assert.equal(view.$('#use-recovery-code-link').attr('href'), '/signin_recovery_code');
assert.equal(view.$('.different-account-link').attr('href'), '/signin');
});
describe('without an account', () => {
beforeEach(() => {
account = model.get('account').unset('sessionToken');
sinon.spy(view, 'navigate');
return view.render();
});
it('redirects to the signin page', () => {
assert.isTrue(view.navigate.calledWith('signin'));
});
});
});
describe('validateAndSubmit', () => {
beforeEach(() => {
sinon.stub(view, 'submit').callsFake(() => Promise.resolve());
sinon.spy(view, 'showValidationError');
});
describe('with an empty code', () => {
beforeEach(() => {
view.$('#totp-code').val('');
return view.validateAndSubmit().then(assert.fail, () => {});
});
it('displays a tooltip, does not call submit', () => {
assert.isTrue(view.showValidationError.called);
assert.isFalse(view.submit.called);
});
});
const validCodes = [
TOTP_CODE,
' ' + TOTP_CODE,
TOTP_CODE + ' ',
' ' + TOTP_CODE + ' ',
'001-001',
'111 111'
];
validCodes.forEach((code) => {
describe(`with a valid code: '${code}'`, () => {
beforeEach(() => {
view.$('.totp-code').val(code);
return view.validateAndSubmit();
});
it('calls submit', () => {
assert.equal(view.submit.callCount, 1);
});
});
});
});
describe('submit', () => {
describe('success', () => {
beforeEach(() => {
sinon.stub(account, 'verifyTotpCode').callsFake(() => Promise.resolve({success: true}));
sinon.stub(view, 'invokeBrokerMethod').callsFake(() => Promise.resolve());
view.$('.totp-code').val(TOTP_CODE);
return view.submit();
});
it('calls correct broker methods', () => {
assert.isTrue(account.verifyTotpCode.calledWith(TOTP_CODE), 'verify with correct code');
assert.isTrue(view.invokeBrokerMethod.calledWith('afterCompleteSignInWithCode', account));
});
it('logs flowEvent', () => {
assert.equal(view.logFlowEvent.callCount, 1);
});
});
describe('invalid TOTP code', () => {
beforeEach(() => {
sinon.stub(account, 'verifyTotpCode').callsFake(() => Promise.resolve({success: false}));
sinon.spy(view, 'showValidationError');
view.$('.totp-code').val(TOTP_CODE);
return view.submit();
});
it('rejects with the error for display', () => {
assert.equal(view.showValidationError.args[0][1].errno, 1054, 'correct error thrown');
});
});
describe('errors', () => {<|fim▁hole|> return view.submit();
});
it('rejects with the error for display', () => {
assert.equal(view.showValidationError.args[0][1].errno, 999, 'correct error thrown');
});
});
});
});<|fim▁end|> | beforeEach(() => {
sinon.stub(account, 'verifyTotpCode').callsFake(() => Promise.reject(AuthErrors.toError('UNEXPECTED_ERROR')));
sinon.spy(view, 'showValidationError');
view.$('.totp-code').val(TOTP_CODE); |
<|file_name|>mdialog.js<|end_file_name|><|fim▁begin|>/*
* @弹出提示层 ( 加载动画(load), 提示动画(tip), 成功(success), 错误(error), )
* @method tipBox
* @description 默认配置参数
* @time 2014-12-19
* @param {Number} width -宽度
* @param {Number} height -高度
* @param {String} str -默认文字
* @param {Object} windowDom -载入窗口 默认当前窗口
* @param {Number} setTime -定时消失(毫秒) 默认为0 不消失
* @param {Boolean} hasMask -是否显示遮罩
* @param {Boolean} hasMaskWhite -显示白色遮罩
* @param {Boolean} clickDomCancel -点击空白取消
* @param {Function} callBack -回调函数 (只在开启定时消失时才生效)
* @param {Function} hasBtn -显示按钮
* @param {String} type -动画类型 (加载,成功,失败,提示)
* @example
* new TipBox();
* new TipBox({type:'load',setTime:1000,callBack:function(){ alert(..) }});
*/
function TipBox(cfg){
this.config = {
width : 250,
height : 170,
str : '正在处理',
windowDom : window,
setTime : 0,
hasMask : true,
hasMaskWhite : false,
clickDomCancel : false,
callBack : null,
hasBtn : false,
type : 'success'
}
$.extend(this.config,cfg);
//存在就retrun
if(TipBox.prototype.boundingBox) return;
//初始化
this.render(this.config.type);
return this;
};
//外层box
TipBox.prototype.boundingBox = null;
//渲染
TipBox.prototype.render = function(tipType,container){
this.renderUI(tipType);
//绑定事件
this.bindUI();
//初始化UI
this.syncUI();
$(container || this.config.windowDom.document.body).append(TipBox.prototype.boundingBox);
};
//渲染UI
TipBox.prototype.renderUI = function(tipType){
TipBox.prototype.boundingBox = $("<div id='animationTipBox'></div>");
tipType == 'load' && this.loadRenderUI();
tipType == 'success' && this.successRenderUI();
tipType == 'error' && this.errorRenderUI();
tipType == 'tip' && this.tipRenderUI();
TipBox.prototype.boundingBox.appendTo(this.config.windowDom.document.body);
//是否显示遮罩
if(this.config.hasMask){
this.config.hasMaskWhite ? this._mask = $("<div class='mask_white'></div>") : this._mask = $("<div class='mask'></div>");
this._mask.appendTo(this.config.windowDom.document.body); <|fim▁hole|> if(this.config.hasBtn){
this.config.height = 206;
$('#animationTipBox').css("margin-top","103px");
switch(this.config.type){
case 'success':$(".success").after("<button class='okoButton'>ok</button>");
break;
case 'error':$(".lose").after("<button class='okoButton redOkoButton'>ok</button>");
break;
case 'tip':$(".tip").after("<button class='okoButton'>ok</button>");
break;
default: break;
}
$('button.okoButton').on('click',function(){_this.close();});
}
//定时消失
_this = this;
!this.config.setTime && typeof this.config.callBack === "function" && (this.config.setTime = 1);
this.config.setTime && setTimeout( function(){ _this.close(); }, _this.config.setTime );
};
TipBox.prototype.bindUI = function(){
_this = this;
//点击空白立即取消
this.config.clickDomCancel && this._mask && this._mask.click(function(){_this.close();});
};
TipBox.prototype.syncUI = function(){
TipBox.prototype.boundingBox.css({
width : this.config.width+'px',
height : this.config.height+'px',
marginLeft : "-"+(this.config.width/2)+'px',
marginTop : "-"+(this.config.height/2)+'px'
});
};
//提示效果UI
TipBox.prototype.tipRenderUI = function(){
var tip = "<div class='tip'>";
tip +=" <div class='icon'>i</div>";
tip +=" <div class='dec_txt'>"+this.config.str+"</div>";
tip += "</div>";
TipBox.prototype.boundingBox.append(tip);
};
//成功效果UI
TipBox.prototype.successRenderUI = function(){
var suc = "<div class='success'>";
suc +=" <div class='icon'>";
suc += "<div class='line_short'></div>";
suc += "<div class='line_long'></div> ";
suc += "</div>";
suc +=" <div class='dec_txt'>"+this.config.str+"</div>";
suc += "</div>";
TipBox.prototype.boundingBox.append(suc);
};
//错误效果UI
TipBox.prototype.errorRenderUI = function(){
var err = "<div class='lose'>";
err += " <div class='icon'>";
err += " <div class='icon_box'>";
err += " <div class='line_left'></div>";
err += " <div class='line_right'></div>";
err += " </div>";
err += " </div>";
err += "<div class='dec_txt'>"+this.config.str+"</div>";
err += "</div>";
TipBox.prototype.boundingBox.append(err);
};
//加载动画load UI
TipBox.prototype.loadRenderUI = function(){
var load = "<div class='load'>";
load += "<div class='icon_box'>";
for(var i = 1; i < 4; i++ ){
load += "<div class='cirBox"+i+"'>";
load += "<div class='cir1'></div>";
load += "<div class='cir2'></div>";
load += "<div class='cir3'></div>";
load += "<div class='cir4'></div>";
load += "</div>";
}
load += "</div>";
load += "</div>";
load += "<div class='dec_txt'>"+this.config.str+"</div>";
TipBox.prototype.boundingBox.append(load);
};
//关闭
TipBox.prototype.close = function(){
TipBox.prototype.destroy();
this.destroy();
this.config.setTime && typeof this.config.callBack === "function" && this.config.callBack();
};
//销毁
TipBox.prototype.destroy = function(){
this._mask && this._mask.remove();
TipBox.prototype.boundingBox && TipBox.prototype.boundingBox.remove();
TipBox.prototype.boundingBox = null;
};<|fim▁end|> | }
// 是否显示按钮 |
<|file_name|>logout-modal.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';
angular.module('Pedal2Play')
.directive('logoutModal', function ()
{
return {
restrict: 'E',
templateUrl: 'partials/logout.modal.html'<|fim▁hole|> });
})();<|fim▁end|> | }; |
<|file_name|>dragdrop.js<|end_file_name|><|fim▁begin|>(function() {
"use strict";
angular.module('common.dragdrop', [])
.factory('DragDropHandler', [function() {
return {
dragObject: undefined,
addObject: function(object, objects, to) {
objects.splice(to, 0, object);
},
moveObject: function(objects, from, to) {
objects.splice(to, 0, objects.splice(from, 1)[0]);
}
};
}])
.directive('draggable', ['DragDropHandler', function(DragDropHandler) {
return {
scope: {
draggable: '='
},
link: function(scope, element, attrs){
element.draggable({
connectToSortable: attrs.draggableTarget,
helper: "clone",
revert: "invalid",
start: function() {
DragDropHandler.dragObject = scope.draggable;
},
stop: function() {
DragDropHandler.dragObject = undefined;
}
});
element.disableSelection();
}
};
}])
.directive('droppable', ['DragDropHandler', function(DragDropHandler) {
return {
scope: {
droppable: '=',
ngMove: '&',
ngCreate: '&'
},
link: function(scope, element, attrs){
element.sortable({
connectWith: ['.draggable','.sortable'],
});
element.disableSelection();
element.on("sortdeactivate", function(event, ui) {
var from = (angular.element(ui.item).scope()) ? angular.element(ui.item).scope().$index : undefined;
var to = element.children().index(ui.item);
var list = element.attr('id');
if (to >= 0 ){
scope.$apply(function(){
if (from >= 0) {
//item is coming from a sortable
if (!ui.sender || ui.sender[0] === element[0]) {
//item is coming from this sortable
DragDropHandler.moveObject(scope.droppable, from, to);
} else {
//item is coming from another sortable
scope.ngMove({
from: from,
to: to,
fromList: ui.sender.attr('id'),
toList: list
});
ui.item.remove();
}
} else {
//item is coming from a draggable
scope.ngCreate({
object: DragDropHandler.dragObject,
to: to,
list: list
});
ui.item.remove();<|fim▁hole|> });
}
});
}
};
}])
;})();<|fim▁end|> | } |
<|file_name|>ConnectedArena.java<|end_file_name|><|fim▁begin|>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package pt.jkaiui.core.messages;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import pt.jkaiui.core.KaiString;
import pt.jkaiui.manager.I_InMessage;
/**
*
* @author yuu@akron
*/
public class ConnectedArena extends Message implements I_InMessage {
public ConnectedArena() {
}
public Message parse(String s) {
Pattern p = Pattern.compile("KAI_CLIENT_CONNECTED_ARENA;");
Matcher m = p.matcher(s);<|fim▁hole|> return msg;
}
return null;
}
}<|fim▁end|> | if (m.matches()){
ConnectedArena msg = new ConnectedArena();
|
<|file_name|>Panel.js<|end_file_name|><|fim▁begin|>$(document).ready(function () {
form = $('#form');
$("#Grid tbody>tr").click(function () {
var id = $(this).find("[name='id']").attr('value');
$("#grid_selecteditem").val(id);
});
});
function ClearSelection(e) {
$("#grid_selecteditem").val("");
}
var options = {};
function Show(prefix) {
$("#" + prefix + "SearchPanel").show('slide');
}
function Hide(prefix) {
$("#" + prefix + "SearchPanel").hide('slide');
}
function OnComboboxMasterChange(e) {
var slave = $('#' + e.target.id + "SlaveName").val();
var params = $('#' + e.target.id + "ParameterString").val().replace("*value*", e.value);
var ddl = $('#' + slave).data('tComboBox');
if (ddl.ajax) {
start = '?';
if (ddl.backupAjax.selectUrl.indexOf('?') != -1) {
start = '&';
}
ddl.ajax.selectUrl = ddl.backupAjax.selectUrl + start + params;
ddl.reload();
}
}
function OnComboboxSlaveLoad(e) {
var ddl = $('#' + e.target.id).data('tComboBox');
if (ddl && ddl.ajax) {
ddl.backupAjax = new Object;
ddl.backupAjax.selectUrl = ddl.ajax.selectUrl;
}
}
function GetAllObjectProps(obj) {
var str = "";
for(p in obj)
{
str += p + ': ' + obj[p] + '\r\n';
}
return str;
}
function FillStandardRange(range, idfrom, idto) {
<|fim▁hole|> datefrom = new Date(date.getFullYear(), date.getMonth(), 1);
}
else {
if (range == "quarter") {
datefrom = new Date(date.getFullYear(), 3*(date.getMonth() / 3), 1);
}
else {
if (range == "year") {
datefrom = new Date(date.getFullYear(), 0, 1);
}
}
}
var pickerFrom = $("#" + idfrom).data("tDatePicker");
var pickerTo = $("#" + idto).data("tDatePicker");
if (pickerFrom && pickerTo) {
pickerFrom.value(datefrom);
pickerTo.value(new Date(date.getFullYear(),date.getMonth(),date.getDate())); //always to current date
}
}
function ClearSearch() {
//$("input[type='text']").val("");
$("input[class='t-input']").val("");
$("input[type='text']").val("");
}<|fim▁end|> | var date = new Date();
var datefrom;
if (range == "month") {
|
<|file_name|>game.py<|end_file_name|><|fim▁begin|>import random
from .tiles import base
INITIAL_TILES = [
base.ASSASSIN, base.BOWMAN, base.CHAMPION, base.DRAGOON, base.FOOTMAN,
base.GENERAL, base.KNIGHT, base.LONGBOWMAN, base.MARSHALL, base.PIKEMAN,
base.PIKEMAN, base.PRIEST, base.RANGER, base.SEER, base.WIZARD,
]<|fim▁hole|> def __init__(self, initial_tiles=INITIAL_TILES):
self.board = {}
self.bags = (initial_tiles[:], initial_tiles[:])
for bag in self.bags:
random.shuffle(bag)<|fim▁end|> |
class Game(object):
|
<|file_name|>test_last_binding_operation.py<|end_file_name|><|fim▁begin|>import http
from openbrokerapi.service_broker import LastOperation, OperationState
from tests import BrokerTestCase
class LastBindingOperationTest(BrokerTestCase):
def setUp(self):
self.broker.service_id.return_value = 'service-guid-here'
def test_last_operation_called_just_with_required_fields(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation',
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id', None, None, None)
def test_last_operation_called_with_operation_data(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
query = 'service_id=&plan_id=456&operation=service-guid-here%20operation-data'
self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation?%s' % query,
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
'service-guid-here operation-data', "", "456")
<|fim▁hole|>
query = 'service_id=123&plan_id=456&operation=service-guid-here%20operation-data'
response = self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation?%s' % query,
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
'service-guid-here operation-data', "123", "456")
self.assertEqual(response.status_code, http.HTTPStatus.OK)
self.assertEqual(response.json, dict(
state=OperationState.IN_PROGRESS.value,
description='Running...'
))<|fim▁end|> | def test_returns_200_with_given_state(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...') |
<|file_name|>load-doc-editor.js<|end_file_name|><|fim▁begin|>/*
<|fim▁hole|> * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/<|fim▁end|> | * Copyright (C) 2014 Xillio ([email protected])
*
|
<|file_name|>downloader.rs<|end_file_name|><|fim▁begin|>use datastore::DataStore;
use errors::*;
use fillable::*;
use futures;
use futures::{Sink, Stream};
use futures::future;
use futures::future::Future;
use manifest::{BlockRequest, ManifestWithFile};
use metainfo::{InfoHash, MetaInfo};
use peer_protocol;
use peer_protocol::{BitTorrentPeerCodec, Message, PeerID};
use slog::Logger;
use std::cmp;
use std::collections::{HashMap, HashSet, VecDeque};
use std::default::Default;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
use tokio_core::net::TcpStream;
use tokio_core::reactor;
use tokio_core::reactor::Handle;
use tokio_io::AsyncRead;
use tokio_io::codec::Framed;
use tracker;
use tracker::TrackerClient;
use util::{BxFuture, FutureEnhanced, VecDequeStream, mkdirp_for_file, tcp_connect2};
type PeerFramed = Framed<TcpStream, BitTorrentPeerCodec>;
// Local number used to identify peer connections.
type PeerNum = usize;
struct DownloaderState {
info: MetaInfo,
datastore: DataStore,
manifest: ManifestWithFile,
peer_states: HashMap<PeerNum, PeerState>,
next_peer_num: AtomicUsize,
outstanding: OutstandingRequestsManager,
}
type AM<T> = Arc<Mutex<T>>;
pub fn start<P: AsRef<Path>>(log: Logger, info: MetaInfo, peer_id: PeerID, store_path: P, manifest_path: P) -> Result<()> {
let log2 = log.clone();
let mut core = reactor::Core::new()?;
let handle = core.handle();
mkdirp_for_file(&store_path)?;
mkdirp_for_file(&manifest_path)?;
let datastore = DataStore::create_or_open(&info, store_path)?;
let manifest = ManifestWithFile::load_or_new(log2, info.clone(), manifest_path)?;
let mut tc = TrackerClient::new(info.clone(), peer_id.clone())?;
debug!(log, "asking tracker");
let tracker_res = tc.easy_start()?;
debug!(log, "tracker res: {:#?}", tracker_res);
if let Some(reason) = tracker_res.failure_reason {
bail!("tracker failed: {}", reason);
}
if tracker_res.peers.is_empty() {
bail!("tracker returned no peers");
}
let num_pieces = info.num_pieces() as u64;
let info_hash = info.info_hash.clone();
let dstate = DownloaderState {
info: info,
datastore: datastore,
manifest: manifest,
peer_states: HashMap::new(),
next_peer_num: AtomicUsize::new(0),
outstanding: OutstandingRequestsManager::new(),
};
let dstate_c = Arc::new(Mutex::new(dstate));
let mut top_futures: Vec<BxFuture<(), Error>> = Vec::new();
top_futures.push(run_progress_report(log.clone(), handle.clone(), dstate_c.clone()));
let n_start_peers = cmp::min(15, tracker_res.peers.len());
info!(log,
"using {}/{} available peers",
n_start_peers,
tracker_res.peers.len());
// Connect to an initial set of peers
for peer in tracker_res.peers[..n_start_peers].iter() {
let peer_num = {
let dstate = dstate_c.lock().unwrap();
dstate
.next_peer_num
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
};
let dstate_c = dstate_c.clone();
let handle = handle.clone();
let log = log.new(o!("peer_num" => peer_num));
let f = run_peer(log.clone(),
handle.clone(),
dstate_c,
peer.clone(),
info_hash.clone(),
num_pieces,
peer_id.clone(),
peer_num);
top_futures.push(f);
}
let future_root = future::join_all(top_futures);
core.run(future_root)?;
Ok(())
}
/// Run a loop that prints a progress report occasionally.
fn run_progress_report(log: Logger, handle: reactor::Handle, dstate_c: AM<DownloaderState>) -> BxFuture<(), Error> {
use futures::future::Loop::{Break, Continue};
future::loop_fn((log, handle, dstate_c), |(log, handle, dstate_c)| {
let duration = Duration::from_millis(500);
match reactor::Timeout::new(duration, &handle) {
Err(err) => future::err(Into::<Error>::into(err)).bxed(),
Ok(timeout) => timeout
.map_err(|e| e.into())
.and_then(|()| {
let pres = {
let mut dstate = dstate_c.lock().unwrap();
progress_report(log.clone(), &mut dstate)
};
match pres {
Err(err) => Err(Into::<Error>::into(err)),
Ok(true) => Ok(Continue((log, handle, dstate_c))),
Ok(false) => Ok(Break(())),
}
}).bxed(),
}
})
.bxed()
}
/// Returns whether to continue looping.
fn progress_report(log: Logger, dstate: &mut DownloaderState) -> Result<bool> {
let chokers = dstate
.peer_states
.iter()
.filter(|&(ref _k, ref v)| v.peer_choking)
.count();
let n_peers = dstate.peer_states.len();
// let bar = dstate.manifest.manifest.progress_bar();
// info!(log, "progress report: {}", bar);
let p = dstate.manifest.manifest.amount_verified();
info!(log,
"progress: {:03}% chokers:{}/{}",
p * (100 as f64),
chokers,
n_peers);
let go = !dstate.manifest.manifest.is_all_verified();
Ok(go)
}
/// Connect and run a peer.
/// Connects and runs the peer loop.
/// Logs most errors. Any returned error is a programming error.
fn run_peer(log: Logger,
handle: reactor::Handle,
dstate_c: AM<DownloaderState>,
peer: tracker::Peer,
info_hash: InfoHash,
num_pieces: u64,
local_peer_id: PeerID,
peer_num: PeerNum)
-> BxFuture<(), Error> {
let log2 = log.clone();
connect_peer(&log,
peer.address,
info_hash.clone(),
local_peer_id.clone(),
peer_num,
&handle)
.and_then(move |(stream, remote_peer_id)| {
// Create peer state
{
let mut dstate = dstate_c.lock().unwrap();
let x = dstate
.peer_states
.insert(peer_num, PeerState::new(num_pieces, remote_peer_id));
if x.is_some() {
warn!(log, "peer state already existed"; "peer_num" => peer_num)
}
}
let dstate_c2 = dstate_c.clone();
drive_peer(&log, dstate_c, &handle, stream, peer_num)
.then(move |res| {
// Delete peer state
let mut dstate = dstate_c2.lock().unwrap();
let x = dstate.peer_states.remove(&peer_num);
if x.is_none() {
warn!(log, "peer state was missing"; "peer_num" => peer_num)
}
let n = dstate.outstanding.clear_peer(peer_num);
debug!(log, "cleared outstanding requests: {}", n; "peer_num" => peer_num);
res
})
.bxed()
})
.or_else(move |err| {
error!(log2, "peer error: {}", err);
Ok(())
})
.bxed()
}
/// Connect to a remote peer and perform the BitTorrent handshake.
///
/// Resolves to the framed wire-message stream plus the remote peer's ID.
/// Fails when the TCP connect times out (3s) or the remote is serving a
/// different torrent (info hash mismatch).
fn connect_peer(log: &Logger, addr: SocketAddr, info_hash: InfoHash, peer_id: PeerID, peer_num: PeerNum, handle: &reactor::Handle) -> BxFuture<(PeerFramed, PeerID), Error> {
    let info_hash2 = info_hash.clone();
    // Each closure in the chain moves its own clone of the logger.
    let log1 = log.clone();
    let log2 = log.clone();
    let log3 = log.clone();

    info!(log, "connecting to {} ...", addr);
    tcp_connect2(&addr, Duration::from_millis(3000), &handle)
        .chain_err(|| "peer connection failed")
        .and_then(move |stream| {
            info!(log1, "connected");
            peer_protocol::handshake_send_async(stream, info_hash.clone(), peer_id.clone())
        })
        .and_then(|stream| peer_protocol::handshake_read_1_async(stream))
        .and_then(move |(stream, remote_info_hash)| {
            debug!(log2, "remote info hash: {:?}", remote_info_hash);
            // Refuse to proceed if the peer is serving a different torrent.
            if remote_info_hash != info_hash2 {
                bail!("peer [{}] info hash mismatch peer:{:?} me:{:?}",
                      peer_num,
                      remote_info_hash,
                      info_hash2);
            }
            Ok(stream)
        })
        .and_then(|stream| peer_protocol::handshake_read_2_async(stream))
        .and_then(move |(stream, remote_peer_id)| {
            debug!(log3, "remote peer id: {:?}", remote_peer_id);
            // Wrap the raw TCP stream with the BitTorrent wire-message codec.
            let stream: PeerFramed = stream.framed(BitTorrentPeerCodec);
            Ok((stream, remote_peer_id))
        })
        .bxed()
}
/// What the caller of `handle_peer_message` should do next.
enum HandlePeerMessageRes {
    /// Nothing to send; keep reading from the peer.
    Pass,
    /// Send these messages to the peer, then keep reading.
    Reply(VecDeque<Message>),
    /// Cleanly close the connection.
    Close,
}
/// Run the receive loop for one connected peer.
///
/// Splits the framed stream into send/receive halves, spawns the send half
/// onto the reactor behind a small mpsc channel (so slow writes never block
/// reads), then loops: read a message, feed it to `handle_peer_message`, and
/// forward any replies into the send channel. The future resolves when the
/// peer hangs up, a close is requested, or an error occurs.
fn drive_peer(log: &Logger, dstate_c: AM<DownloaderState>, handle: &Handle, stream: PeerFramed, peer_num: PeerNum) -> BxFuture<(), Error> {
    let (peer_tx, peer_rx) = stream.split();

    // Separate send from receive so that the listener doesn't block all the time
    let (buf_tx, buf_rx) = futures::sync::mpsc::channel::<Message>(5);

    // debugging type assertions
    // let _: &Stream<Item = Message, Error = ()> = &buf_rx;
    // let _: &Sink<SinkItem = Message, SinkError = Error> = &peer_tx;
    // let _: &Stream<Item = Message, Error = Error> = &buf_rx.map_err(|()| "lol".into());
    // let _: &Future<Item = (_, _), Error = Error> = &peer_tx.send_all(buf_rx.map_err(|()| "lol".into()));

    // Process the send channel: everything pushed into buf_tx is forwarded to
    // the peer socket on a separately-spawned task.
    let log2 = log.clone();
    handle.spawn(peer_tx.send_all(buf_rx.map_err(|()| Into::<Error>::into("peer send failed")))
        .map(|(_sink, _stream)| ())
        .map_err(move |e| {
            warn!(log2, "warning: send to peer failed: {}", e);
        }));

    // type PeerStream = Box<Stream<Item = Message, Error = Error>>;
    // type PeerSink = Box<Sink<SinkItem = Message, SinkError = Error>>;
    // type LoopState = (AM<DownloaderState>, PeerStream, PeerSink);

    // State threaded through each iteration of `loop_fn`; ownership of the
    // stream/sink halves is passed in and out of every step.
    struct LoopState<S, U>
        where S: Stream<Item = Message, Error = Error>,
              U: Sink<SinkItem = Message, SinkError = Error>
    {
        log: Logger,
        dstate_c: AM<DownloaderState>,
        peer_num: PeerNum,
        peer_rx: S,
        peer_tx: U,
    }

    let init = LoopState {
        log: log.clone(),
        dstate_c: dstate_c,
        peer_num: peer_num,
        peer_rx: peer_rx,
        peer_tx: buf_tx.sink_map_err(|send_err| format!("error sending message: {}", send_err).into()),
    };

    use futures::future::Loop;
    future::loop_fn(init, |LoopState {
                        log,
                        peer_num,
                        dstate_c,
                        peer_rx,
                        peer_tx,
                    }|
                    -> BxFuture<Loop<(), LoopState<_, _>>, Error> {
        peer_rx
            .into_future()
            .map_err(|(err, _stream)| err)
            .and_then(move |(item, peer_rx)| -> BxFuture<Loop<(), LoopState<_, _>>, Error> {
                match item {
                    Some(msg) => {
                        // Hold the shared-state lock only for the synchronous
                        // message-handling step, never across an await point.
                        let cmd: Result<HandlePeerMessageRes> = {
                            let mut dstate = dstate_c.lock().unwrap();
                            handle_peer_message(&log, &mut dstate, peer_num, &msg)
                        };
                        use self::HandlePeerMessageRes::*;
                        match cmd {
                            Ok(Pass) => {
                                future::ok(Loop::Continue(LoopState {
                                    log,
                                    dstate_c,
                                    peer_num,
                                    peer_rx,
                                    peer_tx,
                                }))
                                .bxed()
                            }
                            Ok(Reply(outs)) => {
                                // Queue all replies before reading the next
                                // message; backpressure comes from buf_tx.
                                peer_tx
                                    .send_all(VecDequeStream::<Message, Error>::new(outs))
                                    .map(move |(peer_tx, _)| {
                                        Loop::Continue(LoopState {
                                            log,
                                            dstate_c,
                                            peer_num,
                                            peer_rx,
                                            peer_tx,
                                        })
                                    })
                                    .bxed()
                            }
                            Ok(Close) => {
                                debug!(log, "closing peer connection");
                                future::ok(Loop::Break(())).bxed()
                            }
                            Err(err) => {
                                // Treat handler errors as non-fatal to the
                                // program: drop this peer and resolve Ok.
                                error!(log, "closing peer due to error: {:?}", err);
                                future::ok(Loop::Break(())).bxed()
                            }
                        }
                    }
                    None => {
                        // Stream ended: remote closed the connection.
                        debug!(log, "peer hung up");
                        future::ok(Loop::Break(())).bxed()
                    }
                }
            })
            .bxed()
    })
    .bxed()
}
/// One synchronous step.
/// Handles a single message received from the peer, updating shared download
/// state, and decides what to send back (if anything).
/// Returns messages to send.
fn handle_peer_message(log: &Logger, dstate: &mut DownloaderState, peer_num: PeerNum, msg: &Message) -> Result<HandlePeerMessageRes> {
    use self::HandlePeerMessageRes::*;

    debug!(log, "n-out {}", dstate.outstanding.get_num(peer_num));

    let rstate = dstate
        .peer_states
        .get_mut(&peer_num)
        .ok_or_else(|| Into::<Error>::into(format!("missing peer state: {}", peer_num).to_owned()))?;
    // Replies accumulated over the course of handling this message.
    let mut outs = VecDeque::new();
    debug!(log, "recv message";
           "msg" => msg.summarize(),
           "n" => rstate.temp.nreceived);
    rstate.temp.nreceived += 1;
    match msg {
        &Message::KeepAlive => {}
        &Message::Choke => {
            // A (un)choke transition invalidates our in-flight requests.
            rstate.peer_choking = true;
            dstate.outstanding.clear_peer(peer_num);
        }
        &Message::Unchoke => {
            rstate.peer_choking = false;
            dstate.outstanding.clear_peer(peer_num);
        }
        &Message::Interested => rstate.peer_interested = true,
        &Message::NotInterested => rstate.peer_interested = false,
        &Message::Bitfield { ref bits } => {
            // The bitfield may be padded past num_pieces, but never shorter.
            if bits.len() < dstate.info.num_pieces() {
                bail!("bitfield has less bits {} than pieces {}",
                      bits.len(),
                      dstate.info.num_pieces());
            }
            // Convert runs of set bits into [i_start, b) intervals on the
            // peer's `has` set.
            let mut i_start = 0;
            let mut in_interval = false;
            for b in 0..dstate.info.num_pieces() {
                if bits[b] && !in_interval {
                    i_start = b;
                    in_interval = true;
                } else if !bits[b] && in_interval {
                    rstate.has.add(i_start as u64, b as u64)?;
                    in_interval = false;
                }
            }
            // Close a run that extends to the final piece.
            if in_interval {
                rstate
                    .has
                    .add(i_start as u64, dstate.info.num_pieces() as u64)?;
            }
        }
        &Message::Have { piece } => {
            rstate.has.add(piece as u64, piece as u64 + 1)?;
        }
        &Message::Request { .. } => {
            // Uploading is not implemented; we never unchoke for data.
            bail!("not implemented");
        }
        &Message::Piece {
            piece,
            offset,
            ref block,
        } => {
            // Persist the block, then update the manifest; a piece that
            // becomes full is verified immediately and dropped if it flunks.
            dstate
                .datastore
                .write_block(piece as u64, offset as u64, &block)?;
            let newly_filled = dstate
                .manifest
                .manifest
                .add_block(piece as u64, offset as u64, block.len() as u64)?;
            for p in newly_filled {
                let expected_hash = dstate.info.piece_hashes[p as usize].clone();
                info!(log, "filled piece: {}", p);
                if let Some(verified) = dstate.datastore.verify_piece(p, expected_hash)? {
                    info!(log, "verified piece: {}", verified.piece);
                    dstate.manifest.manifest.mark_verified(verified)?;
                } else {
                    info!(log, "flunked piece: {}", p);
                    dstate.manifest.manifest.remove_piece(p)?;
                }
            }

            // This block is no longer outstanding.
            dstate
                .outstanding
                .clear(peer_num,
                       BlockRequest {
                           piece: piece as u64,
                           offset: offset as u64,
                           length: block.len() as u64,
                       });

            dstate.manifest.store(log)?;
        }
        &Message::Cancel { .. } => bail!("not implemented"),
        &Message::Port { .. } => {}
    }
    // After the first message, unchoke the peer and declare interest so data
    // can start flowing.
    if rstate.temp.nreceived >= 1 && rstate.am_choking {
        let out = Message::Unchoke {};
        debug!(log, "sending message: {:?}", out);
        rstate.am_choking = false;
        outs.push_back(out);
    }
    if rstate.temp.nreceived >= 1 && !rstate.am_interested {
        let out = Message::Interested {};
        debug!(log, "sending message: {:?}", out);
        rstate.am_interested = true;
        outs.push_back(out);
    }
    // If the peer will serve us, queue as many block requests as allowed.
    if !rstate.peer_choking && rstate.am_interested {
        for safety in 0.. {
            if safety == 99 {
                error!(log, "collecting too many requests to send!");
            }
            match next_request(log, &mut dstate.manifest, &mut dstate.outstanding, peer_num)? {
                None => {
                    if dstate.manifest.manifest.is_all_full() {
                        // Everything downloaded; run verification and maybe
                        // finish.
                        verify_all(log,
                                   &dstate.info,
                                   &mut dstate.manifest,
                                   &mut dstate.datastore)?;
                        if dstate.manifest.manifest.is_all_verified() {
                            println!("all pieces verified!");
                            return Ok(Close);
                        }
                    } else {
                        if safety == 0 {
                            debug!(log, "not requesting");
                        }
                    }
                    break;
                }
                Some(desire) => {
                    let out = Message::Request {
                        piece: desire.piece as u32,
                        offset: desire.offset as u32,
                        length: desire.length as u32,
                    };
                    // Record the request *before* sending so a racing Piece
                    // message can clear it.
                    dstate.outstanding.add(peer_num, desire);
                    debug!(log, "sending message: {:?}", out);
                    outs.push_back(out);
                }
            }
        }
    }
    if outs.len() > 0 {
        Ok(Reply(outs))
    } else {
        Ok(Pass)
    }
}
/// Decide the next block to request from a peer.
fn next_request(log: &Logger, manifest: &mut ManifestWithFile, outstanding: &mut OutstandingRequestsManager, peer_num: PeerNum) -> Result<Option<BlockRequest>> {
const MAX_OUTSTANDING_PER_PEER: u64 = 5;
const MAX_OUTSTANDING_PER_BLOCK: u64 = 1;
if outstanding.get_num(peer_num) >= MAX_OUTSTANDING_PER_PEER {
// Already plenty of requests outstanding on this peer.
return Ok(None);
}
let mut after = None;
for safety in 0.. {
if safety == 99 {
error!(log, "loop has gone too far looking for next request!");
}
if let Some(desire) = manifest.manifest.next_desired_block(log, after) {
// TODO: allow multiple outstanding per block, maybe, and if so remember to cancel upon receive.
let ps = outstanding.get_peers(desire);
if ps.len() > MAX_OUTSTANDING_PER_BLOCK as usize {
after = Some(desire);
continue;
}
if ps.contains(&peer_num) {
after = Some(desire);
continue;
}
return Ok(Some(desire));
}
return Ok(None);<|fim▁hole|> Ok(None)
}
/// Call this when the download might be done.
/// Runs hash verification over every piece that still needs it, then saves
/// the manifest. Pieces that flunk verification are dropped from the
/// manifest so their blocks will be re-downloaded.
/// If this function returns Ok that does _not_ mean all verified.
fn verify_all(log: &Logger, info: &MetaInfo, manifest: &mut ManifestWithFile, datastore: &mut DataStore) -> Result<()> {
    // No more blocks needed! Unless something fails verification.
    for piece in manifest.manifest.needs_verify() {
        info!(log, "verifying piece: {}", piece);
        let expected_hash = info.piece_hashes[piece as usize].clone();
        match datastore.verify_piece(piece, expected_hash)? {
            Some(verified) => {
                info!(log, "verified piece: {}", verified.piece);
                manifest.manifest.mark_verified(verified)?;
            }
            None => {
                info!(log, "flunked piece: {}", piece);
                manifest.manifest.remove_piece(piece)?;
            }
        }
    }
    manifest.store(log)?;
    Ok(())
}
/// Per-connection state for one remote peer.
#[derive(Debug)]
pub struct PeerState {
    /// ID of the remote peer
    peer_id: PeerID,

    /// Peer is interested in this client
    peer_interested: bool,
    /// Peer is choking this client
    peer_choking: bool,
    /// This client is interested in the peer
    am_interested: bool,
    /// This client is choking the peer
    am_choking: bool,

    /// Which pieces the remote peer claims to have
    has: Fillable,

    /// Transient bookkeeping (message counters etc.)
    temp: TempState,
}
/// Transient per-connection bookkeeping.
#[derive(Debug, Default)]
pub struct TempState {
    /// Number of messages received from the peer so far.
    nreceived: u64,
}
impl PeerState {
    /// Fresh connection state for a peer with `num_pieces` total pieces.
    ///
    /// Both sides start out choking and not interested, matching the
    /// BitTorrent protocol's initial connection state.
    fn new(num_pieces: u64, peer_id: PeerID) -> Self {
        PeerState {
            peer_id: peer_id,
            has: Fillable::new(num_pieces),
            temp: TempState::default(),
            peer_interested: false,
            am_interested: false,
            peer_choking: true,
            am_choking: true,
        }
    }
}
/// Tracks which block requests are in flight, indexed both ways so lookups
/// by peer and by block are cheap. The two maps are kept in sync.
struct OutstandingRequestsManager {
    /// Blocks for each peer.
    peer_blocks: HashMap<PeerNum, HashSet<BlockRequest>>,
    /// Peers for each block
    block_peers: HashMap<BlockRequest, HashSet<PeerNum>>,
}
impl OutstandingRequestsManager {
fn new() -> Self {
Self {
peer_blocks: HashMap::new(),
block_peers: HashMap::new(),
}
}
fn add(&mut self, peer: PeerNum, block: BlockRequest) {
self.peer_blocks
.entry(peer)
.or_insert(HashSet::new())
.insert(block);
self.block_peers
.entry(block)
.or_insert(HashSet::new())
.insert(peer);
}
/// Returns the number of cleared items: 0 or 1.
fn clear(&mut self, peer: PeerNum, block: BlockRequest) -> usize {
if let Some(peers) = self.block_peers.get_mut(&block) {
peers.remove(&peer);
if let Some(blocks) = self.peer_blocks.get_mut(&peer) {
blocks.remove(&block);
}
return 1;
} else {
return 0;
}
}
/// Clear all outstanding requests for a peer.
/// Returns the number of cleared items.
fn clear_peer(&mut self, peer: PeerNum) -> usize {
if let Some(blocks) = self.peer_blocks.remove(&peer) {
let mut x = 0;
for block in blocks.iter() {
if let Some(peers) = self.block_peers.get_mut(&block) {
peers.remove(&peer);
x += 1;
}
}
return x;
} else {
return 0;
}
}
/// Get the set of peers with outstanding requests for the block
fn get_peers(&self, block: BlockRequest) -> HashSet<PeerNum> {
return self.block_peers
.get(&block)
.map(|x| x.clone())
.unwrap_or_else(|| HashSet::new());
}
/// Get the number of requests outstanding for the peer
fn get_num(&self, peer: PeerNum) -> u64 {
return self.peer_blocks
.get(&peer)
.map(|x| x.len() as u64)
.unwrap_or(0);
}
}<|fim▁end|> | } |
<|file_name|>elapsedtime.go<|end_file_name|><|fim▁begin|>package misc
import (
"net/http"
"strconv"
"time"
)
//Writer http response writer interface.
//
// Combines http.ResponseWriter with http.Hijacker so wrapped handlers can
// still take over the underlying connection (ElapsedTime type-asserts the
// incoming writer to this interface).
type Writer interface {
	http.ResponseWriter
	http.Hijacker
}
//ElapsedTime adds the request's elapsed time to the "Elapsed-Time" header of the response.
//Elapsed time is the time spent between middleware execution and the first data written to the response.
func ElapsedTime(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	nw := elapsedTimeResponseWriter{
		Writer: w.(Writer), // NOTE(review): panics if w does not implement http.Hijacker — confirm this is intended
		Timestamp: time.Now().UnixNano(),
		written: false,
	}
	next(&nw, r)
}
type elapsedTimeResponseWriter struct {<|fim▁hole|>
// WriteHeader stamps the Elapsed-Time header on the first header write, then
// delegates to the wrapped writer. The header must be set before the
// underlying WriteHeader call, since headers cannot be modified afterwards.
func (e *elapsedTimeResponseWriter) WriteHeader(status int) {
	if !e.written {
		e.written = true
		e.Writer.Header().Set("Elapsed-Time", strconv.FormatInt(time.Now().UnixNano()-e.Timestamp, 10)+" ns")
	}
	e.Writer.WriteHeader(status)
}
func (e *elapsedTimeResponseWriter) Write(data []byte) (int, error) {
if e.written == false {
e.WriteHeader(http.StatusOK)
}
return e.Writer.Write(data)
}<|fim▁end|> | Writer
Timestamp int64
written bool
} |
<|file_name|>kpabenc_adapt_hybrid.py<|end_file_name|><|fim▁begin|>from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
    """
    >>> from charm.schemes.abenc.abenc_lsw08 import KPabe
    >>> group = PairingGroup('SS512')
    >>> kpabe = KPabe(group)
    >>> hyb_abe = HybridABEnc(kpabe, group)
    >>> access_policy = ['ONE', 'TWO', 'THREE']
    >>> access_key = '((FOUR or THREE) and (TWO or ONE))'
    >>> msg = b"hello world this is an important message."
    >>> (master_public_key, master_key) = hyb_abe.setup()
    >>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
    >>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
    >>> hyb_abe.decrypt(cipher_text, secret_key)
    b'hello world this is an important message.'
    """
    def __init__(self, scheme, groupObj):
        """Wrap a KP-ABE `scheme` operating over pairing group `groupObj`.

        NOTE(review): the scheme is stored in a module-level global, so only
        one HybridABEnc per process is safe — confirm against callers.
        """
        ABEnc.__init__(self)
        global abenc
        # check properties (TODO)
        abenc = scheme
        self.group = groupObj

    def setup(self):
        """Generate (public key, master key) via the underlying ABE scheme."""
        return abenc.setup()

    def keygen(self, pk, mk, object):
        """Derive a secret key for the given access structure `object`."""
        return abenc.keygen(pk, mk, object)

    def encrypt(self, pk, M, object):
        """Hybrid-encrypt bytes `M` under attribute list `object`.

        A random group element is ABE-encrypted, then used to key a
        symmetric authenticated cipher that encrypts the actual payload.
        """
        key = self.group.random(GT)
        c1 = abenc.encrypt(pk, key, object)
        # instantiate a symmetric enc scheme from this key
        cipher = AuthenticatedCryptoAbstraction(extract_key(key))
        c2 = cipher.encrypt(M)
        return { 'c1':c1, 'c2':c2 }

    def decrypt(self, ct, sk):
        """Recover the symmetric key with ABE, then decrypt the payload."""
        c1, c2 = ct['c1'], ct['c2']
        key = abenc.decrypt(c1, sk)
        cipher = AuthenticatedCryptoAbstraction(extract_key(key))
        return cipher.decrypt(c2)
def main():
    """Exercise the hybrid KP-ABE scheme end to end: setup, keygen, encrypt, decrypt."""
    group = PairingGroup('SS512')
    hyb_abe = HybridABEnc(KPabe(group), group)

    access_key = '((ONE or TWO) and THREE)'
    access_policy = ['ONE', 'TWO', 'THREE']
    message = b"hello world this is an important message."

    (pk, mk) = hyb_abe.setup()
    if debug: print("pk => ", pk)
    if debug: print("mk => ", mk)

    sk = hyb_abe.keygen(pk, mk, access_key)
    if debug: print("sk => ", sk)

    ct = hyb_abe.encrypt(pk, message, access_policy)
    decrypted = hyb_abe.decrypt(ct, sk)

    assert decrypted == message, "Failed Decryption!!!"
    if debug: print("Successful Decryption!!!")
<|file_name|>renamer.py<|end_file_name|><|fim▁begin|>import ast
from python_minifier.rename.binding import NameBinding
from python_minifier.rename.name_generator import name_filter
from python_minifier.rename.util import is_namespace
def all_bindings(node):
    """
    All bindings in a module

    :param node: The module to get bindings in
    :type node: :class:`ast.AST`
    :rtype: Iterable[ast.AST, Binding]
    """

    # Yield this node's own bindings first, then recurse into children.
    if is_namespace(node):
        for binding in node.bindings:
            yield node, binding

    for child in ast.iter_child_nodes(node):
        for pair in all_bindings(child):
            yield pair
def sorted_bindings(module):
    """
    All bindings in a module, sorted by descending number of references

    :param module: The module to get bindings in
    :type module: :class:`ast.AST`
    :rtype: Iterable[ast.AST, Binding]
    """

    def reference_count(namespace_binding):
        # Sort key: how many references the binding has.
        _namespace, binding = namespace_binding
        return len(binding.references)

    return sorted(all_bindings(module), key=reference_count, reverse=True)
def reservation_scope(namespace, binding):
"""
Get the namespaces that are in the bindings reservation scope
Returns the namespace nodes the binding name must be resolvable in
:param namespace: The local namespace of a binding
:type namespace: :class:`ast.AST`
:param binding: The binding to get the reservation scope for
:type binding: Binding
:rtype: set[ast.AST]
"""
namespaces = set([namespace])
for node in binding.references:
while node is not namespace:
namespaces.add(node.namespace)
node = node.namespace
return namespaces
def add_assigned(node):
    """
    Add the assigned_names attribute to namespace nodes in a module

    :param node: The module to add the assigned_names attribute to
    :type node: :class:`ast.Module`
    """

    # Iterative traversal: visit every node, initialising assigned_names on
    # each namespace node.
    stack = [node]
    while stack:
        current = stack.pop()
        if is_namespace(current):
            current.assigned_names = set()
        stack.extend(ast.iter_child_nodes(current))
def reserve_name(name, reservation_scope):
    """
    Reserve a name in every namespace of a reservation scope

    :param str name: The name to reserve
    :param reservation_scope: The namespaces to reserve the name in
    :type reservation_scope: Iterable[:class:`ast.AST`]
    """

    for ns in reservation_scope:
        ns.assigned_names.add(name)
class UniqueNameAssigner(object):
    """
    Assign new names to renamed bindings

    Assigns a unique name to every binding; unlike :class:`NameAssigner`,
    names are never reused across sibling namespaces.
    """

    def __init__(self):
        # Infinite generator of valid, non-reserved identifier names.
        self.name_generator = name_filter()
        self.names = []

    def available_name(self):
        # Every call returns a fresh, never-before-used name.
        return next(self.name_generator)

    def __call__(self, module):
        assert isinstance(module, ast.Module)

        # Most-referenced bindings get the shortest names.
        for namespace, binding in sorted_bindings(module):
            if binding.allow_rename:
                binding.new_name = self.available_name()

        return module
class NameAssigner(object):
    """
    Assign new names to renamed bindings

    This assigner creates a name 'reservation scope' containing each namespace a binding is referenced in, including
    transitive namespaces. Bindings are then assigned the first available name that has no references in their
    reservation scope. This means names will be reused in sibling namespaces, and shadowed where possible in child
    namespaces.

    Bindings are assigned names in order of most references, with names assigned shortest first.
    """

    def __init__(self, name_generator=None):
        # Generator of candidate names, shortest first.
        self.name_generator = name_generator if name_generator is not None else name_filter()
        # Cache of names drawn from the generator so far, so earlier (shorter)
        # names can be retried for later bindings.
        self.names = []

    def iter_names(self):
        # First replay all previously generated names, then draw (and cache)
        # new ones from the generator as needed.
        for name in self.names:
            yield name

        while True:
            name = next(self.name_generator)
            self.names.append(name)
            yield name

    def available_name(self, reservation_scope, prefix=''):
        """
        Search for the first name that is not in reservation scope
        """
        for name in self.iter_names():
            if self.is_available(prefix + name, reservation_scope):
                return prefix + name

    def is_available(self, name, reservation_scope):
        """
        Is a name unreserved in a reservation scope

        :param str name: the name to check availability of
        :param reservation_scope: The scope to check
        :type reservation_scope: Iterable[:class:`ast.AST`]
        :rtype: bool
        """

        for namespace in reservation_scope:
            if name in namespace.assigned_names:
                return False
        return True

    def __call__(self, module, prefix_globals, reserved_globals=None):
        assert isinstance(module, ast.Module)

        add_assigned(module)

        # Pre-reserve names that bindings are forced to keep, so nothing else
        # takes them.
        for namespace, binding in all_bindings(module):
            if binding.reserved is not None:
                scope = reservation_scope(namespace, binding)
                reserve_name(binding.reserved, scope)

        if reserved_globals is not None:
            for name in reserved_globals:
                module.assigned_names.add(name)

        # Assign in order of reference count so hot bindings get short names.
        for namespace, binding in sorted_bindings(module):
            scope = reservation_scope(namespace, binding)

            if binding.allow_rename:
                if isinstance(namespace, ast.Module) and prefix_globals:
                    name = self.available_name(scope, prefix='_')
                else:
                    name = self.available_name(scope)

                def should_rename():
                    if binding.should_rename(name):
                        return True

                    # It's no longer efficient to do this rename

                    if isinstance(binding, NameBinding):
                        # Check that the original name is still available
                        if binding.reserved == binding.name:
                            # We already reserved it (this is probably an arg)
                            return False

                        if not self.is_available(binding.name, scope):
                            # The original name has already been assigned to another binding,
                            # so we need to rename this anyway.
                            return True

                    return False

                if should_rename():
                    binding.rename(name)
                else:
                    # Any existing name will become reserved
                    binding.disallow_rename()

            if binding.name is not None:
                reserve_name(binding.name, scope)

        return module
def rename(module, prefix_globals=False, preserved_globals=None):
    """Assign new names to all renamable bindings in ``module``, in place.

    :param module: The parsed module to rename
    :param bool prefix_globals: Prefix module-level names with ``_``
    :param preserved_globals: Global names that must not be taken
    """
    NameAssigner()(module, prefix_globals, preserved_globals)
<|file_name|>svg.tsx<|end_file_name|><|fim▁begin|>import { styled } from '@storybook/theming';
export interface SvgProps {
  /** Render as `inline-block` instead of the default `block`. */
  inline?: boolean;
}
const Svg = styled.svg<SvgProps>(
{
// Fix rendering bugs in Chrome for hdpi
shapeRendering: 'inherit',
transform: 'translate3d(0,0,0)',
},
({ inline }) =>
inline
? {
display: 'inline-block',
}
: {
display: 'block',
}
);<|fim▁hole|>
export { Svg as default };<|fim▁end|> | Svg.displayName = 'Svg'; |
<|file_name|>test_sensor.py<|end_file_name|><|fim▁begin|>"""Tests for greeneye_monitor sensors."""
from unittest.mock import AsyncMock, MagicMock
from homeassistant.components.greeneye_monitor.sensor import (
DATA_PULSES,
DATA_WATT_SECONDS,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get as get_entity_registry
from .common import (
SINGLE_MONITOR_CONFIG_POWER_SENSORS,
SINGLE_MONITOR_CONFIG_PULSE_COUNTERS,
SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS,
SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS,
SINGLE_MONITOR_SERIAL_NUMBER,
mock_monitor,
setup_greeneye_monitor_component_with_config,
)
from .conftest import assert_sensor_state
async def test_disable_sensor_before_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled before its monitor connected stops listening for new monitors."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
async def test_updates_state_when_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor updates its state when its monitor first connects."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert_sensor_state(hass, "sensor.voltage_1", STATE_UNKNOWN)
assert len(monitors.listeners) == 1
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_disable_sensor_after_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled after its monitor connected stops listening for sensor changes."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitor.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitor.listeners) == 0
async def test_updates_state_when_sensor_pushes(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor entity updates its state when the underlying sensor pushes an update."""
# The sensor base class handles triggering state updates, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
monitor.voltage = 119.8
monitor.notify_all_listeners()
assert_sensor_state(hass, "sensor.voltage_1", "119.8")
async def test_power_sensor_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the power sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute watts)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(
hass, "sensor.channel_1", STATE_UNKNOWN, {DATA_WATT_SECONDS: 1000}
)
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(<|fim▁hole|>async def test_power_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a power sensor reports its values correctly, including handling net metering."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.channels[0].watts = 120.0
monitor.channels[1].watts = 120.0
monitor.channels[0].notify_all_listeners()
monitor.channels[1].notify_all_listeners()
assert_sensor_state(hass, "sensor.channel_1", "120.0", {DATA_WATT_SECONDS: 1000})
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(hass, "sensor.channel_two", "120.0", {DATA_WATT_SECONDS: -400})
async def test_pulse_counter(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a pulse counter sensor reports its values properly, including calculating different units."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.pulse_a", "10.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", "300.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", "18000.0", {DATA_PULSES: 1000})
async def test_temperature_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a temperature sensor reports its values properly, including proper handling of when its native unit is different from that configured in hass."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
# The config says that the sensor is reporting in Fahrenheit; if we set that up
# properly, HA will have converted that to Celsius by default.
assert_sensor_state(hass, "sensor.temp_a", "0.0")
async def test_voltage_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a voltage sensor reports its values properly."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
def connect_monitor(monitors: AsyncMock, serial_number: int) -> MagicMock:
    """Simulate a monitor connecting to Home Assistant. Returns the mock monitor API object."""
    new_monitor = mock_monitor(serial_number)
    monitors.add_monitor(new_monitor)
    return new_monitor
async def disable_entity(hass: HomeAssistant, entity_id: str) -> None:
    """Disable the given entity and wait for the update to settle."""
    registry = get_entity_registry(hass)
    registry.async_update_entity(entity_id, disabled_by="user")
    await hass.async_block_till_done()
)
|
<|file_name|>routes.js<|end_file_name|><|fim▁begin|>import React from "react";
import { NotFoundRoute, Route } from "react-router";
import App from "./components/App";
import Home from "./components/Home";
import NotFound from "./components/NotFound";
import Stargazer from "./components/Stargazer";
import Stargazers from "./components/Stargazers";
export default (
<Route handler={App}>
// Query-able URLs (for POSTs & crawlers)
<Route name="query.repo" path="/repo" handler={Stargazers} />
// Canonical URLs
<Route name="home" path="/" handler={Home} />
<Route name="user" path="/:user" handler={Stargazer} /><|fim▁hole|>
<NotFoundRoute name="404" handler={NotFound} />
</Route>
);<|fim▁end|> | <Route name="repo" path="/:user/:repo" handler={Stargazers} /> |
<|file_name|>fold.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::*;
use ast;
use ast_util;
use codemap::{respan, Span, Spanned};
use parse::token;
use owned_slice::OwnedSlice;
use util::small_vector::SmallVector;
use std::rc::Rc;
use std::gc::{Gc, GC};
// We may eventually want to be able to fold over type parameters, too.
pub trait Folder {
fn fold_crate(&mut self, c: Crate) -> Crate {
noop_fold_crate(c, self)
}
fn fold_meta_items(&mut self, meta_items: &[Gc<MetaItem>]) -> Vec<Gc<MetaItem>> {
meta_items.iter().map(|x| fold_meta_item_(*x, self)).collect()
}
fn fold_view_path(&mut self, view_path: Gc<ViewPath>) -> Gc<ViewPath> {
let inner_view_path = match view_path.node {
ViewPathSimple(ref ident, ref path, node_id) => {
let id = self.new_id(node_id);
ViewPathSimple(ident.clone(),
self.fold_path(path),
id)
}
ViewPathGlob(ref path, node_id) => {
let id = self.new_id(node_id);
ViewPathGlob(self.fold_path(path), id)
}
ViewPathList(ref path, ref path_list_idents, node_id) => {
let id = self.new_id(node_id);
ViewPathList(self.fold_path(path),
path_list_idents.iter().map(|path_list_ident| {
let id = self.new_id(path_list_ident.node
.id);
Spanned {
node: PathListIdent_ {
name: path_list_ident.node
.name
.clone(),
id: id,
},
span: self.new_span(
path_list_ident.span)
}
}).collect(),
id)
}
};
box(GC) Spanned {
node: inner_view_path,
span: self.new_span(view_path.span),
}
}
fn fold_view_item(&mut self, vi: &ViewItem) -> ViewItem {
noop_fold_view_item(vi, self)
}
fn fold_foreign_item(&mut self, ni: Gc<ForeignItem>) -> Gc<ForeignItem> {
noop_fold_foreign_item(&*ni, self)
}
fn fold_item(&mut self, i: Gc<Item>) -> SmallVector<Gc<Item>> {
noop_fold_item(&*i, self)
}
fn fold_struct_field(&mut self, sf: &StructField) -> StructField {
let id = self.new_id(sf.node.id);
Spanned {
node: ast::StructField_ {
kind: sf.node.kind,
id: id,
ty: self.fold_ty(sf.node.ty),
attrs: sf.node.attrs.iter().map(|e| self.fold_attribute(*e)).collect()
},
span: self.new_span(sf.span)
}
}
fn fold_item_underscore(&mut self, i: &Item_) -> Item_ {
noop_fold_item_underscore(i, self)
}
fn fold_fn_decl(&mut self, d: &FnDecl) -> P<FnDecl> {
noop_fold_fn_decl(d, self)
}
fn fold_type_method(&mut self, m: &TypeMethod) -> TypeMethod {
noop_fold_type_method(m, self)
}
fn fold_method(&mut self, m: Gc<Method>) -> Gc<Method> {
noop_fold_method(&*m, self)
}
fn fold_block(&mut self, b: P<Block>) -> P<Block> {
noop_fold_block(b, self)
}
fn fold_stmt(&mut self, s: &Stmt) -> SmallVector<Gc<Stmt>> {
noop_fold_stmt(s, self)
}
fn fold_arm(&mut self, a: &Arm) -> Arm {
Arm {
attrs: a.attrs.iter().map(|x| self.fold_attribute(*x)).collect(),
pats: a.pats.iter().map(|x| self.fold_pat(*x)).collect(),
guard: a.guard.map(|x| self.fold_expr(x)),
body: self.fold_expr(a.body),
}
}
fn fold_pat(&mut self, p: Gc<Pat>) -> Gc<Pat> {
noop_fold_pat(p, self)
}
fn fold_decl(&mut self, d: Gc<Decl>) -> SmallVector<Gc<Decl>> {
let node = match d.node {
DeclLocal(ref l) => SmallVector::one(DeclLocal(self.fold_local(*l))),
DeclItem(it) => {
self.fold_item(it).move_iter().map(|i| DeclItem(i)).collect()
}
};
node.move_iter().map(|node| {
box(GC) Spanned {
node: node,
span: self.new_span(d.span),
}
}).collect()
}
fn fold_expr(&mut self, e: Gc<Expr>) -> Gc<Expr> {
noop_fold_expr(e, self)
}
fn fold_ty(&mut self, t: P<Ty>) -> P<Ty> {
let id = self.new_id(t.id);
let node = match t.node {
TyNil | TyBot | TyInfer => t.node.clone(),
TyBox(ty) => TyBox(self.fold_ty(ty)),
TyUniq(ty) => TyUniq(self.fold_ty(ty)),
TyVec(ty) => TyVec(self.fold_ty(ty)),
TyPtr(ref mt) => TyPtr(fold_mt(mt, self)),
TyRptr(ref region, ref mt) => {
TyRptr(fold_opt_lifetime(region, self), fold_mt(mt, self))
}
TyClosure(ref f, ref region) => {
TyClosure(box(GC) ClosureTy {
fn_style: f.fn_style,
onceness: f.onceness,
bounds: fold_opt_bounds(&f.bounds, self),
decl: self.fold_fn_decl(&*f.decl),
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
}, fold_opt_lifetime(region, self))
}
TyProc(ref f) => {
TyProc(box(GC) ClosureTy {
fn_style: f.fn_style,
onceness: f.onceness,
bounds: fold_opt_bounds(&f.bounds, self),
decl: self.fold_fn_decl(&*f.decl),
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
})
}
TyBareFn(ref f) => {
TyBareFn(box(GC) BareFnTy {
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
fn_style: f.fn_style,
abi: f.abi,
decl: self.fold_fn_decl(&*f.decl)
})
}
TyUnboxedFn(ref f) => {
TyUnboxedFn(box(GC) UnboxedFnTy {
decl: self.fold_fn_decl(&*f.decl),
})
}
TyTup(ref tys) => TyTup(tys.iter().map(|&ty| self.fold_ty(ty)).collect()),
TyParen(ref ty) => TyParen(self.fold_ty(*ty)),
TyPath(ref path, ref bounds, id) => {
let id = self.new_id(id);
TyPath(self.fold_path(path),
fold_opt_bounds(bounds, self),
id)
}
TyFixedLengthVec(ty, e) => {
TyFixedLengthVec(self.fold_ty(ty), self.fold_expr(e))
}
TyTypeof(expr) => TyTypeof(self.fold_expr(expr)),
};
P(Ty {
id: id,
span: self.new_span(t.span),
node: node,
})
}
fn fold_mod(&mut self, m: &Mod) -> Mod {
noop_fold_mod(m, self)
}
fn fold_foreign_mod(&mut self, nm: &ForeignMod) -> ForeignMod {
ast::ForeignMod {
abi: nm.abi,
view_items: nm.view_items
.iter()
.map(|x| self.fold_view_item(x))
.collect(),
items: nm.items
.iter()
.map(|x| self.fold_foreign_item(*x))
.collect(),
}
}
fn fold_variant(&mut self, v: &Variant) -> P<Variant> {
let id = self.new_id(v.node.id);
let kind;
match v.node.kind {
TupleVariantKind(ref variant_args) => {
kind = TupleVariantKind(variant_args.iter().map(|x|
fold_variant_arg_(x, self)).collect())
}
StructVariantKind(ref struct_def) => {
kind = StructVariantKind(box(GC) ast::StructDef {
fields: struct_def.fields.iter()
.map(|f| self.fold_struct_field(f)).collect(),
ctor_id: struct_def.ctor_id.map(|c| self.new_id(c)),
super_struct: match struct_def.super_struct {
Some(t) => Some(self.fold_ty(t)),
None => None
},
is_virtual: struct_def.is_virtual,
})
}
}
let attrs = v.node.attrs.iter().map(|x| self.fold_attribute(*x)).collect();
let de = match v.node.disr_expr {
Some(e) => Some(self.fold_expr(e)),
None => None
};
let node = ast::Variant_ {
name: v.node.name,
attrs: attrs,
kind: kind,
id: id,
disr_expr: de,
vis: v.node.vis,
};
P(Spanned {
node: node,
span: self.new_span(v.span),
})
}
fn fold_ident(&mut self, i: Ident) -> Ident {
i
}
fn fold_path(&mut self, p: &Path) -> Path {
ast::Path {
span: self.new_span(p.span),
global: p.global,
segments: p.segments.iter().map(|segment| ast::PathSegment {
identifier: self.fold_ident(segment.identifier),
lifetimes: segment.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
types: segment.types.iter().map(|&typ| self.fold_ty(typ)).collect(),
}).collect()
}
}
fn fold_local(&mut self, l: Gc<Local>) -> Gc<Local> {
let id = self.new_id(l.id); // Needs to be first, for ast_map.
box(GC) Local {
id: id,
ty: self.fold_ty(l.ty),
pat: self.fold_pat(l.pat),
init: l.init.map(|e| self.fold_expr(e)),
span: self.new_span(l.span),
source: l.source,
}
}
fn fold_mac(&mut self, macro: &Mac) -> Mac {
Spanned {
node: match macro.node {
MacInvocTT(ref p, ref tts, ctxt) => {
MacInvocTT(self.fold_path(p),
fold_tts(tts.as_slice(), self),
ctxt)
}
},
span: self.new_span(macro.span)
}
}
<|fim▁hole|> fn map_exprs(&self, f: |Gc<Expr>| -> Gc<Expr>,
es: &[Gc<Expr>]) -> Vec<Gc<Expr>> {
es.iter().map(|x| f(*x)).collect()
}
fn new_id(&mut self, i: NodeId) -> NodeId {
i
}
fn new_span(&mut self, sp: Span) -> Span {
sp
}
fn fold_explicit_self(&mut self, es: &ExplicitSelf) -> ExplicitSelf {
Spanned {
span: self.new_span(es.span),
node: self.fold_explicit_self_(&es.node)
}
}
fn fold_explicit_self_(&mut self, es: &ExplicitSelf_) -> ExplicitSelf_ {
match *es {
SelfStatic | SelfValue | SelfUniq => *es,
SelfRegion(ref lifetime, m) => {
SelfRegion(fold_opt_lifetime(lifetime, self), m)
}
}
}
fn fold_lifetime(&mut self, l: &Lifetime) -> Lifetime {
noop_fold_lifetime(l, self)
}
//used in noop_fold_item and noop_fold_crate
fn fold_attribute(&mut self, at: Attribute) -> Attribute {
Spanned {
span: self.new_span(at.span),
node: ast::Attribute_ {
id: at.node.id,
style: at.node.style,
value: fold_meta_item_(at.node.value, self),
is_sugared_doc: at.node.is_sugared_doc
}
}
}
}
/* some little folds that probably aren't useful to have in Folder itself*/
//used in noop_fold_item and noop_fold_crate and noop_fold_crate_directive
fn fold_meta_item_<T: Folder>(mi: Gc<MetaItem>, fld: &mut T) -> Gc<MetaItem> {
box(GC) Spanned {
node:
match mi.node {
MetaWord(ref id) => MetaWord((*id).clone()),
MetaList(ref id, ref mis) => {
MetaList((*id).clone(), mis.iter().map(|e| fold_meta_item_(*e, fld)).collect())
}
MetaNameValue(ref id, ref s) => {
MetaNameValue((*id).clone(), (*s).clone())
}
},
span: fld.new_span(mi.span) }
}
//used in noop_fold_foreign_item and noop_fold_fn_decl
fn fold_arg_<T: Folder>(a: &Arg, fld: &mut T) -> Arg {
let id = fld.new_id(a.id); // Needs to be first, for ast_map.
Arg {
id: id,
ty: fld.fold_ty(a.ty),
pat: fld.fold_pat(a.pat),
}
}
pub fn fold_tt<T: Folder>(tt: &TokenTree, fld: &mut T) -> TokenTree {
match *tt {
TTTok(span, ref tok) =>
TTTok(span, fold_token(tok,fld)),
TTDelim(ref tts) => TTDelim(Rc::new(fold_tts(tts.as_slice(), fld))),
TTSeq(span, ref pattern, ref sep, is_optional) =>
TTSeq(span,
Rc::new(fold_tts(pattern.as_slice(), fld)),
sep.as_ref().map(|tok| fold_token(tok,fld)),
is_optional),
TTNonterminal(sp,ref ident) =>
TTNonterminal(sp,fld.fold_ident(*ident))
}
}
pub fn fold_tts<T: Folder>(tts: &[TokenTree], fld: &mut T) -> Vec<TokenTree> {
tts.iter().map(|tt| fold_tt(tt,fld)).collect()
}
// apply ident folder if it's an ident, apply other folds to interpolated nodes
fn fold_token<T: Folder>(t: &token::Token, fld: &mut T) -> token::Token {
match *t {
token::IDENT(id, followed_by_colons) => {
token::IDENT(fld.fold_ident(id), followed_by_colons)
}
token::LIFETIME(id) => token::LIFETIME(fld.fold_ident(id)),
token::INTERPOLATED(ref nt) => token::INTERPOLATED(fold_interpolated(nt,fld)),
_ => (*t).clone()
}
}
// apply folder to elements of interpolated nodes
//
// NB: this can occur only when applying a fold to partially expanded code, where
// parsed pieces have gotten implanted ito *other* macro invocations. This is relevant
// for macro hygiene, but possibly not elsewhere.
//
// One problem here occurs because the types for fold_item, fold_stmt, etc. allow the
// folder to return *multiple* items; this is a problem for the nodes here, because
// they insist on having exactly one piece. One solution would be to mangle the fold
// trait to include one-to-many and one-to-one versions of these entry points, but that
// would probably confuse a lot of people and help very few. Instead, I'm just going
// to put in dynamic checks. I think the performance impact of this will be pretty much
// nonexistent. The danger is that someone will apply a fold to a partially expanded
// node, and will be confused by the fact that their "fold_item" or "fold_stmt" isn't
// getting called on NtItem or NtStmt nodes. Hopefully they'll wind up reading this
// comment, and doing something appropriate.
//
// BTW, design choice: I considered just changing the type of, e.g., NtItem to contain
// multiple items, but decided against it when I looked at parse_item_or_view_item and
// tried to figure out what I would do with multiple items there....
fn fold_interpolated<T: Folder>(nt : &token::Nonterminal, fld: &mut T) -> token::Nonterminal {
match *nt {
token::NtItem(item) =>
token::NtItem(fld.fold_item(item)
.expect_one("expected fold to produce exactly one item")),
token::NtBlock(block) => token::NtBlock(fld.fold_block(block)),
token::NtStmt(stmt) =>
token::NtStmt(fld.fold_stmt(stmt)
.expect_one("expected fold to produce exactly one statement")),
token::NtPat(pat) => token::NtPat(fld.fold_pat(pat)),
token::NtExpr(expr) => token::NtExpr(fld.fold_expr(expr)),
token::NtTy(ty) => token::NtTy(fld.fold_ty(ty)),
token::NtIdent(ref id, is_mod_name) =>
token::NtIdent(box fld.fold_ident(**id),is_mod_name),
token::NtMeta(meta_item) => token::NtMeta(fold_meta_item_(meta_item,fld)),
token::NtPath(ref path) => token::NtPath(box fld.fold_path(*path)),
token::NtTT(tt) => token::NtTT(box (GC) fold_tt(tt,fld)),
// it looks to me like we can leave out the matchers: token::NtMatchers(matchers)
_ => (*nt).clone()
}
}
pub fn noop_fold_fn_decl<T: Folder>(decl: &FnDecl, fld: &mut T) -> P<FnDecl> {
P(FnDecl {
inputs: decl.inputs.iter().map(|x| fold_arg_(x, fld)).collect(), // bad copy
output: fld.fold_ty(decl.output),
cf: decl.cf,
variadic: decl.variadic
})
}
fn fold_ty_param_bound<T: Folder>(tpb: &TyParamBound, fld: &mut T)
-> TyParamBound {
match *tpb {
TraitTyParamBound(ref ty) => TraitTyParamBound(fold_trait_ref(ty, fld)),
StaticRegionTyParamBound => StaticRegionTyParamBound,
UnboxedFnTyParamBound(ref unboxed_function_type) => {
UnboxedFnTyParamBound(UnboxedFnTy {
decl: fld.fold_fn_decl(&*unboxed_function_type.decl),
})
}
OtherRegionTyParamBound(s) => OtherRegionTyParamBound(s)
}
}
pub fn fold_ty_param<T: Folder>(tp: &TyParam, fld: &mut T) -> TyParam {
let id = fld.new_id(tp.id);
TyParam {
ident: tp.ident,
id: id,
sized: tp.sized,
bounds: tp.bounds.map(|x| fold_ty_param_bound(x, fld)),
default: tp.default.map(|x| fld.fold_ty(x)),
span: tp.span
}
}
pub fn fold_ty_params<T: Folder>(tps: &OwnedSlice<TyParam>, fld: &mut T)
-> OwnedSlice<TyParam> {
tps.map(|tp| fold_ty_param(tp, fld))
}
pub fn noop_fold_lifetime<T: Folder>(l: &Lifetime, fld: &mut T) -> Lifetime {
let id = fld.new_id(l.id);
Lifetime {
id: id,
span: fld.new_span(l.span),
name: l.name
}
}
pub fn fold_lifetimes<T: Folder>(lts: &Vec<Lifetime>, fld: &mut T)
-> Vec<Lifetime> {
lts.iter().map(|l| fld.fold_lifetime(l)).collect()
}
pub fn fold_opt_lifetime<T: Folder>(o_lt: &Option<Lifetime>, fld: &mut T)
-> Option<Lifetime> {
o_lt.as_ref().map(|lt| fld.fold_lifetime(lt))
}
pub fn fold_generics<T: Folder>(generics: &Generics, fld: &mut T) -> Generics {
Generics {ty_params: fold_ty_params(&generics.ty_params, fld),
lifetimes: fold_lifetimes(&generics.lifetimes, fld)}
}
fn fold_struct_def<T: Folder>(struct_def: Gc<StructDef>,
fld: &mut T) -> Gc<StructDef> {
box(GC) ast::StructDef {
fields: struct_def.fields.iter().map(|f| fold_struct_field(f, fld)).collect(),
ctor_id: struct_def.ctor_id.map(|cid| fld.new_id(cid)),
super_struct: match struct_def.super_struct {
Some(t) => Some(fld.fold_ty(t)),
None => None
},
is_virtual: struct_def.is_virtual,
}
}
fn fold_trait_ref<T: Folder>(p: &TraitRef, fld: &mut T) -> TraitRef {
let id = fld.new_id(p.ref_id);
ast::TraitRef {
path: fld.fold_path(&p.path),
ref_id: id,
}
}
fn fold_struct_field<T: Folder>(f: &StructField, fld: &mut T) -> StructField {
let id = fld.new_id(f.node.id);
Spanned {
node: ast::StructField_ {
kind: f.node.kind,
id: id,
ty: fld.fold_ty(f.node.ty),
attrs: f.node.attrs.iter().map(|a| fld.fold_attribute(*a)).collect(),
},
span: fld.new_span(f.span),
}
}
fn fold_field_<T: Folder>(field: Field, folder: &mut T) -> Field {
ast::Field {
ident: respan(field.ident.span, folder.fold_ident(field.ident.node)),
expr: folder.fold_expr(field.expr),
span: folder.new_span(field.span),
}
}
fn fold_mt<T: Folder>(mt: &MutTy, folder: &mut T) -> MutTy {
MutTy {
ty: folder.fold_ty(mt.ty),
mutbl: mt.mutbl,
}
}
fn fold_opt_bounds<T: Folder>(b: &Option<OwnedSlice<TyParamBound>>, folder: &mut T)
-> Option<OwnedSlice<TyParamBound>> {
b.as_ref().map(|bounds| {
bounds.map(|bound| {
fold_ty_param_bound(bound, folder)
})
})
}
fn fold_variant_arg_<T: Folder>(va: &VariantArg, folder: &mut T) -> VariantArg {
let id = folder.new_id(va.id);
ast::VariantArg {
ty: folder.fold_ty(va.ty),
id: id,
}
}
pub fn noop_fold_view_item<T: Folder>(vi: &ViewItem, folder: &mut T)
-> ViewItem{
let inner_view_item = match vi.node {
ViewItemExternCrate(ref ident, ref string, node_id) => {
ViewItemExternCrate(ident.clone(),
(*string).clone(),
folder.new_id(node_id))
}
ViewItemUse(ref view_path) => {
ViewItemUse(folder.fold_view_path(*view_path))
}
};
ViewItem {
node: inner_view_item,
attrs: vi.attrs.iter().map(|a| folder.fold_attribute(*a)).collect(),
vis: vi.vis,
span: folder.new_span(vi.span),
}
}
pub fn noop_fold_block<T: Folder>(b: P<Block>, folder: &mut T) -> P<Block> {
let id = folder.new_id(b.id); // Needs to be first, for ast_map.
let view_items = b.view_items.iter().map(|x| folder.fold_view_item(x)).collect();
let stmts = b.stmts.iter().flat_map(|s| folder.fold_stmt(&**s).move_iter()).collect();
P(Block {
id: id,
view_items: view_items,
stmts: stmts,
expr: b.expr.map(|x| folder.fold_expr(x)),
rules: b.rules,
span: folder.new_span(b.span),
})
}
pub fn noop_fold_item_underscore<T: Folder>(i: &Item_, folder: &mut T) -> Item_ {
match *i {
ItemStatic(t, m, e) => {
ItemStatic(folder.fold_ty(t), m, folder.fold_expr(e))
}
ItemFn(decl, fn_style, abi, ref generics, body) => {
ItemFn(
folder.fold_fn_decl(&*decl),
fn_style,
abi,
fold_generics(generics, folder),
folder.fold_block(body)
)
}
ItemMod(ref m) => ItemMod(folder.fold_mod(m)),
ItemForeignMod(ref nm) => ItemForeignMod(folder.fold_foreign_mod(nm)),
ItemTy(t, ref generics) => {
ItemTy(folder.fold_ty(t), fold_generics(generics, folder))
}
ItemEnum(ref enum_definition, ref generics) => {
ItemEnum(
ast::EnumDef {
variants: enum_definition.variants.iter().map(|&x| {
folder.fold_variant(&*x)
}).collect(),
},
fold_generics(generics, folder))
}
ItemStruct(ref struct_def, ref generics) => {
let struct_def = fold_struct_def(*struct_def, folder);
ItemStruct(struct_def, fold_generics(generics, folder))
}
ItemImpl(ref generics, ref ifce, ty, ref methods) => {
ItemImpl(fold_generics(generics, folder),
ifce.as_ref().map(|p| fold_trait_ref(p, folder)),
folder.fold_ty(ty),
methods.iter().map(|x| folder.fold_method(*x)).collect()
)
}
ItemTrait(ref generics, ref sized, ref traits, ref methods) => {
let methods = methods.iter().map(|method| {
match *method {
Required(ref m) => Required(folder.fold_type_method(m)),
Provided(method) => Provided(folder.fold_method(method))
}
}).collect();
ItemTrait(fold_generics(generics, folder),
*sized,
traits.iter().map(|p| fold_trait_ref(p, folder)).collect(),
methods)
}
ItemMac(ref m) => ItemMac(folder.fold_mac(m)),
}
}
pub fn noop_fold_type_method<T: Folder>(m: &TypeMethod, fld: &mut T) -> TypeMethod {
let id = fld.new_id(m.id); // Needs to be first, for ast_map.
TypeMethod {
id: id,
ident: fld.fold_ident(m.ident),
attrs: m.attrs.iter().map(|a| fld.fold_attribute(*a)).collect(),
fn_style: m.fn_style,
decl: fld.fold_fn_decl(&*m.decl),
generics: fold_generics(&m.generics, fld),
explicit_self: fld.fold_explicit_self(&m.explicit_self),
span: fld.new_span(m.span),
vis: m.vis,
}
}
pub fn noop_fold_mod<T: Folder>(m: &Mod, folder: &mut T) -> Mod {
ast::Mod {
inner: folder.new_span(m.inner),
view_items: m.view_items
.iter()
.map(|x| folder.fold_view_item(x)).collect(),
items: m.items.iter().flat_map(|x| folder.fold_item(*x).move_iter()).collect(),
}
}
pub fn noop_fold_crate<T: Folder>(c: Crate, folder: &mut T) -> Crate {
Crate {
module: folder.fold_mod(&c.module),
attrs: c.attrs.iter().map(|x| folder.fold_attribute(*x)).collect(),
config: c.config.iter().map(|x| fold_meta_item_(*x, folder)).collect(),
span: folder.new_span(c.span),
}
}
// fold one item into possibly many items
pub fn noop_fold_item<T: Folder>(i: &Item,
folder: &mut T) -> SmallVector<Gc<Item>> {
SmallVector::one(box(GC) noop_fold_item_(i,folder))
}
// fold one item into exactly one item
pub fn noop_fold_item_<T: Folder>(i: &Item, folder: &mut T) -> Item {
let id = folder.new_id(i.id); // Needs to be first, for ast_map.
let node = folder.fold_item_underscore(&i.node);
let ident = match node {
// The node may have changed, recompute the "pretty" impl name.
ItemImpl(_, ref maybe_trait, ty, _) => {
ast_util::impl_pretty_name(maybe_trait, &*ty)
}
_ => i.ident
};
Item {
id: id,
ident: folder.fold_ident(ident),
attrs: i.attrs.iter().map(|e| folder.fold_attribute(*e)).collect(),
node: node,
vis: i.vis,
span: folder.new_span(i.span)
}
}
pub fn noop_fold_foreign_item<T: Folder>(ni: &ForeignItem,
folder: &mut T) -> Gc<ForeignItem> {
let id = folder.new_id(ni.id); // Needs to be first, for ast_map.
box(GC) ForeignItem {
id: id,
ident: folder.fold_ident(ni.ident),
attrs: ni.attrs.iter().map(|x| folder.fold_attribute(*x)).collect(),
node: match ni.node {
ForeignItemFn(ref fdec, ref generics) => {
ForeignItemFn(P(FnDecl {
inputs: fdec.inputs.iter().map(|a| fold_arg_(a, folder)).collect(),
output: folder.fold_ty(fdec.output),
cf: fdec.cf,
variadic: fdec.variadic
}), fold_generics(generics, folder))
}
ForeignItemStatic(t, m) => {
ForeignItemStatic(folder.fold_ty(t), m)
}
},
span: folder.new_span(ni.span),
vis: ni.vis,
}
}
pub fn noop_fold_method<T: Folder>(m: &Method, folder: &mut T) -> Gc<Method> {
let id = folder.new_id(m.id); // Needs to be first, for ast_map.
box(GC) Method {
id: id,
ident: folder.fold_ident(m.ident),
attrs: m.attrs.iter().map(|a| folder.fold_attribute(*a)).collect(),
generics: fold_generics(&m.generics, folder),
explicit_self: folder.fold_explicit_self(&m.explicit_self),
fn_style: m.fn_style,
decl: folder.fold_fn_decl(&*m.decl),
body: folder.fold_block(m.body),
span: folder.new_span(m.span),
vis: m.vis
}
}
pub fn noop_fold_pat<T: Folder>(p: Gc<Pat>, folder: &mut T) -> Gc<Pat> {
let id = folder.new_id(p.id);
let node = match p.node {
PatWild => PatWild,
PatWildMulti => PatWildMulti,
PatIdent(binding_mode, ref pth, ref sub) => {
PatIdent(binding_mode,
folder.fold_path(pth),
sub.map(|x| folder.fold_pat(x)))
}
PatLit(e) => PatLit(folder.fold_expr(e)),
PatEnum(ref pth, ref pats) => {
PatEnum(folder.fold_path(pth),
pats.as_ref().map(|pats| pats.iter().map(|x| folder.fold_pat(*x)).collect()))
}
PatStruct(ref pth, ref fields, etc) => {
let pth_ = folder.fold_path(pth);
let fs = fields.iter().map(|f| {
ast::FieldPat {
ident: f.ident,
pat: folder.fold_pat(f.pat)
}
}).collect();
PatStruct(pth_, fs, etc)
}
PatTup(ref elts) => PatTup(elts.iter().map(|x| folder.fold_pat(*x)).collect()),
PatBox(inner) => PatBox(folder.fold_pat(inner)),
PatRegion(inner) => PatRegion(folder.fold_pat(inner)),
PatRange(e1, e2) => {
PatRange(folder.fold_expr(e1), folder.fold_expr(e2))
},
PatVec(ref before, ref slice, ref after) => {
PatVec(before.iter().map(|x| folder.fold_pat(*x)).collect(),
slice.map(|x| folder.fold_pat(x)),
after.iter().map(|x| folder.fold_pat(*x)).collect())
}
PatMac(ref mac) => PatMac(folder.fold_mac(mac)),
};
box(GC) Pat {
id: id,
span: folder.new_span(p.span),
node: node,
}
}
pub fn noop_fold_expr<T: Folder>(e: Gc<Expr>, folder: &mut T) -> Gc<Expr> {
let id = folder.new_id(e.id);
let node = match e.node {
ExprVstore(e, v) => {
ExprVstore(folder.fold_expr(e), v)
}
ExprBox(p, e) => {
ExprBox(folder.fold_expr(p), folder.fold_expr(e))
}
ExprVec(ref exprs) => {
ExprVec(exprs.iter().map(|&x| folder.fold_expr(x)).collect())
}
ExprRepeat(expr, count) => {
ExprRepeat(folder.fold_expr(expr), folder.fold_expr(count))
}
ExprTup(ref elts) => ExprTup(elts.iter().map(|x| folder.fold_expr(*x)).collect()),
ExprCall(f, ref args) => {
ExprCall(folder.fold_expr(f),
args.iter().map(|&x| folder.fold_expr(x)).collect())
}
ExprMethodCall(i, ref tps, ref args) => {
ExprMethodCall(
respan(i.span, folder.fold_ident(i.node)),
tps.iter().map(|&x| folder.fold_ty(x)).collect(),
args.iter().map(|&x| folder.fold_expr(x)).collect())
}
ExprBinary(binop, lhs, rhs) => {
ExprBinary(binop,
folder.fold_expr(lhs),
folder.fold_expr(rhs))
}
ExprUnary(binop, ohs) => {
ExprUnary(binop, folder.fold_expr(ohs))
}
ExprLit(_) => e.node.clone(),
ExprCast(expr, ty) => {
ExprCast(folder.fold_expr(expr), folder.fold_ty(ty))
}
ExprAddrOf(m, ohs) => ExprAddrOf(m, folder.fold_expr(ohs)),
ExprIf(cond, tr, fl) => {
ExprIf(folder.fold_expr(cond),
folder.fold_block(tr),
fl.map(|x| folder.fold_expr(x)))
}
ExprWhile(cond, body) => {
ExprWhile(folder.fold_expr(cond), folder.fold_block(body))
}
ExprForLoop(pat, iter, body, ref maybe_ident) => {
ExprForLoop(folder.fold_pat(pat),
folder.fold_expr(iter),
folder.fold_block(body),
maybe_ident.map(|i| folder.fold_ident(i)))
}
ExprLoop(body, opt_ident) => {
ExprLoop(folder.fold_block(body),
opt_ident.map(|x| folder.fold_ident(x)))
}
ExprMatch(expr, ref arms) => {
ExprMatch(folder.fold_expr(expr),
arms.iter().map(|x| folder.fold_arm(x)).collect())
}
ExprFnBlock(ref decl, ref body) => {
ExprFnBlock(folder.fold_fn_decl(&**decl),
folder.fold_block(body.clone()))
}
ExprProc(ref decl, ref body) => {
ExprProc(folder.fold_fn_decl(&**decl),
folder.fold_block(body.clone()))
}
ExprBlock(ref blk) => ExprBlock(folder.fold_block(blk.clone())),
ExprAssign(el, er) => {
ExprAssign(folder.fold_expr(el), folder.fold_expr(er))
}
ExprAssignOp(op, el, er) => {
ExprAssignOp(op,
folder.fold_expr(el),
folder.fold_expr(er))
}
ExprField(el, id, ref tys) => {
ExprField(folder.fold_expr(el),
respan(id.span, folder.fold_ident(id.node)),
tys.iter().map(|&x| folder.fold_ty(x)).collect())
}
ExprIndex(el, er) => {
ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
}
ExprPath(ref pth) => ExprPath(folder.fold_path(pth)),
ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|x| folder.fold_ident(x))),
ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|x| folder.fold_ident(x))),
ExprRet(ref e) => {
ExprRet(e.map(|x| folder.fold_expr(x)))
}
ExprInlineAsm(ref a) => {
ExprInlineAsm(InlineAsm {
inputs: a.inputs.iter().map(|&(ref c, input)| {
((*c).clone(), folder.fold_expr(input))
}).collect(),
outputs: a.outputs.iter().map(|&(ref c, out)| {
((*c).clone(), folder.fold_expr(out))
}).collect(),
.. (*a).clone()
})
}
ExprMac(ref mac) => ExprMac(folder.fold_mac(mac)),
ExprStruct(ref path, ref fields, maybe_expr) => {
ExprStruct(folder.fold_path(path),
fields.iter().map(|x| fold_field_(*x, folder)).collect(),
maybe_expr.map(|x| folder.fold_expr(x)))
},
ExprParen(ex) => ExprParen(folder.fold_expr(ex))
};
box(GC) Expr {
id: id,
node: node,
span: folder.new_span(e.span),
}
}
pub fn noop_fold_stmt<T: Folder>(s: &Stmt,
folder: &mut T) -> SmallVector<Gc<Stmt>> {
let nodes = match s.node {
StmtDecl(d, id) => {
let id = folder.new_id(id);
folder.fold_decl(d).move_iter()
.map(|d| StmtDecl(d, id))
.collect()
}
StmtExpr(e, id) => {
let id = folder.new_id(id);
SmallVector::one(StmtExpr(folder.fold_expr(e), id))
}
StmtSemi(e, id) => {
let id = folder.new_id(id);
SmallVector::one(StmtSemi(folder.fold_expr(e), id))
}
StmtMac(ref mac, semi) => SmallVector::one(StmtMac(folder.fold_mac(mac), semi))
};
nodes.move_iter().map(|node| box(GC) Spanned {
node: node,
span: folder.new_span(s.span),
}).collect()
}
#[cfg(test)]
mod test {
use std::io;
use ast;
use util::parser_testing::{string_to_crate, matches_codepattern};
use parse::token;
use print::pprust;
use super::*;
// this version doesn't care about getting comments or docstrings in.
fn fake_print_crate(s: &mut pprust::State,
krate: &ast::Crate) -> io::IoResult<()> {
s.print_mod(&krate.module, krate.attrs.as_slice())
}
// change every identifier to "zz"
struct ToZzIdentFolder;
impl Folder for ToZzIdentFolder {
fn fold_ident(&mut self, _: ast::Ident) -> ast::Ident {
token::str_to_ident("zz")
}
}
// maybe add to expand.rs...
macro_rules! assert_pred (
($pred:expr, $predname:expr, $a:expr , $b:expr) => (
{
let pred_val = $pred;
let a_val = $a;
let b_val = $b;
if !(pred_val(a_val.as_slice(),b_val.as_slice())) {
fail!("expected args satisfying {}, got {:?} and {:?}",
$predname, a_val, b_val);
}
}
)
)
// make sure idents get transformed everywhere
#[test] fn ident_transformation () {
let mut zz_fold = ToZzIdentFolder;
let ast = string_to_crate(
"#[a] mod b {fn c (d : e, f : g) {h!(i,j,k);l;m}}".to_string());
let folded_crate = zz_fold.fold_crate(ast);
assert_pred!(
matches_codepattern,
"matches_codepattern",
pprust::to_str(|s| fake_print_crate(s, &folded_crate)),
"#[a]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string());
}
// even inside macro defs....
#[test] fn ident_transformation_in_defs () {
let mut zz_fold = ToZzIdentFolder;
let ast = string_to_crate(
"macro_rules! a {(b $c:expr $(d $e:token)f+ => \
(g $(d $d $e)+))} ".to_string());
let folded_crate = zz_fold.fold_crate(ast);
assert_pred!(
matches_codepattern,
"matches_codepattern",
pprust::to_str(|s| fake_print_crate(s, &folded_crate)),
"zz!zz((zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+)))".to_string());
}
}<|fim▁end|> | |
<|file_name|>storetest.go<|end_file_name|><|fim▁begin|>// Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package storetest provides the utility functions of config store for
// testing. Shouldn't be imported outside of the test.
package storetest
import (
"fmt"
"strings"
"sync"
multierror "github.com/hashicorp/go-multierror"
"istio.io/istio/mixer/pkg/config/store"
)
// Memstore is an on-memory store backend. Used only for testing.
type Memstore struct {
mu sync.Mutex
data map[store.Key]*store.BackEndResource
wch chan store.BackendEvent
donec chan struct{}
}
// NewMemstore creates a new Memstore instance.
func NewMemstore() *Memstore {
return &Memstore{data: map[store.Key]*store.BackEndResource{}, donec: make(chan struct{})}
}
// Stop implements store.Backend interface.
func (m *Memstore) Stop() {
close(m.donec)
}
// Init implements store.Backend interface.
func (m *Memstore) Init(kinds []string) error {
return nil
}
<|fim▁hole|> // Watch is not supported in the memstore, but sometimes it needs to be invoked.
c := make(chan store.BackendEvent)
go func() {
<-m.donec
close(c)
}()
m.mu.Lock()
defer m.mu.Unlock()
m.wch = c
return c, nil
}
// Get implements store.Backend interface.
func (m *Memstore) Get(key store.Key) (*store.BackEndResource, error) {
m.mu.Lock()
defer m.mu.Unlock()
r, ok := m.data[key]
if !ok {
return nil, store.ErrNotFound
}
return r, nil
}
// List implements store.Backend interface.
func (m *Memstore) List() map[store.Key]*store.BackEndResource {
return m.data
}
// Put adds a new resource to the memstore.
func (m *Memstore) Put(r *store.BackEndResource) {
m.mu.Lock()
defer m.mu.Unlock()
m.data[r.Key()] = r
if m.wch != nil {
m.wch <- store.BackendEvent{Type: store.Update, Key: r.Key(), Value: r}
}
}
// Delete removes a resource for the specified key from the memstore.
func (m *Memstore) Delete(k store.Key) {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.data, k)
if m.wch != nil {
m.wch <- store.BackendEvent{Type: store.Delete, Key: k}
}
}
// SetupStoreForTest creates an on-memory store backend, initializes its
// data with the specified specs, and returns a new store with the backend.
// Note that this store can't change, Watch does not emit any events.
func SetupStoreForTest(data ...string) (store.Store, error) {
m := &Memstore{data: map[store.Key]*store.BackEndResource{}, donec: make(chan struct{})}
var errs error
for i, d := range data {
for j, chunk := range strings.Split(d, "\n---\n") {
chunk = strings.TrimSpace(chunk)
if len(chunk) == 0 {
continue
}
r, err := store.ParseChunk([]byte(chunk))
if err != nil {
errs = multierror.Append(errs, fmt.Errorf("failed to parse at %d/%d: %v", i, j, err))
continue
}
if r == nil {
continue
}
m.data[r.Key()] = r
}
}
if errs != nil {
return nil, errs
}
return store.WithBackend(m), nil
}<|fim▁end|> | // Watch implements store.Backend interface.
func (m *Memstore) Watch() (<-chan store.BackendEvent, error) { |
<|file_name|>parser2.py<|end_file_name|><|fim▁begin|># parser2.py
# parses sentences from the CSV files
# J. Hassler Thurston
# RocHack Hackathon December 7, 2013
# Modified December 11, 2013
import nltk
from random import choice
cfg_file = 'upenn_grammar.cfg'
tbank_productions = []
nonterminals = []
rightside = []
def get_initial_rules():
global tbank_productions, nonterminals
# from http://stackoverflow.com/questions/7056996/how-do-i-get-a-set-of-grammar-rules-from-penn-treebank-using-python-nltk
tbank_productions = [production for sent in nltk.corpus.treebank.parsed_sents() for production in sent.productions()]
nonterminals = [production.lhs().__str__() for production in tbank_productions]<|fim▁hole|>
# modified from http://stackoverflow.com/questions/15009656/how-to-use-nltk-to-generate-sentences-from-an-induced-grammar
def generate_sample(grammar, items=[nltk.grammar.Nonterminal('S')]):
frags = []
if len(items) == 1:
print items
if isinstance(items[0], nltk.grammar.Nonterminal):
frags.append(generate_sample(grammar, grammar.productions(lhs=items[0])))
else:
frags.append(items[0])
else:
print items[:2]
# This is where we need to make our changes
chosen_expansion = choice(items)
#print type(chosen_expansion)
frags.append(generate_sample(grammar, [chosen_expansion]))
return frags<|fim▁end|> | rightside = [production.rhs().__str__() for production in tbank_productions]
tbank_grammar = nltk.grammar.ContextFreeGrammar(nltk.grammar.Nonterminal('S'), tbank_productions)
print generate_sample(tbank_grammar) |
<|file_name|>common.js<|end_file_name|><|fim▁begin|>import fetch from '../api'
import config from '../config'
export const login = (source, token) => fetch(
'POST',
`${config.API_URL}/login`,<|fim▁hole|> {},
false,
{'X-Auth-Source': source, 'X-Auth-Token': token}
)<|fim▁end|> | |
<|file_name|>connection_interface.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use interner::{Intern, StringKey};
use serde::{Deserialize, Serialize};<|fim▁hole|>#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub struct ConnectionInterface {
pub cursor: StringKey,
pub edges: StringKey,
pub end_cursor: StringKey,
pub has_next_page: StringKey,
pub has_previous_page: StringKey,
pub node: StringKey,
pub page_info: StringKey,
pub start_cursor: StringKey,
}
impl Default for ConnectionInterface {
fn default() -> Self {
ConnectionInterface {
cursor: "cursor".intern(),
edges: "edges".intern(),
end_cursor: "endCursor".intern(),
has_next_page: "hasNextPage".intern(),
has_previous_page: "hasPreviousPage".intern(),
node: "node".intern(),
page_info: "pageInfo".intern(),
start_cursor: "startCursor".intern(),
}
}
}<|fim▁end|> |
/// Configuration where Relay should expect some fields in the schema. |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# dolphintools documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 12 22:39:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dolphintools'
copyright = u'2015, Alper Kucukural'
author = u'Alper Kucukural'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.<|fim▁hole|>#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dolphintoolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dolphintools.tex', u'dolphintools Documentation',
u'Alper Kucukural', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dolphintools', u'dolphintools Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dolphintools', u'dolphintools Documentation',
author, 'dolphintools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False<|fim▁end|> | #html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation". |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from pathlib import Path
from flask import (Flask, session, redirect, url_for, flash, g, request,
render_template)
from flask_assets import Environment
from flask_babel import gettext
from flask_wtf.csrf import CSRFProtect, CSRFError
from os import path
import sys
from werkzeug.exceptions import default_exceptions
import i18n
import template_filters
import version
from crypto_util import CryptoUtil
from db import db
from journalist_app import account, admin, api, main, col
from journalist_app.utils import (get_source, logged_in,
JournalistInterfaceSessionInterface,
cleanup_expired_revoked_tokens)
from models import InstanceConfig, Journalist
from server_os import is_os_near_eol, is_os_past_eol
from store import Storage
import typing
# https://www.python.org/dev/peps/pep-0484/#runtime-or-type-checking
if typing.TYPE_CHECKING:
# flake8 can not understand type annotation yet.
# That is why all type annotation relative import
# statements has to be marked as noqa.
# http://flake8.pycqa.org/en/latest/user/error-codes.html?highlight=f401
from sdconfig import SDConfig # noqa: F401
from typing import Optional, Union, Tuple, Any # noqa: F401
from werkzeug import Response # noqa: F401
from werkzeug.exceptions import HTTPException # noqa: F401
# Endpoints that may be visited without an authenticated session; setup_g()
# redirects any other request to the login page when not logged in.
_insecure_views = ['main.login', 'static']
def get_logo_url(app: Flask) -> str:
    """Return the static URL of the instance logo.

    A custom logo takes precedence over the default one.  Raises
    FileNotFoundError when the app has no static folder or neither
    logo file exists on disk.
    """
    if not app.static_folder:
        raise FileNotFoundError

    static_root = Path(app.static_folder)
    for relative_name in ("i/custom_logo.png", "i/logo.png"):
        if (static_root / relative_name).is_file():
            return url_for("static", filename=relative_name)

    raise FileNotFoundError
def create_app(config: 'SDConfig') -> Flask:
    """Build and configure the Journalist Interface Flask application.

    Wires up sessions, CSRF protection, the database, storage/crypto
    helpers, error handlers, Jinja settings, per-request hooks, and all
    blueprints, then returns the ready-to-serve app.
    """
    app = Flask(__name__,
                template_folder=config.JOURNALIST_TEMPLATES_DIR,
                static_folder=path.join(config.SECUREDROP_ROOT, 'static'))

    app.config.from_object(config.JOURNALIST_APP_FLASK_CONFIG_CLS)
    app.session_interface = JournalistInterfaceSessionInterface()

    csrf = CSRFProtect(app)
    Environment(app)

    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
    db.init_app(app)

    # Record the host OS end-of-life status once at startup; setup_g() uses
    # these flags to surface warnings in the UI.
    app.config.update(OS_PAST_EOL=is_os_past_eol(), OS_NEAR_EOL=is_os_near_eol())

    # TODO: Attaching a Storage dynamically like this disables all type checking (and
    # breaks code analysis tools) for code that uses current_app.storage; it should be refactored
    app.storage = Storage(config.STORE_DIR,
                          config.TEMP_DIR,
                          config.JOURNALIST_KEY)

    # TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and
    # breaks code analysis tools) for code that uses current_app.storage; it should be refactored
    app.crypto_util = CryptoUtil(
        scrypt_params=config.SCRYPT_PARAMS,
        scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
        scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
        securedrop_root=config.SECUREDROP_ROOT,
        nouns_file=config.NOUNS,
        adjectives_file=config.ADJECTIVES,
        gpg_key_dir=config.GPG_KEY_DIR,
    )

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e: CSRFError) -> 'Response':
        # A CSRF failure usually means the session expired: log the user
        # out and send them back to the login page.
        # render the message first to ensure it's localized.
        msg = gettext('You have been logged out due to inactivity.')
        session.clear()
        flash(msg, 'error')
        return redirect(url_for('main.login'))

    def _handle_http_exception(
        error: 'HTTPException'
    ) -> 'Tuple[Union[Response, str], Optional[int]]':
        # Workaround for no blueprint-level 404/5 error handlers, see:
        # https://github.com/pallets/flask/issues/503#issuecomment-71383286
        handler = list(app.error_handler_spec['api'][error.code].values())[0]
        if request.path.startswith('/api/') and handler:
            return handler(error)

        return render_template('error.html', error=error), error.code

    for code in default_exceptions:
        app.errorhandler(code)(_handle_http_exception)

    i18n.configure(config, app)

    app.jinja_env.trim_blocks = True
    app.jinja_env.lstrip_blocks = True
    app.jinja_env.globals['version'] = version.__version__
    app.jinja_env.filters['rel_datetime_format'] = \
        template_filters.rel_datetime_format
    app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat

    @app.before_first_request
    def expire_blacklisted_tokens() -> None:
        # Purge revoked API tokens whose expiry has already passed.
        cleanup_expired_revoked_tokens()

    @app.before_request
    def load_instance_config() -> None:
        app.instance_config = InstanceConfig.get_current()

    @app.before_request
    def setup_g() -> 'Optional[Response]':
        """Store commonly used values in Flask's special g object"""
        # Expire idle sessions.
        if 'expires' in session and datetime.utcnow() >= session['expires']:
            session.clear()
            flash(gettext('You have been logged out due to inactivity.'),
                  'error')

        uid = session.get('uid', None)
        if uid:
            user = Journalist.query.get(uid)
            # The nonce changes on password change, invalidating old sessions.
            if user and 'nonce' in session and \
                    session['nonce'] != user.session_nonce:
                session.clear()
                flash(gettext('You have been logged out due to password change'),
                      'error')

        session['expires'] = datetime.utcnow() + \
            timedelta(minutes=getattr(config,
                                      'SESSION_EXPIRATION_MINUTES',
                                      120))

        # Work around https://github.com/lepture/flask-wtf/issues/275
        # -- after upgrading from Python 2 to Python 3, any existing
        # session's csrf_token value will be retrieved as bytes,
        # causing a TypeError. This simple fix, deleting the existing
        # token, was suggested in the issue comments. This code will
        # be safe to remove after Python 2 reaches EOL in 2020, and no
        # supported SecureDrop installations can still have this
        # problem.
        if sys.version_info.major > 2 and type(session.get('csrf_token')) is bytes:
            del session['csrf_token']

        uid = session.get('uid', None)
        if uid:
            g.user = Journalist.query.get(uid)

        i18n.set_locale(config)

        if app.instance_config.organization_name:
            g.organization_name = app.instance_config.organization_name
        else:
            g.organization_name = gettext('SecureDrop')

        try:
            g.logo = get_logo_url(app)
        except FileNotFoundError:
            app.logger.error("Site logo not found.")

        if app.config["OS_PAST_EOL"]:
            g.show_os_past_eol_warning = True
        elif app.config["OS_NEAR_EOL"]:
            g.show_os_near_eol_warning = True

        if request.path.split('/')[1] == 'api':
            pass  # We use the @token_required decorator for the API endpoints
        else:  # We are not using the API
            if request.endpoint not in _insecure_views and not logged_in():
                return redirect(url_for('main.login'))

            if request.method == 'POST':
                filesystem_id = request.form.get('filesystem_id')
                if filesystem_id:
                    g.filesystem_id = filesystem_id
                    g.source = get_source(filesystem_id)

        return None

    app.register_blueprint(main.make_blueprint(config))
    app.register_blueprint(account.make_blueprint(config),
                           url_prefix='/account')
    app.register_blueprint(admin.make_blueprint(config), url_prefix='/admin')
    app.register_blueprint(col.make_blueprint(config), url_prefix='/col')
    api_blueprint = api.make_blueprint(config)
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')
    # API clients authenticate with tokens, not CSRF-protected forms.
    csrf.exempt(api_blueprint)

    return app
<|file_name|>elastos_cast_checker.py<|end_file_name|><|fim▁begin|># elastos_cast_checker.py
# encoding: UTF-8
# usages:
# sudo chmod a+x elastos_cast_checker.py
# python elastos_cast_checker.py
import re
import os
import sys
def read_file(path):
    """Return the whitespace-stripped lines of a .cpp/.h file, or [] for
    anything else (wrong extension or not a regular file)."""
    stripped = []
    is_source = path.endswith('.cpp') or path.endswith('.h')
    if is_source and os.path.isfile(path):
        with open(path, 'r') as handle:
            stripped = [raw.strip() for raw in handle]
    return stripped
def find_declare_match(param, line):
    """Search `line` for an AutoPtr<...> declaration of `param`.

    Returns the re match object (group 1 is the declared type) or None.
    """
    regex = r'AutoPtr\s*<(.*)>\s*(.*)[, ]{0}[; ,]'.format(param)
    return re.search(regex, line)
def check_declare_match(usedType, param, declLine):
    """Return a match when `declLine` declares `param` exactly as
    AutoPtr<usedType>; otherwise None."""
    regex = r'AutoPtr\s*<\s*{0}\s*>\s*(.*)[, ]{1}[; ,]'.format(usedType, param)
    return re.search(regex, declLine)
def find_declare_line(param, lines, lineIndex):
    """Scan backwards from `lineIndex` for the line declaring `param`.

    Skips blank/one-char lines and `//` comments.  Returns the index of
    the declaring line, or -1 when no declaration is found.
    """
    if len(lines) == 0:
        return -1
    # Bug fix: the original used range(lineIndex, 0, -1), which stops at
    # index 1 and never inspects line 0 -- declarations on the very first
    # line of a file were reported as missing.  Scan down to 0 inclusive.
    for i in range(lineIndex, -1, -1):
        line = lines[i]
        if len(line) > 1 and not line.startswith("//"):
            match = find_declare_match(param, line)
            if match:
                return i
    return -1
def check_match(firstLog, logFile, cppFilepath, usedMatch, usedLineNum,
                declLine, declLineNum, isHeader=True):
    """Verify that the cast in `usedMatch` agrees with its declaration.

    Logs an error (to `logFile` and stdout) when the declared AutoPtr
    type differs from the type used in the cast.  `firstLog` tracks
    whether the per-file header still needs to be emitted; the updated
    flag is returned.
    """
    usedType = usedMatch.group(2)
    param = usedMatch.group(4)
    matchInfo = usedMatch.group()
    match = check_declare_match(usedType, param, declLine)
    if match is None:
        if firstLog:
            firstLog = False
            logInfo = '\n>> process file: ' + cppFilepath + '\n'
            logFile.write(logInfo)
            # print statements converted to function calls (Python 2 syntax
            # is a SyntaxError under Python 3; single-argument calls behave
            # identically on both).
            print(logInfo)
        fileInfo = ''
        if isHeader:
            fileInfo = 'in .h file'
        logInfo = " > error: invalid using of {0} at line {1:d}, it is declared as {2} '{3}' at line {4:d}.\n" \
            .format(matchInfo, usedLineNum + 1, declLine, fileInfo, declLineNum + 1)
        logFile.write(logInfo)
        print(logInfo)
    # The original returned the flag only on the match branch and fell
    # through (returning None) on the error branch; always return the flag
    # so callers keep a well-defined boolean.
    return firstLog
def process_declare_line_in_header(logFile, firstLog, cppFilepath, match, lines, lineNum, headerFilepath):
    """Look up the declaration of the cast parameter in the matching header.

    When found, delegates to check_match(); otherwise logs a warning
    (member variables may be declared in a super class's header).
    Returns the updated firstLog flag.
    """
    headerLines = read_file(headerFilepath)
    param = match.group(4)
    matchInfo = match.group()
    declLineNum = find_declare_line(param, headerLines, len(headerLines) - 1)
    if declLineNum != -1:
        declLine = headerLines[declLineNum]
        firstLog = check_match(firstLog, logFile, cppFilepath, match, lineNum, declLine, declLineNum)
    else:
        logInfo = ''
        if firstLog:
            firstLog = False
            logInfo = '\n>> process file: ' + cppFilepath + '\n'
            logFile.write(logInfo)
            # Python 2 print statements converted to function calls so the
            # script runs under Python 3.
            print(logInfo)
        if param.startswith('m'):
            logInfo = " = warning: declaration for {0} at line {1:d} not found! is it declared in super class's .h file?\n".format(matchInfo, lineNum + 1)
        else:
            logInfo = " = warning: declaration for {0} at line {1:d} not found!\n".format(matchInfo, lineNum + 1)
        logFile.write(logInfo)
        print(logInfo)
    return firstLog
def process_file(path, logFile):
    # Scan a single .cpp file for interface casts of the form
    # "(IFoo**)&var" and verify each against the AutoPtr<> declaration of
    # `var`, looking first in this file and then in the matching header
    # under /inc/.
    if path.endswith('.cpp') == False:
        return
    firstLog = True;
    lines = read_file(path)
    lineNum = 0
    for eachLine in lines:
        # skip blank/one-char lines and // comments
        if (len(eachLine) > 1) and (eachLine.startswith("//") == False):
            # groups: 2 = interface type (I...), 4 = parameter name
            pattern = re.compile(r'(\()(I\w*)(\*\*\)&)([a-zA-Z]\w*)(\))')
            match = pattern.search(eachLine)
            if match:
                #print match.group() match.groups()
                #print match.group(2), match.group(4)
                usedType = match.group(2)
                param = match.group(4)
                # do not check weak-reference Resolve
                if usedType == 'IInterface' and eachLine.find('->Resolve(') != -1:
                    pass
                else:
                    declLineNum = find_declare_line(param, lines, lineNum)
                    if (declLineNum != -1):
                        declLine = lines[declLineNum]
                        #print 'declLine', declLine
                        firstLog = check_match(firstLog, logFile, path, match, lineNum, declLine, declLineNum, False)
                    else:
                        # declaration not in the .cpp: fall back to the
                        # header under the parallel /inc/ tree
                        headerFilepath = path.replace("/src/", "/inc/").replace(".cpp", ".h")
                        firstLog = process_declare_line_in_header(logFile, firstLog, path, match, lineNum, headerFilepath)
        lineNum = lineNum +1
def process_dir(path, logFile):
    """Recursively walk `path`, running process_file() on every regular
    file; hidden directories (names starting with '.') are skipped."""
    for entry in os.listdir(path):
        child = path + '/' + entry
        if os.path.isdir(child):
            # exclude hidden dirs such as .git
            if not entry.startswith('.'):
                process_dir(child, logFile)
        elif os.path.isfile(child):
            process_file(child, logFile)
def summarize_log(logPath):
    """Count error/warning lines in the log file, then append (and print)
    a 'total: N errors, M warnings.' summary line.

    Does nothing when `logPath` is not an existing regular file.
    """
    if os.path.isfile(logPath):
        errorCount = 0
        warningCount = 0
        # summarize
        logFile = open(logPath, 'r')
        for line in logFile:
            line = line.strip()
            if line.startswith('> error:'):
                errorCount = errorCount + 1
            elif line.startswith('= warning:'):
                warningCount = warningCount + 1
        logFile.close()
        # log -- Python 2 print statement converted to a function call so
        # the script runs under Python 3.
        logFile = open(logPath, 'a')
        logInfo = '\ntotal: {0:d} errors, {1:d} warnings.'.format(errorCount, warningCount)
        logFile.write(logInfo)
        print(logInfo)
        logFile.close()
def process(path, logPath):
    """Check `path` (a file or a directory tree) for invalid interface
    casts, writing findings to a fresh log at `logPath` and appending a
    summary when done.
    """
    if os.path.isfile(logPath):
        os.remove(logPath)
    logFile = open(logPath, 'a')
    # Python 2 print statements converted to function calls so the script
    # runs under Python 3.
    print('output to', logPath)
    if os.path.isdir(path):
        process_dir(path, logFile)
    elif os.path.isfile(path):
        process_file(path, logFile)
    else:
        print('invalid path:', path)
    logFile.close()
    summarize_log(logPath)
#process('/home/kesalin/test/python/test.cpp', 'elastos_cast_checker.log')
#total: 2 errors, 10 warnings.
#process('/home/kesalin/Elastos5/Sources/Elastos/LibCore/src', '/home/kesalin/elastos_cast_checker.log')
#process('/home/kesalin/Elastos5/Sources/Elastos/Frameworks/Droid/Base/Core/src/', '/home/kesalin/elastos_cast_checker.log')
#total: 7 errors, 0 warnings.
process('/home/kesalin/Elastos5/Sources/Elastos/Frameworks/Droid/Base/Services/Server/src', '/home/kesalin/elastos_cast_checker.log')<|fim▁end|> | if (len(eachLine) > 1) and (eachLine.startswith("//") == False): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windowing and user-interface events.
This module allows applications to create and display windows with an
OpenGL context. Windows can be created with a variety of border styles
or set fullscreen.
You can register event handlers for keyboard, mouse and window events.
For games and kiosks you can also restrict the input to your windows,
for example disabling users from switching away from the application
with certain key combinations or capturing and hiding the mouse.
Getting started
---------------
Call the Window constructor to create a new window::
from pyglet.window import Window
win = Window(width=640, height=480)
Attach your own event handlers::
@win.event
def on_key_press(symbol, modifiers):
# ... handle this event ...
Within your main run loop, you must call `Window.dispatch_events` regularly.
Windows are double-buffered by default, so you must call `Window.flip` to
update the display::
while not win.has_exit:
win.dispatch_events()
# ... drawing commands ...
win.flip()
Creating a game window
----------------------
Use `Window.set_exclusive_mouse` to hide the mouse cursor and receive relative
mouse movement events. Specify ``fullscreen=True`` as a keyword argument to
the `Window` constructor to render to the entire screen rather than opening a
window::
    win = Window(fullscreen=True)
    win.set_exclusive_mouse()
Working with multiple windows
-----------------------------
You can open any number of windows and render to them individually. Each
window must have the event handlers set on it that you are interested in
(i.e., each window will have its own mouse event handler).
You must call `Window.dispatch_events` for each window. Before rendering
to a window, you must call `Window.switch_to` to set the active GL context.
Here is an example run loop for a list of windows::
windows = # list of Window instances
while windows:
for win in windows:
win.dispatch_events()
if win.has_exit:
win.close()
windows = [w for w in windows if not w.has_exit]
for win in windows:
win.switch_to()
# ... drawing commands for this window ...
win.flip()
Working with multiple screens
-----------------------------
By default, fullscreen windows are opened on the primary display (typically
set by the user in their operating system settings). You can retrieve a list
of attached screens and select one manually if you prefer. This is useful for
opening a fullscreen window on each screen::
display = window.get_platform().get_default_display()
screens = display.get_screens()
windows = []
for screen in screens:
windows.append(window.Window(fullscreen=True, screen=screen))
Specifying a screen has no effect if the window is not fullscreen.
Specifying the OpenGL context properties
----------------------------------------
Each window has its own context which is created when the window is created.
You can specify the properties of the context before it is created
by creating a "template" configuration::
from pyglet import gl
# Create template config
config = gl.Config()
config.stencil_size = 8
config.aux_buffers = 4
# Create a window using this config
win = window.Window(config=config)
To determine if a given configuration is supported, query the screen (see
above, "Working with multiple screens")::
configs = screen.get_matching_configs(config)
if not configs:
# ... config is not supported
else:
win = window.Window(config=configs[0])
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 1195 2007-08-24 09:38:40Z Alex.Holkner $'
import pprint
import sys
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.event import EventDispatcher
from pyglet.window.event import WindowExitHandler
import pyglet.window.key
class WindowException(Exception):
    '''The root exception for all window-related errors.

    The more specific exceptions in this module derive from it.
    '''
    pass
class NoSuchDisplayException(WindowException):
    '''An exception indicating the requested display is not available.'''
    pass
class NoSuchConfigException(WindowException):
    '''An exception indicating the requested configuration is not
    available.

    Raised by `Screen.get_best_config` when no config matches the template.
    '''
    pass
class MouseCursorException(WindowException):
    '''The root exception for all mouse cursor-related errors.'''
    pass
class Platform(object):
    '''Operating-system-level functionality.

    The platform instance can only be obtained with `get_platform`.  Use
    the platform to obtain a `Display` instance.
    '''
    def get_display(self, name):
        '''Get a display device by name.

        This is meaningful only under X11, where the `name` is a
        string including the host name and display number; for example
        ``"localhost:1"``.

        On platforms other than X11, `name` is ignored and the default
        display is returned.  pyglet does not support multiple video
        devices on Windows or OS X.  If more than one device is
        attached, they will appear as a single virtual device comprising
        all the attached screens.

        :Parameters:
            `name` : str
                The name of the display to connect to.

        :rtype: `Display`
        '''
        # Bug fix: this previously returned the bare name
        # ``get_default_display()``, which is not defined at module level
        # in the visible scope and raised NameError; the default display
        # is provided by this instance's own method.
        return self.get_default_display()

    def get_default_display(self):
        '''Get the default display device.

        :rtype: `Display`
        '''
        raise NotImplementedError('abstract')
class Display(object):
    '''A display device supporting one or more screens.

    Use `Platform.get_display` or `Platform.get_default_display` to obtain
    an instance of this class. Use a display to obtain `Screen` instances.
    '''
    def __init__(self):
        # Windows attached to this display.  Nothing in this class appends
        # to the list, so it is presumably populated by platform window
        # code when windows are created -- verify before relying on it.
        self._windows = []

    def get_screens(self):
        '''Get the available screens.

        A typical multi-monitor workstation comprises one `Display` with
        multiple `Screen` s.  This method returns a list of screens which
        can be enumerated to select one for full-screen display.

        For the purposes of creating an OpenGL config, the default screen
        will suffice.

        :rtype: list of `Screen`
        '''
        raise NotImplementedError('abstract')

    def get_default_screen(self):
        '''Get the default screen as specified by the user's operating system
        preferences.

        Defined here as the first screen reported by `get_screens`.

        :rtype: `Screen`
        '''
        return self.get_screens()[0]

    def get_windows(self):
        '''Get the windows currently attached to this display.

        :rtype: sequence of `Window`
        '''
        return self._windows
class Screen(object):
    '''A virtual monitor that supports fullscreen windows.

    A screen usually corresponds to a physical output device such as a
    monitor, television or projector.  Selecting a screen for a window
    has no effect unless the window is made fullscreen, in which case
    the window fills only that particular virtual screen.

    `width` and `height` give the current resolution of the screen,
    while `x` and `y` locate its top-left corner on the virtual desktop;
    these can be compared to tell whether screens sit above or beside
    one another.  The origin is not always meaningful, however: an X
    server with two displays and Xinerama disabled presents two
    logically separate screens with no relation to each other.

    Obtain instances via `Display.get_screens` or
    `Display.get_default_screen`.

    :Ivariables:
        `x` : int
            Left edge of the screen on the virtual desktop.
        `y` : int
            Top edge of the screen on the virtual desktop.
        `width` : int
            Width of the screen, in pixels.
        `height` : int
            Height of the screen, in pixels.
    '''
    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __repr__(self):
        layout = '%s(x=%d, y=%d, width=%d, height=%d)'
        fields = (self.__class__.__name__, self.x, self.y,
                  self.width, self.height)
        return layout % fields

    def get_best_config(self, template=None):
        '''Return the best available GL config.

        Required attributes may be given in `template`; when no
        configuration matches it, `NoSuchConfigException` is raised.

        :Parameters:
            `template` : `pyglet.gl.Config`
                A configuration with desired attributes filled in.

        :rtype: `pyglet.gl.Config`
        :return: A configuration supported by the platform that best
            fulfils the needs described by the template.
        '''
        if template is None:
            template = gl.Config()
        matches = self.get_matching_configs(template)
        if matches:
            return matches[0]
        raise NoSuchConfigException()

    def get_matching_configs(self, template):
        '''Return every config that satisfies a specification.

        Each attribute set in `template` will have an equal or greater
        value in every returned config.  The list is empty when nothing
        matches.

        :Parameters:
            `template` : `pyglet.gl.Config`
                A configuration with desired attributes filled in.

        :rtype: list of `pyglet.gl.Config`
        :return: A list of matching configs.
        '''
        raise NotImplementedError('abstract')
class MouseCursor(object):
    '''Base class for mouse cursors.'''

    #: Whether the cursor is rendered by pyglet via OpenGL.  Every
    #: cursor except system-provided ones leaves this True.
    drawable = True

    def draw(self, x, y):
        '''Render the cursor (abstract).

        Implementations draw with the "hot" spot at the given window
        coordinates.  The projection is the pyglet default (orthographic
        in window space); no other GL state may be assumed.

        :Parameters:
            `x` : int
                X coordinate of the mouse pointer's hot spot.
            `y` : int
                Y coordinate of the mouse pointer's hot spot.
        '''
        raise NotImplementedError('abstract')
class DefaultMouseCursor(MouseCursor):
    '''The default mouse cursor used by the operating system.'''
    # Drawn by the OS, not by pyglet, so there is nothing to render.
    drawable = False
class ImageMouseCursor(MouseCursor):
    '''A user-defined mouse cursor created from an image.

    Use this class to create your own mouse cursors and assign them
    to windows.  There are no constraints on the image size or format.
    '''
    drawable = True

    def __init__(self, image, hot_x, hot_y):
        '''Create a mouse cursor from an image.

        :Parameters:
            `image` : `pyglet.image.AbstractImage`
                Image to use for the mouse cursor.  It must have a
                valid `texture` attribute.
            `hot_x` : int
                X coordinate of the "hot" spot in the image.
            `hot_y` : int
                Y coordinate of the "hot" spot in the image, measured
                from the bottom.
        '''
        self.texture = image.texture
        self.hot_x = hot_x
        self.hot_y = hot_y

    def draw(self, x, y):
        # Save the enable state and turn on standard alpha blending so
        # the cursor image's transparency is honoured, then restore.
        gl.glPushAttrib(gl.GL_ENABLE_BIT)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        # Offset by the hot spot so (x, y) lands on the hot point.
        self.texture.blit(x - self.hot_x, y - self.hot_y, 0)
        gl.glPopAttrib()
class BaseWindow(EventDispatcher, WindowExitHandler):
'''Platform-independent application window.
A window is a "heavyweight" object occupying operating system resources.
The "client" or "content" area of a window is filled entirely with
an OpenGL viewport. Applications have no access to operating system
widgets or controls; all rendering must be done via OpenGL.
Windows may appear as floating regions or can be set to fill an entire
screen (fullscreen). When floating, windows may appear borderless or
decorated with a platform-specific frame (including, for example, the
title bar, minimize and close buttons, resize handles, and so on).
While it is possible to set the location of a window, it is recommended
that applications allow the platform to place it according to local
conventions. This will ensure it is not obscured by other windows,
and appears on an appropriate screen for the user.
To render into a window, you must first call `switch_to`, to make
it the current OpenGL context. If you use only one window in the
application, there is no need to do this.
'''
#: The default window style.
WINDOW_STYLE_DEFAULT = None
#: The window style for pop-up dialogs.
WINDOW_STYLE_DIALOG = 'dialog'
#: The window style for tool windows.
WINDOW_STYLE_TOOL = 'tool'
#: A window style without any decoration.
WINDOW_STYLE_BORDERLESS = 'borderless'
#: The default mouse cursor.
CURSOR_DEFAULT = None
#: A crosshair mouse cursor.
CURSOR_CROSSHAIR = 'crosshair'
#: A pointing hand mouse cursor.
CURSOR_HAND = 'hand'
#: A "help" mouse cursor; typically a question mark and an arrow.
CURSOR_HELP = 'help'
#: A mouse cursor indicating that the selected operation is not permitted.
CURSOR_NO = 'no'
#: A mouse cursor indicating the element can be resized.
CURSOR_SIZE = 'size'
#: A mouse cursor indicating the element can be resized from the top
#: border.
CURSOR_SIZE_UP = 'size_up'
#: A mouse cursor indicating the element can be resized from the
#: upper-right corner.
CURSOR_SIZE_UP_RIGHT = 'size_up_right'
#: A mouse cursor indicating the element can be resized from the right
#: border.
CURSOR_SIZE_RIGHT = 'size_right'
#: A mouse cursor indicating the element can be resized from the lower-right
#: corner.
CURSOR_SIZE_DOWN_RIGHT = 'size_down_right'
#: A mouse cursor indicating the element can be resized from the bottom
#: border.
CURSOR_SIZE_DOWN = 'size_down'
#: A mouse cursor indicating the element can be resized from the lower-left
#: corner.
CURSOR_SIZE_DOWN_LEFT = 'size_down_left'
#: A mouse cursor indicating the element can be resized from the left
#: border.
CURSOR_SIZE_LEFT = 'size_left'
#: A mouse cursor indicating the element can be resized from the upper-left
#: corner.
CURSOR_SIZE_UP_LEFT = 'size_up_left'
#: A mouse cursor indicating the element can be resized vertically.
CURSOR_SIZE_UP_DOWN = 'size_up_down'
#: A mouse cursor indicating the element can be resized horizontally.
CURSOR_SIZE_LEFT_RIGHT = 'size_left_right'
#: A text input mouse cursor (I-beam).
CURSOR_TEXT = 'text'
#: A "wait" mouse cursor; typically an hourglass or watch.
CURSOR_WAIT = 'wait'
#: The "wait" mouse cursor combined with an arrow.
CURSOR_WAIT_ARROW = 'wait_arrow'
# Instance variables accessible only via properties
_width = None
_height = None
_caption = None
_resizable = False
_style = WINDOW_STYLE_DEFAULT
_fullscreen = False
_visible = False
_vsync = False
_screen = None
_config = None
_context = None
# Used to restore window size and position after fullscreen
_windowed_size = None
_windowed_location = None
# Subclasses should update these after relevant events
_mouse_cursor = DefaultMouseCursor()
_mouse_x = 0
_mouse_y = 0
_mouse_visible = True
_mouse_exclusive = False
_mouse_in_window = True
_event_queue = None
_allow_dispatch_event = False # controlled by dispatch_events stack frame
def __init__(self,
width=640,
height=480,
caption=None,
resizable=False,
style=WINDOW_STYLE_DEFAULT,
fullscreen=False,
visible=True,
vsync=True,
display=None,
screen=None,
config=None,
context=None):
'''Create a window.
All parameters are optional, and reasonable defaults are assumed
where they are not specified.
The `display`, `screen`, `config` and `context` parameters form
a hierarchy of control: there is no need to specify more than
one of these. For example, if you specify `screen` the `display`
will be inferred, and a default `config` and `context` will be
created.
`config` is a special case; it can be a template created by the
user specifying the attributes desired, or it can be a complete
`config` as returned from `Screen.get_matching_configs` or similar.
The context will be active as soon as the window is created, as if
`switch_to` was just called.
:Parameters:
`width` : int
Width of the window, in pixels. Ignored if `fullscreen`
is True. Defaults to 640.
`height` : int
Height of the window, in pixels. Ignored if `fullscreen`
is True. Defaults to 480.
`caption` : str or unicode
Initial caption (title) of the window. Defaults to
``sys.argv[0]``.
`resizable` : bool
If True, the window will be resizable. Defaults to False.
`style` : int
One of the ``WINDOW_STYLE_*`` constants specifying the
border style of the window.
`fullscreen` : bool
If True, the window will cover the entire screen rather
than floating. Defaults to False.
`visible` : bool
Determines if the window is visible immediately after
creation. Defaults to True. Set this to False if you
would like to change attributes of the window before
having it appear to the user.
`vsync` : bool
If True, buffer flips are synchronised to the primary screen's
vertical retrace, eliminating flicker.
`display` : `Display`
The display device to use. Useful only under X11.
`screen` : `Screen`
The screen to use, if in fullscreen.
`config` : `pyglet.gl.Config`
Either a template from which to create a complete config,
or a complete config.
`context` : `pyglet.gl.Context`
The context to attach to this window. The context must
not already be attached to another window.
'''
EventDispatcher.__init__(self)
self._event_queue = []
if not display:
display = get_platform().get_default_display()
if not screen:
screen = display.get_default_screen()
if not config:
for template_config in [
gl.Config(double_buffer=True, depth_size=24),
gl.Config(double_buffer=True, depth_size=16)]:
try:
config = screen.get_best_config(template_config)
break
except NoSuchConfigException:
pass
if not config:
raise NoSuchConfigException('No standard config is available.')
if not config.is_complete():
config = screen.get_best_config(config)
if not context:
context = config.create_context(gl.get_current_context())
if fullscreen:
self._windowed_size = width, height
width = screen.width
height = screen.height
self._width = width
self._height = height
self._resizable = resizable
self._fullscreen = fullscreen
self._style = style
self._vsync = vsync
# Set these in reverse order to above, to ensure we get user
# preference
self._context = context
self._config = self._context.config
self._screen = self._config.screen
self._display = self._screen.display
if caption is None:
caption = sys.argv[0]
self._caption = caption
display._windows.append(self)
self._create()
self.switch_to()
if visible:
self.set_visible(True)
self.activate()
def _create(self):
raise NotImplementedError('abstract')
def _recreate(self, changes):
'''Recreate the window with current attributes.
:Parameters:
`changes` : list of str
List of attribute names that were changed since the last
`_create` or `_recreate`. For example, ``['fullscreen']``
is given if the window is to be toggled to or from fullscreen.
'''
raise NotImplementedError('abstract')
def set_fullscreen(self, fullscreen=True, screen=None):
'''Toggle to or from fullscreen.
After toggling fullscreen, the GL context should have retained its
state and objects, however the buffers will need to be cleared and
redrawn.
:Parameters:
`fullscreen` : bool
True if the window should be made fullscreen, False if it
should be windowed.
`screen` : Screen
If not None and fullscreen is True, the window is moved to the
given screen. The screen must belong to the same display as
the window.
'''
if fullscreen == self._fullscreen and screen is None:
return
if not self._fullscreen:
# Save windowed size
self._windowed_size = self.get_size()
self._windowed_location = self.get_location()
if fullscreen and screen is not None:
assert screen.display is self.display
self._screen = screen
self._fullscreen = fullscreen
if self._fullscreen:
self._width = self.screen.width
self._height = self.screen.height
else:
self._width, self._height = self._windowed_size
self._recreate(['fullscreen'])
if not self._fullscreen and self._windowed_location:
# Restore windowed location -- no effect on OS X because of
# deferred recreate. Move into platform _create? XXX
self.set_location(*self._windowed_location)
def on_resize(self, width, height):
'''A default resize event handler.
This default handler updates the GL viewport to cover the entire
window and sets the ``GL_PROJECTION`` matrix to be orthagonal in
window space. The bottom-left corner is (0, 0) and the top-right
corner is the width and height of the window in pixels.
Override this event handler with your own to create another
projection, for example in perspective.
'''
self.switch_to()
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
def close(self):
'''Close the window.
Windows are closed automatically when the process exits, so this
method need only be called when multiple windows or console input
are being used.
After closing the window, the GL context will be invalid. The
window instance cannot be reused once closed (see also `set_visible`).
'''
self._display._windows.remove(self)
self._context.destroy()
self._config = None
self._context = None
def draw_mouse_cursor(self):
'''Draw the custom mouse cursor.
If the current mouse cursor has ``drawable`` set, this method
is called before the buffers are flipped to render it.
This method always leaves the ``GL_MODELVIEW`` matrix as current,
regardless of what it was set to previously. No other GL state
is affected.
<|fim▁hole|> # XXX leaves state in modelview regardless of starting state
if (self._mouse_cursor.drawable and
self._mouse_visible and
self._mouse_in_window):
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPushMatrix()
gl.glLoadIdentity()
gl.glOrtho(0, self.width, 0, self.height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glLoadIdentity()
self._mouse_cursor.draw(self._mouse_x, self._mouse_y)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPopMatrix()
# Properties provide read-only access to instance variables. Use
# set_* methods to change them if applicable.
caption = property(lambda self: self._caption,
doc='''The window caption (title). Read-only.
:type: str
''')
resizable = property(lambda self: self._resizable,
doc='''True if the window is resizeable. Read-only.
:type: bool
''')
style = property(lambda self: self._style,
doc='''The window style; one of the ``WINDOW_STYLE_*`` constants.
Read-only.
:type: int
''')
fullscreen = property(lambda self: self._fullscreen,
doc='''True if the window is currently fullscreen. Read-only.
:type: bool
''')
visible = property(lambda self: self._visible,
doc='''True if the window is currently visible. Read-only.
:type: bool
''')
vsync = property(lambda self: self._vsync,
doc='''True if buffer flips are synchronised to the screen's vertical
retrace. Read-only.
:type: bool
''')
display = property(lambda self: self._display,
doc='''The display this window belongs to. Read-only.
:type: `Display`
''')
screen = property(lambda self: self._screen,
doc='''The screen this window is fullscreen in. Read-only.
:type: `Screen`
''')
config = property(lambda self: self._config,
doc='''A GL config describing the context of this window. Read-only.
:type: `pyglet.gl.Config`
''')
context = property(lambda self: self._context,
doc='''The OpenGL context attached to this window. Read-only.
:type: `pyglet.gl.Context`
''')
# These are the only properties that can be set
width = property(lambda self: self.get_size()[0],
lambda self, width: self.set_size(width, self.height),
doc='''The width of the window, in pixels. Read-write.
:type: int
''')
height = property(lambda self: self.get_size()[1],
lambda self, height: self.set_size(self.width, height),
doc='''The height of the window, in pixels. Read-write.
:type: int
''')
def set_caption(self, caption):
'''Set the window's caption.
The caption appears in the titlebar of the window, if it has one,
and in the taskbar on Windows and many X11 window managers.
:Parameters:
`caption` : str or unicode
The caption to set.
'''
raise NotImplementedError('abstract')
def set_minimum_size(self, width, height):
'''Set the minimum size of the window.
Once set, the user will not be able to resize the window smaller
than the given dimensions. There is no way to remove the
minimum size constraint on a window (but you could set it to 0,0).
The behaviour is undefined if the minimum size is set larger than
the current size of the window.
The window size does not include the border or title bar.
:Parameters:
`width` : int
Minimum width of the window, in pixels.
`height` : int
Minimum height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_maximum_size(self, width, height):
'''Set the maximum size of the window.
Once set, the user will not be able to resize the window larger
than the given dimensions. There is no way to remove the
maximum size constraint on a window (but you could set it to a large
value).
The behaviour is undefined if the maximum size is set smaller than
the current size of the window.
The window size does not include the border or title bar.
:Parameters:
`width` : int
Maximum width of the window, in pixels.
`height` : int
Maximum height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_size(self, width, height):
'''Resize the window.
The behaviour is undefined if the window is not resizable, or if
it is currently fullscreen.
The window size does not include the border or title bar.
:Parameters:
`width` : int
New width of the window, in pixels.
`height` : int
New height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def get_size(self):
'''Return the current size of the window.
The window size does not include the border or title bar.
:rtype: (int, int)
:return: The width and height of the window, in pixels.
'''
raise NotImplementedError('abstract')
def set_location(self, x, y):
'''Set the position of the window.
:Parameters:
`x` : int
Distance of the left edge of the window from the left edge
of the virtual desktop, in pixels.
`y` : int
Distance of the top edge of the window from the top edge of
the virtual desktop, in pixels.
'''
raise NotImplementedError('abstract')
def get_location(self):
'''Return the current position of the window.
:rtype: (int, int)
:return: The distances of the left and top edges from their respective
edges on the virtual desktop, in pixels.
'''
raise NotImplementedError('abstract')
def activate(self):
'''Attempt to restore keyboard focus to the window.
Depending on the window manager or operating system, this may not
be successful. For example, on Windows XP an application is not
allowed to "steal" focus from another application. Instead, the
window's taskbar icon will flash, indicating it requires attention.
'''
raise NotImplementedError('abstract')
def set_visible(self, visible=True):
'''Show or hide the window.
:Parameters:
`visible` : bool
If True, the window will be shown; otherwise it will be
hidden.
'''
raise NotImplementedError('abstract')
def minimize(self):
'''Minimize the window.
'''
raise NotImplementedError('abstract')
def maximize(self):
'''Maximize the window.
The behaviour of this method is somewhat dependent on the user's
display setup. On a multi-monitor system, the window may maximize
to either a single screen or the entire virtual desktop.
'''
raise NotImplementedError('abstract')
def set_vsync(self, vsync):
'''Enable or disable vertical sync control.
When enabled, this option ensures flips from the back to the front
buffer are performed only during the vertical retrace period of the
primary display. This can prevent "tearing" or flickering when
the buffer is updated in the middle of a video scan.
Note that LCD monitors have an analagous time in which they are not
reading from the video buffer; while it does not correspond to
a vertical retrace it has the same effect.
With multi-monitor systems the secondary monitor cannot be
synchronised to, so tearing and flicker cannot be avoided when the
window is positioned outside of the primary display. In this case
it may be advisable to forcibly reduce the framerate (for example,
using `pyglet.clock.set_fps_limit`).
:Parameters:
`vsync` : bool
If True, vsync is enabled, otherwise it is disabled.
'''
raise NotImplementedError('abstract')
def set_mouse_visible(self, visible=True):
'''Show or hide the mouse cursor.
The mouse cursor will only be hidden while it is positioned within
this window. Mouse events will still be processed as usual.
:Parameters:
`visible` : bool
If True, the mouse cursor will be visible, otherwise it
will be hidden.
'''
self._mouse_visible = visible
self.set_mouse_platform_visible()
def set_mouse_platform_visible(self, platform_visible=None):
'''Set the platform-drawn mouse cursor visibility. This is called
automatically after changing the mouse cursor or exclusive mode.
Applications should not normally need to call this method, see
`set_mouse_visible` instead.
:Parameters:
`platform_visible` : bool or None
If None, sets platform visibility to the required visibility
for the current exclusive mode and cursor type. Otherwise,
a bool value will override and force a visibility.
'''
raise NotImplementedError()
def set_mouse_cursor(self, cursor=None):
'''Change the appearance of the mouse cursor.
The appearance of the mouse cursor is only changed while it is
within this window.
:Parameters:
`cursor` : `MouseCursor`
The cursor to set, or None to restore the default cursor.
'''
if cursor is None:
cursor = DefaultMouseCursor()
self._mouse_cursor = cursor
self.set_mouse_platform_visible()
def set_exclusive_mouse(self, exclusive=True):
'''Hide the mouse cursor and direct all mouse events to this
window.
When enabled, this feature prevents the mouse leaving the window. It
is useful for certain styles of games that require complete control of
the mouse. The position of the mouse as reported in subsequent events
is meaningless when exclusive mouse is enabled; you should only use
the relative motion parameters ``dx`` and ``dy``.
:Parameters:
`exclusive` : bool
If True, exclusive mouse is enabled, otherwise it is disabled.
'''
raise NotImplementedError('abstract')
def set_exclusive_keyboard(self, exclusive=True):
'''Prevent the user from switching away from this window using
keyboard accelerators.
When enabled, this feature disables certain operating-system specific
key combinations such as Alt+Tab (Command+Tab on OS X). This can be
useful in certain kiosk applications, it should be avoided in general
applications or games.
:Parameters:
`exclusive` : bool
If True, exclusive keyboard is enabled, otherwise it is
disabled.
'''
raise NotImplementedError('abstract')
def get_system_mouse_cursor(self, name):
'''Obtain a system mouse cursor.
Use `set_mouse_cursor` to make the cursor returned by this method
active. The names accepted by this method are the ``CURSOR_*``
constants defined on this class.
:Parameters:
`name` : str
Name describing the mouse cursor to return. For example,
``CURSOR_WAIT``, ``CURSOR_HELP``, etc.
:rtype: `MouseCursor`
:return: A mouse cursor which can be used with `set_mouse_cursor`.
'''
raise NotImplementedError()
def set_icon(self, *images):
'''Set the window icon.
If multiple images are provided, one with an appropriate size
will be selected (if the correct size is not provided, the image
will be scaled).
Useful sizes to provide are 16x16, 32x32, 64x64 (Mac only) and
128x128 (Mac only).
:Parameters:
`images` : sequence of `pyglet.image.AbstractImage`
List of images to use for the window icon.
'''
pass
def clear(self):
'''Clear the window.
This is a convenience method for clearing the color and depth
buffer. The window must be the active context (see `switch_to`).
'''
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
def dispatch_event(self, *args):
if self._allow_dispatch_event:
EventDispatcher.dispatch_event(self, *args)
else:
self._event_queue.append(args)
def dispatch_events(self):
'''Process the operating system event queue and call attached
event handlers.
'''
raise NotImplementedError('abstract')
# If documenting, show the event methods. Otherwise, leave them out
# as they are not really methods.
if hasattr(sys, 'is_epydoc') and sys.is_epydoc:
def on_key_press(symbol, modifiers):
'''A key on the keyboard was pressed (and held down).
:Parameters:
`symbol` : int
The key symbol pressed.
`modifiers` : int
Bitwise combination of the key modifiers active.
:event:
'''
def on_key_release(symbol, modifiers):
'''A key on the keyboard was released.
:Parameters:
`symbol` : int
The key symbol pressed.
`modifiers` : int
Bitwise combination of the key modifiers active.
:event:
'''
def on_text(text):
'''The user input some text.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
is held down (key repeating); or called without key presses if
another input method was used (e.g., a pen input).
You should always use this method for interpreting text, as the
key symbols often have complex mappings to their unicode
representation which this event takes care of.
:Parameters:
`text` : unicode
The text entered by the user.
:event:
'''
def on_text_motion(motion):
'''The user moved the text input cursor.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
is help down (key repeating).
You should always use this method for moving the text input cursor
(caret), as different platforms have different default keyboard
mappings, and key repeats are handled correctly.
The values that `motion` can take are defined in
`pyglet.window.key`:
* MOTION_UP
* MOTION_RIGHT
* MOTION_DOWN
* MOTION_LEFT
* MOTION_NEXT_WORD
* MOTION_PREVIOUS_WORD
* MOTION_BEGINNING_OF_LINE
* MOTION_END_OF_LINE
* MOTION_NEXT_PAGE
* MOTION_PREVIOUS_PAGE
* MOTION_BEGINNING_OF_FILE
* MOTION_END_OF_FILE
* MOTION_BACKSPACE
* MOTION_DELETE
:Parameters:
`motion` : int
The direction of motion; see remarks.
:event:
'''
def on_text_motion_select(motion):
'''The user moved the text input cursor while extending the
selection.
Typically this is called after `on_key_press` and before
`on_key_release`, but may also be called multiple times if the key
is help down (key repeating).
You should always use this method for responding to text selection
events rather than the raw `on_key_press`, as different platforms
have different default keyboard mappings, and key repeats are
handled correctly.
The values that `motion` can take are defined in `pyglet.window.key`:
* MOTION_UP
* MOTION_RIGHT
* MOTION_DOWN
* MOTION_LEFT
* MOTION_NEXT_WORD
* MOTION_PREVIOUS_WORD
* MOTION_BEGINNING_OF_LINE
* MOTION_END_OF_LINE
* MOTION_NEXT_PAGE
* MOTION_PREVIOUS_PAGE
* MOTION_BEGINNING_OF_FILE
* MOTION_END_OF_FILE
:Parameters:
`motion` : int
The direction of selection motion; see remarks.
:event:
'''
def on_mouse_motion(x, y, dx, dy):
'''The mouse was moved with no buttons held down.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`dx` : float
Relative X position from the previous mouse position.
`dy` : float
Relative Y position from the previous mouse position.
:event:
'''
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
'''The mouse was moved with one or more mouse buttons pressed.
This event will continue to be fired even if the mouse leaves
the window, so long as the drag buttons are continuously held down.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`dx` : float
Relative X position from the previous mouse position.
`dy` : float
Relative Y position from the previous mouse position.
`buttons` : int
Bitwise combination of the mouse buttons currently pressed.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_press(x, y, button, modifiers):
'''A mouse button was pressed (and held down).
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`button` : int
The mouse button that was pressed.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_release(x, y, button, modifiers):
'''A mouse button was released.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`button` : int
The mouse button that was released.
`modifiers` : int
Bitwise combination of any keyboard modifiers currently
active.
:event:
'''
def on_mouse_scroll(x, y, scroll_x, scroll_y):
'''The mouse wheel was scrolled.
Note that most mice have only a vertical scroll wheel, so
`scroll_x` is usually 0. An exception to this is the Apple Mighty
Mouse, which has a mouse ball in place of the wheel which allows
both `scroll_x` and `scroll_y` movement.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
`scroll_x` : int
Number of "clicks" towards the right (left if negative).
`scroll_y` : int
Number of "clicks" upwards (downards if negative).
:event:
'''
def on_close():
'''The user attempted to close the window.
This event can be triggered by clicking on the "X" control box in
the window title bar, or by some other platform-dependent manner.
:event:
'''
def on_mouse_enter(x, y):
'''The mouse was moved into the window.
This event will not be trigged if the mouse is currently being
dragged.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
:event:
'''
def on_mouse_leave(x, y):
'''The mouse was moved outside of the window.
This event will not be trigged if the mouse is currently being
dragged. Note that the coordinates of the mouse pointer will be
outside of the window rectangle.
:Parameters:
`x` : float
Distance in pixels from the left edge of the window.
`y` : float
Distance in pixels from the bottom edge of the window.
:event:
'''
def on_expose():
'''A portion of the window needs to be redrawn.
This event is triggered when the window first appears, and any time
the contents of the window is invalidated due to another window
obscuring it.
There is no way to determine which portion of the window needs
redrawing. Note that the use of this method is becoming
increasingly uncommon, as newer window managers composite windows
automatically and keep a backing store of the window contents.
:event:
'''
def on_resize(width, height):
'''The window was resized.
:Parameters:
`width` : int
The new width of the window, in pixels.
`height` : int
The new height of the window, in pixels.
:event:
'''
def on_move(x, y):
'''The window was moved.
:Parameters:
`x` : int
Distance from the left edge of the screen to the left edge
of the window.
`y` : int
Distance from the top edge of the screen to the top edge of
the window. Note that this is one of few methods in pyglet
which use a Y-down coordinate system.
:event:
'''
def on_activate():
'''The window was activated.
This event can be triggered by clicking on the title bar, bringing
it to the foreground; or by some platform-specific method.
When a window is "active" it has the keyboard focus.
:event:
'''
def on_deactivate():
'''The window was deactivated.
This event can be triggered by clicking on another application
window. When a window is deactivated it no longer has the
keyboard focus.
:event:
'''
def on_show():
'''The window was shown.
This event is triggered when a window is restored after being
minimised, or after being displayed for the first time.
:event:
'''
def on_hide():
'''The window was hidden.
This event is triggered when a window is minimised or (on Mac OS X)
hidden by the user.
:event:
'''
def on_context_lost():
'''The window's GL context was lost.
When the context is lost no more GL methods can be called until it
is recreated. This is a rare event, triggered perhaps by the user
switching to an incompatible video mode. When it occurs, an
application will need to reload all objects (display lists, texture
objects, shaders) as well as restore the GL state.
:event:
'''
def on_context_state_lost():
'''The state of the window's GL context was lost.
pyglet may sometimes need to recreate the window's GL context if
the window is moved to another video device, or between fullscreen
or windowed mode. In this case it will try to share the objects
(display lists, texture objects, shaders) between the old and new
contexts. If this is possible, only the current state of the GL
context is lost, and the application should simply restore state.
:event:
'''
BaseWindow.register_event_type('on_key_press')
BaseWindow.register_event_type('on_key_release')
BaseWindow.register_event_type('on_text')
BaseWindow.register_event_type('on_text_motion')
BaseWindow.register_event_type('on_text_motion_select')
BaseWindow.register_event_type('on_mouse_motion')
BaseWindow.register_event_type('on_mouse_drag')
BaseWindow.register_event_type('on_mouse_press')
BaseWindow.register_event_type('on_mouse_release')
BaseWindow.register_event_type('on_mouse_scroll')
BaseWindow.register_event_type('on_mouse_enter')
BaseWindow.register_event_type('on_mouse_leave')
BaseWindow.register_event_type('on_close')
BaseWindow.register_event_type('on_expose')
BaseWindow.register_event_type('on_resize')
BaseWindow.register_event_type('on_move')
BaseWindow.register_event_type('on_activate')
BaseWindow.register_event_type('on_deactivate')
BaseWindow.register_event_type('on_show')
BaseWindow.register_event_type('on_hide')
BaseWindow.register_event_type('on_context_lost')
BaseWindow.register_event_type('on_context_state_lost')
def get_platform():
'''Get an instance of the Platform most appropriate for this
system.
:rtype: `Platform`
:return: The platform instance.
'''
return _platform
if hasattr(sys, 'is_epydoc') and sys.is_epydoc:
# We are building documentation
Window = BaseWindow
Window.__name__ = 'Window'
del BaseWindow
else:
# Try to determine which platform to use.
if sys.platform == 'darwin':
from pyglet.window.carbon import CarbonPlatform, CarbonWindow
_platform = CarbonPlatform()
Window = CarbonWindow
elif sys.platform in ('win32', 'cygwin'):
from pyglet.window.win32 import Win32Platform, Win32Window
_platform = Win32Platform()
Window = Win32Window
else:
from pyglet.window.xlib import XlibPlatform, XlibWindow
_platform = XlibPlatform()
Window = XlibWindow<|fim▁end|> | There is little need to override this method; instead, subclass
``MouseCursor`` and provide your own ``draw`` method.
'''
# Draw mouse cursor if set and visible. |
<|file_name|>CLI.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2011 Klaus Reimer <[email protected]>
* See LICENSE.md for licensing information.
*/
package de.ailis.microblinks.l.lctrl.shell;
import gnu.getopt.Getopt;
import gnu.getopt.LongOpt;
import java.util.Arrays;
import de.ailis.microblinks.l.lctrl.resources.Resources;
/**
* Base class for all CLI programs.
*
* @author Klaus Reimer ([email protected])
*/
public abstract class CLI
{
/** The command-line program name. */
private final String name;
/** The short options. */
private final String shortOpts;
/** The long options. */
private final LongOpt[] longOpts;
/** Debug mode. */
private boolean debug = false;
/**
* Constructor.
*
* @param name
* The command-line program name.
* @param shortOpts
* The short options.
* @param longOpts
* The long options.
*/<|fim▁hole|> this.name = name;
this.shortOpts = shortOpts;
this.longOpts = longOpts;
}
/**
* Displays command-line help.
*/
private void showHelp()
{
System.out.println(Resources.getText("help.txt"));
}
/**
* Displays version information.
*/
private void showVersion()
{
System.out.println(Resources.getText("version.txt"));
}
/**
* Displays the help hint.
*/
protected void showHelpHint()
{
System.out.println("Use --help to show usage information.");
}
/**
* Prints error message to stderr and then exits with error code 1.
*
* @param message
* The error message.
* @param args
* The error message arguments.
*/
protected void error(final String message, final Object... args)
{
System.err.print(this.name);
System.err.print(": ");
System.err.format(message, args);
System.err.println();
showHelpHint();
System.exit(1);
}
/**
* Processes all command line options.
*
* @param args
* The command line arguments.
* @throws Exception
* When error occurs.
* @return The index of the first non option argument.
*/
private int processOptions(final String[] args) throws Exception
{
final Getopt opts = new Getopt(this.name, args, this.shortOpts, this.longOpts);
int opt;
while ((opt = opts.getopt()) >= 0)
{
switch (opt)
{
case 'h':
showHelp();
System.exit(0);
break;
case 'V':
showVersion();
System.exit(0);
break;
case 'D':
this.debug = true;
break;
case '?':
showHelpHint();
System.exit(111);
break;
default:
processOption(opt, opts.getOptarg());
}
}
return opts.getOptind();
}
/**
* Processes a single option.
*
* @param option
* The option to process
* @param arg
* The optional option argument
* @throws Exception
* When an error occurred.
*/
protected abstract void processOption(final int option, final String arg) throws Exception;
/**
* Executes the program with the specified arguments. This is called from the run() method after options has been
* processed. The specified arguments array only contains the non-option arguments.
*
* @param args
* The non-option command-line arguments.
* @throws Exception
* When something goes wrong.
*/
protected abstract void execute(String[] args) throws Exception;
/**
* Runs the program.
*
* @param args
* The command line arguments.
*/
public void run(final String[] args)
{
try
{
final int commandStart = processOptions(args);
execute(Arrays.copyOfRange(args, commandStart, args.length));
}
catch (final Exception e)
{
if (this.debug)
{
e.printStackTrace(System.err);
}
error(e.getMessage());
System.exit(1);
}
}
/**
* Checks if program runs in debug mode.
*
* @return True if debug mode, false if not.
*/
public boolean isDebug()
{
return this.debug;
}
}<|fim▁end|> | protected CLI(final String name, final String shortOpts, final LongOpt[] longOpts)
{ |
<|file_name|>urlsearchparams.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding::URLSearchParamsMethods;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams::{eURLSearchParams, eString};
use dom::bindings::error::{Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::all::UTF_8;
use encoding::types::{EncodingRef, EncoderTrap};
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::radix;
use std::ascii::OwnedAsciiExt;
#[dom_struct]
pub struct URLSearchParams {
reflector_: Reflector,
data: DOMRefCell<HashMap<DOMString, Vec<DOMString>>>,
}
impl URLSearchParams {
fn new_inherited() -> URLSearchParams {
URLSearchParams {
reflector_: Reflector::new(),
data: DOMRefCell::new(HashMap::new()),
}
}
pub fn new(global: GlobalRef) -> Temporary<URLSearchParams> {
reflect_dom_object(box URLSearchParams::new_inherited(), global, URLSearchParamsBinding::Wrap)
}
pub fn Constructor(global: GlobalRef, init: Option<StringOrURLSearchParams>) -> Fallible<Temporary<URLSearchParams>> {
let usp = URLSearchParams::new(global).root();
match init {
Some(eString(_s)) => {
// XXXManishearth we need to parse the input here
// http://url.spec.whatwg.org/#concept-urlencoded-parser<|fim▁hole|> // We can use rust-url's implementation here:
// https://github.com/SimonSapin/rust-url/blob/master/form_urlencoded.rs#L29
},
Some(eURLSearchParams(u)) => {
let u = u.root();
let usp = usp.r();
let mut map = usp.data.borrow_mut();
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let r = u.r();
let data = r.data.borrow();
*map = data.clone();
},
None => {}
}
Ok(Temporary::from_rooted(usp.r()))
}
}
impl<'a> URLSearchParamsMethods for JSRef<'a, URLSearchParams> {
fn Append(self, name: DOMString, value: DOMString) {
let mut data = self.data.borrow_mut();
match data.entry(name) {
Occupied(entry) => entry.into_mut().push(value),
Vacant(entry) => {
entry.insert(vec!(value));
}
}
self.update_steps();
}
fn Delete(self, name: DOMString) {
self.data.borrow_mut().remove(&name);
self.update_steps();
}
fn Get(self, name: DOMString) -> Option<DOMString> {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.get(&name).map(|v| v[0].clone())
}
fn Has(self, name: DOMString) -> bool {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.contains_key(&name)
}
fn Set(self, name: DOMString, value: DOMString) {
self.data.borrow_mut().insert(name, vec!(value));
self.update_steps();
}
}
pub trait URLSearchParamsHelpers {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8>;
fn update_steps(&self);
}
impl URLSearchParamsHelpers for URLSearchParams {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-serializer
fn serialize_string(value: &DOMString, encoding: EncodingRef) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-byte-serializer
let value = value.as_slice();
// XXXManishearth should this be a strict encoding? Can unwrap()ing the result fail?
let value = encoding.encode(value, EncoderTrap::Replace).unwrap();
let mut buf = vec!();
for i in value.iter() {
let append = match *i {
0x20 => vec!(0x2B),
0x2A | 0x2D | 0x2E |
0x30 ... 0x39 | 0x41 ... 0x5A |
0x5F | 0x61...0x7A => vec!(*i),
a => {
// http://url.spec.whatwg.org/#percent-encode
let mut encoded = vec!(0x25); // %
let s = format!("{}", radix(a, 16)).into_ascii_uppercase();
let bytes = s.as_bytes();
encoded.push_all(bytes);
encoded
}
};
buf.push_all(append.as_slice());
}
buf
}
let encoding = encoding.unwrap_or(UTF_8 as EncodingRef);
let mut buf = vec!();
let mut first_pair = true;
for (k, v) in self.data.borrow().iter() {
let name = serialize_string(k, encoding);
for val in v.iter() {
let value = serialize_string(val, encoding);
if first_pair {
first_pair = false;
} else {
buf.push(0x26); // &
}
buf.push_all(name.as_slice());
buf.push(0x3D); // =
buf.push_all(value.as_slice())
}
}
buf
}
fn update_steps(&self) {
// XXXManishearth Implement this when the URL interface is implemented
// http://url.spec.whatwg.org/#concept-uq-update
}
}<|fim▁end|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.